ext (stringclasses, 9 values) | sha (stringlengths, 40 to 40) | content (stringlengths, 3 to 1.04M)
---|---|---|
py
|
1a5c6961c6796ce322817d48c735b2f85475727d
|
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.permissions import IsAdminUser
from django.utils import timezone
from users.models import User
# Daily active user statistics
# GET /meiduo_admin/statistical/day_active/
class UserDayActiveView(APIView):
permission_classes = [IsAdminUser]
def get(self, request):
"""
Get the daily active user count:
1. Get the daily active user count
2. Return the response
"""
# 1. Get the daily active user count
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
count = User.objects.filter(last_login__gte=now_date).count()
# 2. Return the response
response_data = {
'date': now_date.date(),
'count': count
}
return Response(response_data)
# Statistics of users who placed orders today
# GET /meiduo_admin/statistical/day_orders/
class UserDayOrdersView(APIView):
permission_classes = [IsAdminUser]
def get(self, request):
"""
Get the number of users who placed orders today:
1. Get the number of users who placed orders today
2. Return the response
"""
# 1. Get the number of users who placed orders today
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
count = User.objects.filter(orders__create_time__gte=now_date).distinct().count()
# 2. Return the response
response_data = {
'date': now_date.date(),
'count': count
}
return Response(response_data)
# New user statistics for the last 30 days
# GET /meiduo_admin/statistical/month_increment/
class UserMonthCountView(APIView):
permission_classes = [IsAdminUser]
def get(self, request):
"""
Get the daily new user counts for the last 30 days:
1. Get the daily new user counts
2. Return the response
"""
# 1. Get the daily new user counts for the last 30 days
# End time: today at 00:00
now_date = timezone.now().replace(hour=0, minute=0, second=0, microsecond=0)
# Start time: now_date - 29 days
begin_date = now_date - timezone.timedelta(days=29)
# Date currently being counted
current_date = begin_date
# Per-day new user counts
month_li = []
while current_date <= now_date:
# Start of the next day
next_date = current_date + timezone.timedelta(days=1)
# Count users who registered on the current day
count = User.objects.filter(date_joined__gte=current_date,
date_joined__lt=next_date).count()
month_li.append({
'count': count,
'date': current_date.date()
})
current_date += timezone.timedelta(days=1)
# 2. Return the response
return Response(month_li)
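# A minimal URL-wiring sketch for the three views above. Assumption for illustration:
# the import path `statistical.views` and the project-level `meiduo_admin` prefix are
# hypothetical; only the route suffixes come from the comments in this file.
#
# from django.urls import path
# from statistical import views
#
# urlpatterns = [
#     path('statistical/day_active/', views.UserDayActiveView.as_view()),
#     path('statistical/day_orders/', views.UserDayOrdersView.as_view()),
#     path('statistical/month_increment/', views.UserMonthCountView.as_view()),
# ]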
|
py
|
1a5c69b73aeeced73f2bb2f758e09102050a9eae
|
# Generated by Django 3.2.7 on 2021-09-25 17:54
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('ProjectManager', '0003_auto_20210926_0053'),
]
operations = [
migrations.AddField(
model_name='project',
name='Users',
field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='project',
name='description',
field=models.TextField(blank=True),
),
migrations.AddField(
model_name='project',
name='schoolYear',
field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, to='ProjectManager.schoolyear'),
),
]
|
py
|
1a5c6a48d04a9cdc9c5cfe03b10e9659337c665b
|
# Natural Language Toolkit: Utility functions
#
# Copyright (C) 2001-2006 University of Pennsylvania
# Author: Steven Bird <[email protected]>
# URL: <http://nltk.sf.net>
# For license information, see LICENSE.TXT
##########################################################################
# PRETTY PRINTING
##########################################################################
def pr(data, start=0, end=None):
"""
Pretty print a sequence of data items
@param data: the data stream to print
@type data: C{sequence} or C{iterator}
@param start: the start position
@type start: C{int}
@param end: the end position
@type end: C{int}
"""
from pprint import pprint
from itertools import islice
pprint(list(islice(data, start, end)))
def print_string(s, width=70):
"""
Pretty print a string, breaking lines on whitespace
@param s: the string to print, consisting of words and spaces
@type s: C{string}
@param width: the display width
@type width: C{int}
"""
import re
while s:
s = s.strip()
try:
i = s[:width].rindex(' ')
except ValueError:
print s
return
print s[:i]
s = s[i:]
class SortedDict(dict):
"""
A very rudimentary sorted dictionary, whose main purpose is to
allow dictionaries to be displayed in a consistent order in
regression tests. keys(), items(), values(), iter*(), and
__repr__ all sort their return values before returning them.
(Note that the sort order for values() does *not* correspond to
the sort order for keys(); i.e., zip(d.keys(), d.values()) is not
necessarily equal to d.items().)
"""
def keys(self): return sorted(dict.keys(self))
def items(self): return sorted(dict.items(self))
def values(self): return sorted(dict.values(self))
def iterkeys(self): return iter(sorted(dict.keys(self)))
def iteritems(self): return iter(sorted(dict.items(self)))
def itervalues(self): return iter(sorted(dict.values(self)))
def __iter__(self): return iter(sorted(dict.keys(self)))
def __repr__(self):
items = ['%s=%s' % t for t in sorted(self.items())]
return '{%s}' % ', '.join(items)
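# A minimal usage sketch of SortedDict (illustrative data, not part of the toolkit):
# iteration order is stable regardless of insertion order, which is the point for
# regression tests.
_sd_example = SortedDict()
_sd_example['b'] = 2
_sd_example['a'] = 1
assert _sd_example.keys() == ['a', 'b']
assert _sd_example.items() == [('a', 1), ('b', 2)]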
##########################################################################
# EDIT DISTANCE (LEVENSHTEIN)
##########################################################################
def _edit_dist_init(len1, len2):
lev = []
for i in range(len1):
lev.append([0] * len2) # initialize 2-D array to zero
for i in range(len1):
lev[i][0] = i # column 0: 0,1,2,3,4,...
for j in range(len2):
lev[0][j] = j # row 0: 0,1,2,3,4,...
return lev
def _edit_dist_step(lev, i, j, c1, c2):
a = lev[i-1][j ] + 1 # skipping s1[i]
b = lev[i-1][j-1] + (c1 != c2) # matching s1[i] with s2[j]
c = lev[i ][j-1] + 1 # skipping s2[j]
lev[i][j] = min(a,b,c) # pick the cheapest
def edit_dist(s1, s2):
"""
Calculate the Levenshtein edit-distance between two strings.
The edit distance is the number of characters that need to be
substituted, inserted, or deleted, to transform s1 into s2. For
example, transforming "rain" to "shine" requires three steps,
consisting of two substitutions and one insertion:
"rain" -> "sain" -> "shin" -> "shine". These operations could have
been done in other orders, but at least three steps are needed.
@param s1, s2: The strings to be analysed
@type s1, s2: C{string}
@rtype: C{int}
"""
# set up a 2-D array
len1 = len(s1); len2 = len(s2)
lev = _edit_dist_init(len1+1, len2+1)
# iterate over the array
for i in range(len1):
for j in range (len2):
_edit_dist_step(lev, i+1, j+1, s1[i], s2[j])
return lev[len1][len2]
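# A minimal usage sketch of edit_dist, reusing the "rain"/"shine" example from the
# docstring: two substitutions plus one insertion give a distance of 3.
_rain_shine_distance = edit_dist("rain", "shine")
assert _rain_shine_distance == 3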
##########################################################################
# MINIMAL SETS
##########################################################################
class MinimalSet(object):
"""
Find contexts where more than one possible target value can
appear. E.g. if targets are word-initial letters, and contexts
are the remainders of words, then we would like to find cases like
"fat" vs "cat", and "training" vs "draining". If targets are
parts-of-speech and contexts are words, then we would like to find
cases like wind (noun) 'air in rapid motion', vs wind (verb)
'coil, wrap'.
"""
def __init__(self, parameters=None):
"""
Create a new minimal set.
@param parameters: The (context, target, display) tuples for the item
@type parameters: C{list} of C{tuple} of C{string}
"""
self._targets = set() # the contrastive information
self._contexts = set() # what we are controlling for
self._seen = {} # to record what we have seen
self._displays = {} # what we will display
for context, target, display in parameters:
self.add(context, target, display)
def add(self, context, target, display):
"""
Add a new item to the minimal set, having the specified
context, target, and display form.
@param context: The context in which the item of interest appears
@type context: C{string}
@param target: The item of interest
@type target: C{string}
@param display: The information to be reported for each item
@type display: C{string}
"""
# Store the set of targets that occurred in this context
if context not in self._seen:
self._seen[context] = set()
self._seen[context].add(target)
# Keep track of which contexts and targets we have seen
self._contexts.add(context)
self._targets.add(target)
# For a given context and target, store the display form
self._displays[(context, target)] = display
def contexts(self, minimum=2):
"""
Determine which contexts occurred with enough distinct targets.
@param minimum: the minimum number of distinct target forms
@type minimum: C{int}
@rtype: C{list}
"""
return [c for c in self._contexts if len(self._seen[c]) >= minimum]
def display(self, context, target, default=""):
if self._displays.has_key((context, target)):
return self._displays[(context, target)]
else:
return default
def display_all(self, context):
result = []
for target in self._targets:
x = self.display(context, target)
if x: result.append(x)
return result
def targets(self):
return self._targets
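# A minimal usage sketch of MinimalSet, with illustrative data taken from the class
# docstring (contexts are word remainders, targets are word-initial letters):
_ms_example = MinimalSet([("at", "f", "fat"), ("at", "c", "cat"),
("raining", "t", "training"), ("raining", "d", "draining")])
assert sorted(_ms_example.contexts(minimum=2)) == ["at", "raining"]
assert sorted(_ms_example.display_all("at")) == ["cat", "fat"]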
######################################################################
## Regexp display (thanks to David Mertz)
######################################################################
import re
def re_show(regexp, string):
"""
Search C{string} for substrings matching C{regexp} and wrap
the matches with braces. This is convenient for learning about
regular expressions.
@param regexp: The regular expression.
@param string: The string being matched.
@rtype: C{NoneType}
@return: None; the string with braces around the matches is printed rather than returned.
"""
print re.compile(regexp, re.M).sub("{\g<0>}", string.rstrip())
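# A minimal usage sketch of re_show (illustrative pattern and string); the bracketed
# result is printed, not returned:
# re_show("t\w+", "two tiny turtles") prints: {two} {tiny} {turtles}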
##########################################################################
# READ FROM FILE OR STRING
##########################################################################
# recipe from David Mertz
def filestring(f):
if hasattr(f, 'read'):
return f.read()
elif isinstance(f, basestring):
return open(f).read()
else:
raise ValueError, "Must be called with a filename or file-like object"
##########################################################################
# COUNTER, FOR UNIQUE NAMING
##########################################################################
class Counter:
"""
A counter that auto-increments each time its value is read.
"""
def __init__(self, initial_value=0):
self._value = initial_value
def get(self):
self._value += 1
return self._value
|
py
|
1a5c6a8980721feaa37ee0f9c50ce8aaf1735a92
|
import os
import time
import argparse
import json
import math
import torch
from torch._C import dtype
import torch_geometric
from torch.utils.data import DataLoader, TensorDataset, IterableDataset
from torch.utils.data.distributed import DistributedSampler
from argparse import Namespace
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import pickle as pk
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from torch_geometric.data import Data, Batch, NeighborSampler, ClusterData, ClusterLoader
from torch_scatter import scatter
from base_task import add_config_to_argparse, BaseConfig, BasePytorchTask, \
LOSS_KEY, BAR_KEY, SCALAR_LOG_KEY, VAL_SCORE_KEY
from dataset import SAINTDataset, SimpleDataset
from data_utils import load_data, load_new_data
from nbeats import NBeatsModel
from hierst import HierSTModel
from graph_optim import TruncateSGD, TruncateAdam
class Config(BaseConfig):
def __init__(self):
super().__init__()
# Reset base variables
self.max_epochs = 1000
self.early_stop_epochs = 15
self.infer = False
# for data loading
self.data_fp = '../data/daily_us_7.csv'
self.start_date = '2020-03-01'
self.min_peak_size = -1 # min peak confirmed cases selected by country level
self.lookback_days = 14 # the number of days before the current day for daily series
self.lookahead_days = 1
self.forecast_date = '2020-06-29'
self.horizon = 7
self.val_days = 1 # the number of days used for validation
self.label = 'confirmed_target'
self.use_mobility = False
self.model_type = 'hierst' # choices: nbeats, hierst
self.rnn_type = 'nbeats'
self.date_emb_dim = 2
self.use_gbm = False
self.use_lr = True
# for krnn
self.cnn_dim = 32
self.cnn_kernel_size = 3
self.rnn_dim = 32
self.rnn_dups = 10
# for transformer
self.tfm_layer_num = 8
self.tfm_head_num = 8
self.tfm_hid_dim = 32
self.tfm_ff_dim = 32
self.tfm_max_pos = 500
self.tfm_node_dim = 5
self.tfm_dropout = 0.1
self.tfm_block_num = -1
self.tfm_cnn_kernel_size = 1
# for n_beats
self.block_size = 3
self.hidden_dim = 32
self.id_emb_dim = 8
# for gcn
self.gcn_dim = 32
self.gcn_type = 'gcn'
self.gcn_aggr = 'max'
self.gcn_norm = 'none'
self.gcn_layer_num = 2
self.gcn_node_dim = 4
self.gcn_edge_dim = 4
self.gcn_dropout = 0.1
# for gov gate
self.use_gov_gate = False
self.gov_id_dim = 32
self.gov_hid_dim = 32
# per-gpu training batch size, real_batch_size = batch_size * num_gpus * grad_accum_steps
self.batch_size = 4
self.lr = 1e-3 # the learning rate
# batch sample type
self.use_saintdataset = True
self.saint_batch_size = 3000
self.saint_sample_type = 'random_walk'
self.saint_walk_length = 2
self.saint_shuffle_order = 'node_first'
# graph optimization (deprecated)
self.optim_graph = False
self.graph_fp = '../data/us_graph.cpt'
self.graph_lr = 1e-4 # learning rate for graph adjacent matrix
self.graph_opt_type = 'TruncateAdam' # TruncateAdam, TruncateSGD, Adam
self.graph_gravity = 0.1 # sparse regularization coefficients
self.graph_eta = 0.01 # \eta * || A - A_{prior} ||_2^2
# consistency loss
# the usage of 'xxxx_loss_node_num'
# -1: use all nodes,
# 0: not use this loss,
# >0: use a certain number of randomly selected nodes
self.topo_loss_node_num = -1
self.topo_loss_weight = 0.01
self.topo_loss_epoch_start = 3
self.pair_loss_node_num = -1
self.pair_loss_weight = 0.0
# temp options
self.use_node_weight = True
self.mape_eps = 10
self.sparse_gate_weight = 0.0
self.sparse_gate_epoch_start = 3
self.prepro_type = 'none'
self.use_popu_norm = True
self.use_logy = False
self.use_fea_zscore = False
self.use_adapt_norm = False
self.use_default_edge = False
self.abla_type = 'none'
self.fea_day_offset = 1
self.data_aug_scales = '1' # a list of scales applied for training data augmentation
class WrapperNet(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
if self.config.model_type == 'nbeats':
self.net = NBeatsModel(config)
elif self.config.model_type == 'hierst':
self.net = HierSTModel(config)
else:
raise Exception(
'Unsupported model type {}'.format(config.model_type))
if config.use_lr:
self.weight_lr = nn.Parameter(torch.Tensor(self.config.lookback_days, self.config.lookahead_days))
self.b_lr = nn.Parameter(torch.Tensor([0.0] * self.config.lookahead_days))
if config.use_gov_gate:
self.state_emb = nn.Embedding(self.config.num_nodes, self.config.gov_id_dim)
self.gov_gru = nn.GRU(input_size=self.config.day_gov_fea_dim,
hidden_size=self.config.gov_hid_dim,
batch_first=True)
self.state_weight = nn.Parameter(torch.Tensor(self.config.gov_hid_dim, self.config.lookahead_days))
self.gov_weight = nn.Parameter(torch.Tensor(self.config.gov_id_dim, self.config.lookahead_days))
self.reset_parameters()
def gov_map(self, input_day_gov):
sz = input_day_gov.size()
x = input_day_gov.view(-1, sz[2], sz[3])
_, h = self.gov_gru(x)
h = h[0,:,:].view(sz[0],sz[1],-1)
return h
def state_map(self, input_day, g):
sz = input_day.size()
n_id = g['cent_n_id']
id_emb = self.state_emb(n_id.reshape(1,sz[1]).expand(sz[0],sz[1]).long())
return id_emb
def lr(self, input_day):
sz = input_day.size()
label_idx = self.config.label_fea_idx
ts = input_day[:,:,:,label_idx]
if self.config.use_logy:
ts = ts.expm1()
pred = torch.matmul(ts, torch.softmax(self.weight_lr, dim=0)) + self.b_lr
if self.config.use_logy:
pred = torch.log1p(pred)
pred = pred.view(sz[0], sz[1], self.config.lookahead_days)
return pred
def reset_parameters(self):
if self.config.use_lr:
nn.init.xavier_uniform_(self.weight_lr)
if self.config.use_gov_gate:
nn.init.xavier_uniform_(self.gov_weight)
nn.init.xavier_uniform_(self.state_weight)
def forward_ori(self, input_day, g):
out = self.net(input_day, g)
if self.config.use_lr:
out = out + self.lr(input_day)
return out
def forward(self, input_day, input_day_gov, g):
ori_out = self.forward_ori(input_day, g)
if self.config.use_gov_gate:
gov_hid = self.gov_map(input_day_gov)
state_hid = self.state_map(input_day, g)
state_gate = torch.sigmoid(torch.matmul(state_hid, self.state_weight))
gov_gate = torch.tanh(torch.matmul(gov_hid, self.gov_weight))
out = ori_out * (1 + state_gate * gov_gate)
else:
out, state_gate, gov_gate = ori_out, torch.ones_like(ori_out), torch.ones_like(ori_out)
return out, state_gate, gov_gate
class GraphNet(nn.Module):
def __init__(self, config, edge_weight):
super().__init__()
self.config = config
self.net = WrapperNet(config)
if config.optim_graph:
self.edge_weight = nn.Parameter(edge_weight)
else:
self.edge_weight = None
def forward(self, input_day, input_day_gov, g):
return self.net(input_day, input_day_gov, g)
def get_net_parameters(self):
return self.net.parameters()
def get_graph_parameters(self):
yield self.edge_weight
def weighted_mse_loss(input, target, weight):
return torch.mean(weight * (input - target) ** 2)
class Task(BasePytorchTask):
def __init__(self, config):
super().__init__(config)
self.log('Initialize {}'.format(self.__class__))
self.init_data()
self.init_graph()
self.adjust_for_ablation_study()
self.loss_func = nn.MSELoss()
# self.loss_func = nn.L1Loss()
self.log('Config:\n{}'.format(
json.dumps(self.config.to_dict(), ensure_ascii=False, indent=4)
))
def adjust_for_ablation_study(self):
if self.config.abla_type == 'gat':
self.config.gcn_type = 'gat'
elif self.config.abla_type == 'flat':
edge_sid = (self.edge_type == 0).sum().item()
self.edge_index = self.edge_index[:, edge_sid:]
self.edge_weight = self.edge_weight[edge_sid:]
self.edge_type = self.edge_type[edge_sid:]
self.config.num_edges = self.edge_weight.shape[0]
elif self.config.abla_type.startswith('sep'):
cur_node_type = int(self.config.abla_type[3:])
node_sid, node_eid = None, None
for idx, x in enumerate(self.node_type_list):
if node_sid is None:
if x == cur_node_type:
node_sid = idx
node_eid = idx
elif x == cur_node_type:
node_eid = idx
node_eid += 1
edge_sid = (self.edge_type == 0).sum().item()
self.edge_index = self.edge_index[:, edge_sid:]
self.edge_weight = self.edge_weight[edge_sid:]
self.edge_type = self.edge_type[edge_sid:]
sel_edge_mask = (self.edge_index[0] >= node_sid) & (self.edge_index[0] < node_eid)
self.edge_index = self.edge_index[:, sel_edge_mask] - node_sid
self.edge_weight = self.edge_weight[sel_edge_mask]
self.edge_type = self.edge_type[sel_edge_mask]
self.config.num_edges = self.edge_weight.shape[0]
self.node_type = torch.zeros_like(self.node_type[node_sid:node_eid])
self.node_type_list = [0] * (node_eid-node_sid)
self.config.num_node_types = 1
self.node_name = self.node_name[node_sid:node_eid]
self.node_weight = self.node_weight[cur_node_type:cur_node_type+1]
self.use_node_weight = False
if self.node_popu is not None:
self.node_popu = self.node_popu[node_sid:node_eid]
self.nodes = self.nodes[node_sid:node_eid]
self.config.num_nodes = node_eid - node_sid
self.train_day_inputs = self.train_day_inputs[:, node_sid:node_eid]
self.train_day_gov_inputs = self.train_day_gov_inputs[:, node_sid:node_eid]
self.train_gbm_outputs = self.train_gbm_outputs[:, node_sid:node_eid]
self.train_outputs = self.train_outputs[:, node_sid:node_eid]
self.val_day_inputs = self.val_day_inputs[:, node_sid:node_eid]
self.val_day_gov_inputs = self.val_day_gov_inputs[:, node_sid:node_eid]
self.val_gbm_outputs = self.val_gbm_outputs[:, node_sid:node_eid]
self.val_outputs = self.val_outputs[:, node_sid:node_eid]
self.test_day_inputs = self.test_day_inputs[:, node_sid:node_eid]
self.test_day_gov_inputs = self.test_day_gov_inputs[:, node_sid:node_eid]
self.test_gbm_outputs = self.test_gbm_outputs[:, node_sid:node_eid]
self.test_outputs = self.test_outputs[:, node_sid:node_eid]
else:
pass
def init_data(self, data_fp=None):
if data_fp is None:
data_fp = self.config.data_fp
# load data
self.config.label_fea_name = f'{self.config.label[:-7]}.rolling({self.config.horizon}).sum()'
day_inputs, day_gov_inputs, outputs, dates, nodes, \
self.main_feas, self.gov_feas, self.node_popu, self.fea_scaler = \
load_new_data(data_fp, self.config, logger=self.log)
self.config.adapt_norm_eps = 1
self.config.label_fea_idx = dict(zip(self.main_feas, range(len(self.main_feas))))[self.config.label_fea_name]
if self.node_popu is not None:
self.node_popu = self.node_popu.to(self.device)
gbm_outputs = outputs
# numpy default dtype is float64, but torch default dtype is float32
self.day_inputs = day_inputs
self.day_gov_inputs = day_gov_inputs
self.outputs = outputs
self.gbm_outputs = gbm_outputs
self.dates = dates # share index with sample id
self.nodes = nodes # share index with node id
# fulfill config
self.config.num_nodes = self.day_inputs.shape[1]
self.config.day_seq_len = self.day_inputs.shape[2]
self.config.day_fea_dim = self.day_inputs.shape[3]
self.config.day_gov_fea_dim = self.day_gov_inputs.shape[3]
# self.config.edge_fea_dim = self.edge_attr.shape[1]
# Filter by label dates
use_dates = [
pd.to_datetime(item) for item in dates
if pd.to_datetime(item) <= pd.to_datetime(self.config.forecast_date)
]
test_divi = len(use_dates) - 1
val_divi = test_divi - self.config.horizon
train_divi = val_divi - self.config.val_days
if self.config.infer:
# use all available train data
train_divi = val_divi + 1
print(dates[train_divi],dates[val_divi],dates[test_divi])
self.train_day_inputs = self.day_inputs[:train_divi+1]
self.train_day_gov_inputs = self.day_gov_inputs[:train_divi+1]
self.train_gbm_outputs = self.gbm_outputs[:train_divi+1]
self.train_outputs = self.outputs[:train_divi+1]
self.train_dates = self.dates[:train_divi+1]
if self.config.data_aug_scales != '1':
data_aug_scales = [float(s) for s in self.config.data_aug_scales.split(',')]
scale_fea_end = -1
print(f'Data Augmentation Scaling {data_aug_scales} for {self.main_feas[:scale_fea_end]}')
def aug_scale(day_input, is_label=False):
if is_label:
aug_inputs = [day_input * s for s in data_aug_scales]
else:
scale_part = day_input[:, :, :, :scale_fea_end]
invar_part = day_input[:, :, :, scale_fea_end:]
aug_inputs = []
for s in data_aug_scales:
aug_part = scale_part * s
aug_part = torch.cat([aug_part, invar_part], dim=-1)
aug_inputs.append(aug_part)
aug_input = torch.cat(aug_inputs, dim=0)
return aug_input
self.train_day_inputs = aug_scale(self.train_day_inputs)
self.train_day_gov_inputs = aug_scale(self.train_day_gov_inputs)
self.train_gbm_outputs = aug_scale(self.train_gbm_outputs, is_label=True)
self.train_outputs = aug_scale(self.train_outputs, is_label=True)
self.train_dates = self.train_dates * len(data_aug_scales)
if self.config.infer:
self.val_day_inputs = self.day_inputs[:train_divi+1]
self.val_day_gov_inputs = self.day_gov_inputs[:train_divi+1]
self.val_gbm_outputs = self.gbm_outputs[:train_divi+1]
self.val_outputs = self.outputs[:train_divi+1]
self.val_dates = self.dates[:train_divi+1]
else:
self.val_day_inputs = self.day_inputs[val_divi:val_divi+1]
self.val_day_gov_inputs = self.day_gov_inputs[val_divi:val_divi+1]
self.val_gbm_outputs = self.gbm_outputs[val_divi:val_divi+1]
self.val_outputs = self.outputs[val_divi:val_divi+1]
self.val_dates = self.dates[val_divi:val_divi+1]
self.test_day_inputs = self.day_inputs[test_divi:test_divi+1]
self.test_day_gov_inputs = self.day_gov_inputs[test_divi:test_divi+1]
self.test_gbm_outputs = self.gbm_outputs[test_divi:test_divi+1]
self.test_outputs = self.outputs[test_divi:test_divi+1]
self.test_dates = self.dates[test_divi:test_divi+1]
def init_graph(self, graph_fp=None):
if graph_fp is None:
graph_fp = self.config.graph_fp
graph_dict = torch.load(graph_fp)
self.edge_index = graph_dict['edge_index']
self.edge_weight = graph_dict['edge_weight']
self.edge_type = graph_dict['edge_type'].to(self.device)
self.node_type = graph_dict['node_type'].to(self.device)
self.node_type_list = list(graph_dict['node_type'].numpy())
self.node_name = graph_dict['node_name']
if self.config.num_nodes != len(self.node_name):
data_node_set = set(self.nodes)
graph_node_set = set(self.node_name)
print('New nodes in data', data_node_set - graph_node_set)
print('Missing nodes in data', graph_node_set - data_node_set)
raise Exception('Please regenerate GNN topo before running')
self.config.num_edges = self.edge_weight.shape[0]
self.config.num_node_types = int(graph_dict['node_type'].max()) + 1
self.config.num_edge_types = int(graph_dict['edge_type'].max()) + 1
base_ones = torch.ones_like(self.node_type, dtype=torch.float)
node_type_count = scatter(base_ones, self.node_type, dim_size=self.config.num_node_types, reduce='sum')
# the weight of the bottom nodes is equal to 1
self.node_weight = 1.0 / node_type_count * node_type_count.max()
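# Illustrative example (assumed counts, not taken from the data): with a three-level
# hierarchy and node_type_count = [1, 51, 3142], node_weight becomes roughly
# [3142.0, 61.6, 1.0], i.e. the most numerous (bottom) node type gets weight 1 and
# coarser levels are up-weighted in the training loss.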
def make_sample_dataloader(self, day_inputs, day_gov_inputs, gbm_outputs, outputs, shuffle=False):
if self.config.use_saintdataset:
dataset = SAINTDataset(
[day_inputs, day_gov_inputs, gbm_outputs, outputs],
self.edge_index, self.edge_weight, self.config.num_nodes,
self.config.batch_size, shuffle=shuffle,
shuffle_order=self.config.saint_shuffle_order,
saint_sample_type=self.config.saint_sample_type,
saint_batch_size=self.config.saint_batch_size,
saint_walk_length=self.config.saint_walk_length,
)
return DataLoader(dataset, batch_size=None)
else:
dataset = SimpleDataset([day_inputs, day_gov_inputs, gbm_outputs, outputs])
def collate_fn(samples):
day_inputs = torch.cat([item[0][0] for item in samples]).unsqueeze(0) # [1,bs,seq_length,feature_dim]
day_gov_inputs = torch.cat([item[0][1] for item in samples]).unsqueeze(0) # [1,bs,seq_length,feature_dim]
gbm_outputs = torch.cat([item[0][-2] for item in samples]).unsqueeze(0)
outputs = torch.cat([item[0][-1] for item in samples]).unsqueeze(0)
node_ids = torch.LongTensor([item[1] for item in samples]) # [bs]
date_ids = torch.LongTensor([item[2] for item in samples]) # [bs]
return [[day_inputs, day_gov_inputs, gbm_outputs, outputs], {'cent_n_id':node_ids,'type':'random'}, date_ids]
return DataLoader(dataset, batch_size=self.config.batch_size, shuffle=shuffle, collate_fn=collate_fn)
def build_train_dataloader(self):
return self.make_sample_dataloader(
self.train_day_inputs, self.train_day_gov_inputs, self.train_gbm_outputs, self.train_outputs, shuffle=True
)
def build_val_dataloader(self):
return self.make_sample_dataloader(
self.val_day_inputs, self.val_day_gov_inputs, self.val_gbm_outputs, self.val_outputs, shuffle=False
)
def build_test_dataloader(self):
return self.make_sample_dataloader(
self.test_day_inputs, self.test_day_gov_inputs, self.test_gbm_outputs, self.test_outputs, shuffle=False
)
def build_optimizer(self, model):
model_opt = torch.optim.Adam(self.model.get_net_parameters(), lr=self.config.lr)
if self.config.optim_graph:
kwargs = {
'lr': self.config.graph_lr,
}
if self.config.graph_opt_type == 'Adam':
opt_class = torch.optim.Adam
elif self.config.graph_opt_type == 'TruncateSGD':
kwargs['gravity'] = self.config.graph_gravity
opt_class = TruncateSGD
elif self.config.graph_opt_type == 'TruncateAdam':
kwargs['gravity'] = self.config.graph_gravity
kwargs['lr_truncate'] = self.config.graph_lr
opt_class = TruncateAdam
else:
raise Exception("Unsupported graph optimizer '{}'".format(self.config.graph_opt_type))
graph_opt = opt_class(self.model.get_graph_parameters(), **kwargs)
return model_opt, graph_opt
else:
return model_opt
def train_step(self, batch, batch_idx):
inputs, g, _ = batch
# prepare inputs, outputs
input_day, input_day_gov, y_gbm, y = inputs
if self.config.use_gbm: # deprecated
y = y - y_gbm
if self.config.use_adapt_norm:
norm_eps = self.config.adapt_norm_eps
input_norm = input_day.mean(dim=-2, keepdim=True) + norm_eps
y_norm = input_norm[:, :, :, self.config.label_fea_idx] + norm_eps
input_day = (input_day+norm_eps) / input_norm
y = (y+norm_eps) / y_norm
else:
norm_eps = 0
input_norm = 1
y_norm = 1
# prepare graph
g['edge_type'] = self.edge_type[g['e_id']]
g['node_type'] = self.node_type[g['cent_n_id']]
if self.config.optim_graph:
g['edge_attr_prior'] = g['edge_attr']
g['edge_attr'] = self.model.edge_weight[g['e_id']]
y_hat, _, _ = self.model(input_day, input_day_gov, g)
assert(y.size() == y_hat.size())
if self.config.use_node_weight:
node_weight = self.node_weight[g['node_type']]\
.reshape(1, y.shape[1], 1)
loss = weighted_mse_loss(y_hat, y, node_weight)
else:
node_weight = None
loss = self.loss_func(y_hat, y)
y_loss_i = loss.item()
if self.config.optim_graph:
graph_loss = self.loss_func(g['edge_attr'], g['edge_attr_prior'])
loss += self.config.graph_eta * graph_loss
if self.config.topo_loss_weight > 0 and \
self._current_epoch >= self.config.topo_loss_epoch_start:
# get topo_edge_index
edge_index = g['edge_index']
node_type = g['node_type']
i, j = 1, 0
node_type_j = node_type[edge_index[j]]
node_type_i = node_type[edge_index[i]]
topo_edge_index = edge_index[:, node_type_i == node_type_j-1]
# calculate aggregated y
if self.config.use_adapt_norm:
y = y * y_norm - norm_eps
y_hat = y_hat * y_norm - norm_eps
if self.config.use_logy:
y = y.expm1() # exp(y)-1, where y = log(1+label)
y_hat = y_hat.expm1() # exp(y_hat)-1, where y = log(1+label_hat)
if self.config.use_popu_norm:
popu = self.node_popu[g['cent_n_id']]\
.reshape(1, g['cent_n_id'].shape[0], 1)
y = y * popu / 10**5
y_hat = y_hat * popu / 10**5
y_j = y[:, topo_edge_index[j], :]
y_hat_j = y_hat[:, topo_edge_index[j], :]
y_agg = scatter(y_j, topo_edge_index[i], dim=-2, dim_size=y.shape[-2], reduce='sum')
y_hat_agg = scatter(y_hat_j, topo_edge_index[i], dim=-2, dim_size=y_hat.shape[-2], reduce='sum')
# use agg mask to ignore bottom node
bottom_node_type = node_type.max()
agg_mask = node_type < bottom_node_type
ym = y[:, agg_mask]
ym_hat = y_hat[:, agg_mask]
ym_agg = y_agg[:, agg_mask]
ym_hat_agg = y_hat_agg[:, agg_mask]
eps = self.config.mape_eps
topo_loss = self.loss_func((ym_hat_agg+eps)/(ym_agg+eps), torch.ones_like(ym_agg)) + \
self.loss_func((ym_hat_agg+eps)/(ym_agg+eps), (ym_hat+eps)/(ym+eps),)
loss += self.config.topo_loss_weight * topo_loss
topo_loss_i = topo_loss.item()
else:
topo_loss_i = 0
# judge to avoid useless computation
if self.config.pair_loss_node_num != 0 and self.config.pair_loss_weight > 0:
pair_edge_index = g['edge_index'] # consider every pair in the graph
if self.config.pair_loss_node_num > 0:
num_edges = pair_edge_index.shape[1]
rand_eids = torch.randperm(num_edges, device=loss.device)[:self.config.pair_loss_node_num]
pair_edge_index = pair_edge_index[:, rand_eids]
i, j = 1, 0
logy_j = y[:, pair_edge_index[j], :]
logy_i = y[:, pair_edge_index[i], :]
logy_j_hat = y_hat[:, pair_edge_index[j], :]
logy_i_hat = y_hat[:, pair_edge_index[i], :]
pair_loss = weighted_mse_loss(
(logy_j_hat - logy_j).exp(), # (y_j_hat+1) / (y_j+1)
(logy_i_hat - logy_i).exp(), # (y_i_hat+1) / (y_i+1)
0.5*(logy_j + logy_i), # pay more attention to large nodes
)
loss += self.config.pair_loss_weight * pair_loss
pair_loss_i = pair_loss.item()
else:
pair_loss_i = 0
if self.config.sparse_gate_weight > 0:
gate_loss = self.model.net.net.gcn_coef.mean()
if self._current_epoch >= self.config.sparse_gate_epoch_start:
loss += self.config.sparse_gate_weight * gate_loss
gate_loss_i = gate_loss.item()
else:
gate_loss_i = 0
loss_i = loss.item() # scalar loss
# log all kinds of losses for debug
loss_info = {
'loss': loss_i,
'y_loss': y_loss_i,
'topo_loss': topo_loss_i,
'pair_loss': pair_loss_i,
'gate_loss': gate_loss_i,
}
return {
LOSS_KEY: loss,
BAR_KEY: loss_info,
SCALAR_LOG_KEY: loss_info,
}
def eval_step(self, batch, batch_idx, tag):
inputs, g, rows = batch
input_day, input_day_gov, y_gbm, y = inputs
if self.config.use_adapt_norm:
norm_eps = self.config.adapt_norm_eps
input_norm = input_day.mean(dim=-2, keepdim=True) + norm_eps
y_norm = input_norm[:, :, :, self.config.label_fea_idx] + norm_eps
input_day = (input_day + norm_eps) / input_norm
else:
norm_eps = 0
input_norm = 1
y_norm = 1
forecast_length = y.size()[-1]
g['edge_type'] = self.edge_type[g['e_id']]
g['node_type'] = self.node_type[g['cent_n_id']]
if self.config.optim_graph:
g['edge_attr_prior'] = g['edge_attr']
g['edge_attr'] = self.model.edge_weight[g['e_id']]
y_hat, state_gate, gov_gate = self.model(input_day, input_day_gov, g)
if self.config.use_gbm:
y_hat += y_gbm
assert(y.size() == y_hat.size())
if self.config.use_adapt_norm:
y_hat = y_hat * y_norm - norm_eps
if self.config.use_logy:
y = y.expm1() # exp(y)-1, where y = log(1+label)
y_hat = y_hat.expm1() # exp(y_hat)-1, where y = log(1+label_hat)
if self.config.use_popu_norm:
popu = self.node_popu[g['cent_n_id']]\
.reshape(1, g['cent_n_id'].shape[0], 1)
y = y * popu / 10**5
y_hat = y_hat * popu / 10**5
if g['type'] == 'subgraph' and 'res_n_id' in g: # if using SAINT sampler
cent_n_id = g['cent_n_id']
res_n_id = g['res_n_id']
# Note: we only evaluate predictions on those initial nodes (per random walk)
# to avoid duplicated computations
y = y[:, res_n_id]
y_hat = y_hat[:, res_n_id]
cent_n_id = cent_n_id[res_n_id]
else:
cent_n_id = g['cent_n_id']
if self.config.use_saintdataset:
index_ptr = torch.cartesian_prod(
torch.arange(rows.size(0)),
torch.arange(cent_n_id.size(0)),
torch.arange(forecast_length)
)
label = pd.DataFrame({
'row_idx': rows[index_ptr[:, 0]].data.cpu().numpy(),
'node_idx': cent_n_id[index_ptr[:, 1]].data.cpu().numpy(),
'forecast_idx': index_ptr[:,2].data.cpu().numpy(),
'val': y.flatten().data.cpu().numpy()
})
pred = pd.DataFrame({
'row_idx': rows[index_ptr[:, 0]].data.cpu().numpy(),
'node_idx': cent_n_id[index_ptr[:, 1]].data.cpu().numpy(),
'forecast_idx': index_ptr[:,2].data.cpu().numpy(),
'val': y_hat.flatten().data.cpu().numpy()
})
else:
index_ptr = torch.cartesian_prod(
torch.arange(rows.size(0)),
torch.arange(forecast_length)
)
label = pd.DataFrame({
'row_idx': rows[index_ptr[:, 0]].data.cpu().numpy(),
'node_idx': cent_n_id[index_ptr[:, 0]].data.cpu().numpy(),
'forecast_idx': index_ptr[:,1].data.cpu().numpy(),
'val': y.flatten().data.cpu().numpy()
})
pred = pd.DataFrame({
'row_idx': rows[index_ptr[:, 0]].data.cpu().numpy(),
'node_idx': cent_n_id[index_ptr[:, 0]].data.cpu().numpy(),
'forecast_idx': index_ptr[:,1].data.cpu().numpy(),
'val': y_hat.flatten().data.cpu().numpy()
})
pred = pred.groupby(['row_idx', 'node_idx', 'forecast_idx']).mean()
label = label.groupby(['row_idx', 'node_idx', 'forecast_idx']).mean()
return {
'label': label,
'pred': pred,
'info': [state_gate, gov_gate]
# 'atten': atten_context
}
def eval_epoch_end(self, outputs, tag, dates):
pred = pd.concat([x['pred'] for x in outputs], axis=0)
label = pd.concat([x['label'] for x in outputs], axis=0)
pred = pred.groupby(['row_idx', 'node_idx','forecast_idx']).mean()
label = label.groupby(['row_idx', 'node_idx', 'forecast_idx']).mean()
info = [x['info'] for x in outputs]
# atten_context = [x['atten'] for x in outputs]
align_nodes = label.reset_index().node_idx.map(lambda x: self.nodes[x]).values
align_dates = label.reset_index().row_idx.map(lambda x: dates[x]).values
loss = np.mean(np.abs(pred['val'].values - label['val'].values))
scores = self.produce_score(pred, label, dates)
log_dict = {
'{}_loss'.format(tag): loss,
'{}_mae'.format(tag): scores['mean_mistakes'],
'{}_mape'.format(tag): scores['mape'],
# '{}_mean_mistakes'.format(tag): scores['mean_mistakes'],
# '{}_mean_label'.format(tag): scores['mean_label'],
# '{}_mean_predict'.format(tag): scores['mean_predict']
}
type_mae_sum = 0
type_mape_sum = 0
for type_id in range(self.config.num_node_types):
cur_pred = pred[
pred.index.get_level_values(1).map(lambda x: self.node_type_list[x]) == type_id
]
cur_label = label[
label.index.get_level_values(1).map(lambda x: self.node_type_list[x]) == type_id
]
cur_scores = self.produce_score(cur_pred, cur_label, dates)
log_dict[f'{tag}_type-{type_id}_mae'] = cur_scores['mean_mistakes']
log_dict[f'{tag}_type-{type_id}_mape'] = cur_scores['mape']
type_mae_sum += cur_scores['mean_mistakes']
type_mape_sum += cur_scores['mape']
log_dict[f'{tag}_type-mean_mae'] = type_mae_sum / self.config.num_node_types
log_dict[f'{tag}_type-mean_mape'] = type_mape_sum / self.config.num_node_types
out = {
BAR_KEY: log_dict,
SCALAR_LOG_KEY: log_dict,
VAL_SCORE_KEY: - type_mape_sum,
'pred': pred,
'label': label,
'scores': scores,
'dates':align_dates,
'nodes':align_nodes,
'info': info,
'y_scale': 'linear',
'epoch': self._passed_epoch,
# 'atten': atten_context
}
return out
def produce_score(self, pred, label, dates=None):
# y_hat = pred.apply(lambda x: np.expm1(x))
# y = label.apply(lambda x: np.expm1(x))
y_hat = pred
y = label
mape_eps = self.config.mape_eps
mape_df = np.abs((y_hat+mape_eps)/(y+mape_eps)-1).reset_index(drop=False)
mape_val = np.abs((y_hat.values+1)/(y.values+1)-1).mean()
mean_mistakes = np.abs(y_hat.values - y.values).mean()
mean_label = np.abs(y.values).mean()
mean_predict = np.abs(y_hat.values).mean()
eval_df = pd.concat([y_hat.rename(columns={'val': 'pred'}),
y.rename(columns={'val': 'label'})],
axis=1).reset_index(drop=False)
eval_df['mape'] = mape_df['val']
if dates is not None:
eval_df['date'] = eval_df.row_idx.map(lambda x: dates[x])
eval_df['nodes'] = eval_df.node_idx.map(lambda x: self.nodes[x])
def produce_percent_count(m_df):
res = pd.Series()
res['pred'] = m_df['pred'].mean()
res['label'] = m_df['label'].mean()
res['mistake'] = np.abs(m_df['pred'] - m_df['label']).mean()
return res
scores = {
'mape': mape_val,
'mean_mistakes': mean_mistakes,
'mean_label': mean_label,
'mean_predict': mean_predict
}
for name, metric in [
('mistakes', eval_df),
]:
scores[name] = metric.groupby(
'row_idx').apply(produce_percent_count)
if dates is not None:
scores[name]['date'] = dates
return scores
def val_step(self, batch, batch_idx):
return self.eval_step(batch, batch_idx, 'val')
def val_epoch_end(self, outputs):
val_out = self.eval_epoch_end(outputs, 'val', self.val_dates)
return val_out
def test_step(self, batch, batch_idx):
return self.eval_step(batch, batch_idx, 'test')
def test_epoch_end(self, outputs):
test_out = self.eval_epoch_end(outputs, 'test', self.test_dates)
return test_out
if __name__ == '__main__':
start_time = time.time()
# build argument parser and config
config = Config()
parser = argparse.ArgumentParser(description='COVID-19 Forecasting Task')
add_config_to_argparse(config, parser)
# parse arguments to config
args = parser.parse_args()
config.update_by_dict(args.__dict__)
# build task
task = Task(config)
# Set random seed before the initialization of network parameters
# Necessary for distributed training
task.set_random_seed()
net = GraphNet(task.config, task.edge_weight)
task.init_model_and_optimizer(net)
task.log('Build Neural Nets')
# select epoch with best validation accuracy
best_epochs = 50
if not task.config.skip_train:
task.fit()
best_epochs = task._best_val_epoch
print('Best validation epochs: {}'.format(best_epochs))
# Resume the best checkpoint for evaluation
task.resume_best_checkpoint()
val_eval_out = task.val_eval()
test_eval_out = task.test_eval()
# dump evaluation results of the best checkpoint to val out
task.dump(val_out=val_eval_out,
test_out=test_eval_out,
epoch_idx=-1,
is_best=True,
dump_option=1)
task.log('Best checkpoint (epoch={}, {}, {})'.format(
task._passed_epoch, val_eval_out[BAR_KEY], test_eval_out[BAR_KEY]))
if task.is_master_node:
for tag, eval_out in [
('val', val_eval_out),
('test', test_eval_out),
]:
print('-'*15, tag)
scores = eval_out['scores']['mistakes']
print('-'*5, 'mistakes')
print('Average:')
print(scores.mean().to_frame('mistakes'))
print('Daily:')
print(scores)
task.log('Training time {}s'.format(time.time() - start_time))
|
py
|
1a5c6ba3e04041fd9e8d8e1189b19104c4ede43a
|
import inspect
from copy import deepcopy
from pygmol.abc import Chemistry
class ExampleChemistry(Chemistry):
"""An example chemistry for the following
Species:
--------
O, O-, O--, O2, Ar, Ar+, Ar++, Ar*, Ar**
Reactions:
----------
1: e + O2 > O2 + e
2: e + Ar > e + Ar
3: e + Ar+ > Ar+ + e
4: e + Ar > Ar* + e # made up for testing purposes...
6: e + Ar* > Ar** + e
7: e + Ar* > Ar + e
8: e + Ar** > Ar + e
9: e + Ar** > Ar* + e
10: Ar + e > Ar++ + e + e + e # made up for testing purposes...
12: e + Ar* > Ar+ + e + e
13: e + Ar** > Ar+ + e + e
14: e + O > O- # made up for testing purposes...
15: e + O2 > O- + O
16: e + e + O > O-- # made up for testing purposes...
19: e + O2 > e + O + O
20: e + O- > O-- # made up for testing purposes...
21: Ar+ + e > Ar++ + e + e
22: Ar* + Ar* > Ar+ + Ar + e
23: Ar* + Ar** > Ar+ + Ar + e
25: Ar + Ar+ > Ar + Ar+
26: O- + O > O2 + e
27: Ar+ + O- > Ar + O
28: Ar+ + O- + M > Ar + O + M # made up for testing purposes...
29: Ar++ + O-- + M > Ar + O + M # made up for testing purposes...
30: Ar** > Ar*
31: O-- > e + O- # made up for testing purposes...
"""
species_ids = ["O", "O-", "O--", "O2", "Ar", "Ar+", "Ar++", "Ar*", "Ar**"]
species_charges = [0, -1, -2, 0, 0, 1, 2, 0, 0]
species_masses = [
15.9994,
15.9994,
15.9994,
31.9988,
39.948,
39.948,
39.948,
39.948,
39.948,
]
species_lj_sigma_coefficients = [
3.05,
3.05,
3.05,
3.47,
3.542,
3.542,
3.542,
3.542,
3.542,
]
# only ions and excited species get stuck to surfaces
species_surface_sticking_coefficients = [0, 0, 0, 0, 0, 0, 0, 0, 0]
# singly ionized and excited species return as neutrals
# doubly ionized and excited species return half as singly, half as neutrals
species_surface_return_matrix = [
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0],
]
reactions_ids = [
1,
2,
3,
4,
6,
7,
8,
9,
10,
12,
13,
14,
15,
16,
19,
20,
21,
22,
23,
25,
26,
27,
28,
29,
30,
31,
]
reactions_strings = [
"e + O2 -> O2 + e",
"e + Ar -> e + Ar",
"e + Ar+ -> Ar+ + e",
"e + Ar -> Ar* + e", # made up for testing purposes...
"e + Ar* -> Ar** + e",
"e + Ar* -> Ar + e",
"e + Ar** -> Ar + e",
"e + Ar** -> Ar* + e",
"Ar + e -> Ar++ + e + e + e", # made up for testing purposes...
"e + Ar* -> Ar+ + e + e",
"e + Ar** -> Ar+ + e + e",
"e + O -> O-", # made up for testing purposes...
"e + O2 -> O- + O",
"e + e + O -> O--", # made up for testing purposes...
"e + O2 -> e + O + O",
"e + O- -> O--", # made up for testing purposes...
"Ar+ + e -> Ar++ + e + e",
"Ar* + Ar* -> Ar+ + Ar + e",
"Ar* + Ar** -> Ar+ + Ar + e",
"Ar + Ar+ -> Ar + Ar+",
"O- + O -> O2 + e",
"Ar+ + O- -> Ar + O",
"Ar+ + O- + M -> Ar + O + M", # made up for testing purposes...
"Ar++ + O-- + M -> Ar + O + M", # made up for testing purposes...
"Ar** -> Ar*",
"O-- -> e + O-", # made up for testing purposes...
]
reactions_arrh_a = [
3.93e-14,
2.660e-13,
1.61e-10,
1.00e-11,
7.91e-13,
1.96e-15,
2.26e-14,
6.86e-13,
1e-09,
1.42e-13,
3.45e-13,
1e-16,
6.74e-16,
1.00e-32,
1.75e-14,
1e-16,
1e-09,
1e-16,
1.2e-15,
5.66e-16,
3e-16,
2.7e-13,
1e-37,
1e-37,
100000.0,
100.0,
]
reactions_arrh_b = [
0.628,
-0.0128,
-1.22,
0.405,
0.281,
0.319,
0.102,
0.337,
0.5,
0.195,
-0.0177,
0.0,
-1.02,
0.0,
-1.28,
0.0,
0.5,
0.0,
0.5,
0.5,
-0.5,
0.0,
0.0,
0.0,
0.0,
0.0,
]
reactions_arrh_c = [
-0.0198,
3.15,
0.0382,
12.1,
1.9,
0.985,
0.0441,
0.102,
15.0,
4.38,
4.11,
0.0,
5.78,
0.0,
7.38,
0.0,
10.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
reactions_el_energy_losses = [
0.0,
0.0,
0.0,
11.6,
1.575,
0.0,
0.0,
0.0,
15.0,
4.425,
2.9,
0.0,
0.0,
0.0,
4.5,
0.0,
10.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
0.0,
]
reactions_elastic_flags = [
True,
True,
True,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
False,
True,
False,
False,
False,
False,
False,
False,
]
reactions_electron_stoich_lhs = [
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
2,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
]
reactions_electron_stoich_rhs = [
1,
1,
1,
1,
1,
1,
1,
1,
3,
2,
2,
0,
0,
0,
1,
0,
2,
1,
1,
0,
1,
0,
0,
0,
0,
1,
]
reactions_arbitrary_stoich_lhs = [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
]
reactions_arbitrary_stoich_rhs = [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
]
reactions_species_stoichiomatrix_lhs = [
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 2, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1],
[0, 0, 0, 0, 1, 1, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
]
reactions_species_stoichiomatrix_rhs = [
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[2, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0],
]
def __init__(self):
# save all the class attributes as instance attributes:
for attr, val in inspect.getmembers(ExampleChemistry):
if not attr[0].startswith("_"):
setattr(self, attr, deepcopy(getattr(ExampleChemistry, attr)))
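# A minimal usage sketch (illustrative, assuming pygmol's Chemistry ABC is satisfied by
# the class attributes above): the deepcopy means mutating an instance never leaks back
# into the class-level defaults shared by other tests.
#
# chem = ExampleChemistry()
# chem.reactions_arrh_a[0] = 0.0
# assert ExampleChemistry.reactions_arrh_a[0] == 3.93e-14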
|
py
|
1a5c6be8720665d341b4d2addd92bbf4cb246f4e
|
import matplotlib.pyplot as plot
import numpy as np
import sys
def loadandsave():
vlxt_percentage = np.loadtxt('/home/oates/work/disorder/vlxt_percentages.dat')
#outfile = open('/home/oates/work/disorder/vlxt_percentages.pkl', 'wb')
#pickle.dump(vlxt_percentage,outfile,pickle.HIGHEST_PROTOCOL)
#outfile.close()
np.save('/home/oates/work/disorder/vlxt_percentages.npy',vlxt_percentage)
vsl2b_percentage = np.loadtxt('/home/oates/work/disorder/vsl2b_percentages.dat')
#outfile = open('/home/oates/work/disorder/vsl2b_percentages.pkl', 'wb')
#pickle.dump(vsl2b_percentage,outfile,pickle.HIGHEST_PROTOCOL)
#outfile.close()
np.save('/home/oates/work/disorder/vsl2b_percentages.npy',vsl2b_percentage)
def load(filename):
return np.load(filename)
vlxt_percentage = load('/home/oates/work/disorder/vlxt_percentages.npy')
vsl2b_percentage = load('/home/oates/work/disorder/vsl2b_percentages.npy')
hist, bins = np.histogram(vlxt_percentage,bins=100)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1]+bins[1:])/2.0
plot.bar(center,hist,align='center',width=width)
#plot.set_title('VLXT % Disorder')
#plot.set_xlabel('% Amino Acids Disordered')
#plot.set_ylabel('Number of Proteins')
plot.show()
hist, bins = np.histogram(vsl2b_percentage,bins=100)
width=0.7*(bins[1]-bins[0])
center=(bins[:-1]+bins[1:])/2.0
plot.bar(center,hist,align='center',width=width)
#plot.set_title('VSL2b % Disorder')
#plot.set_xlabel('% Amino Acids Disordered')
#plot.set_ylabel('Number of Proteins')
plot.show()
|
py
|
1a5c6c948848452d632341e0a1c3dcedda0e5c2d
|
score = float(input("Enter Score: "))
if 0.0 <= score <= 1.0:
if score >= 0.9:
print('A')
elif score >= 0.8:
print('B')
elif score >= 0.7:
print('C')
elif score >= 0.6:
print('D')
else:
print('F')
else:
print('Value of score is out of range.')
largest = None
smallest = None
while True:
num = input("Enter a number: ")
if num == "done" : break
try:
num = int(num)
except:
print('Invalid input')
continue
if largest is None or num > largest:
largest = num
if smallest is None or num < smallest:
smallest = num
#print(num)
print("Maximum is", largest)
print('Minimum is', smallest)
def computepay(h,r):
if h <= 40:
pay = h * r
else:
h1 = h - 40
pay = 40 * r + h1 *(r * 1.5)
return pay
hrs = input("Enter Hours:")
rate = input('Enter Rate:')
h = float(hrs)
r = float(rate)
p = computepay(h,r)
print("Pay",p)
|
py
|
1a5c6cb6dbb4aed97fe01a1e937545eaaaddbd02
|
#
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class auditsyslogpolicy_csvserver_binding(base_resource) :
""" Binding class showing the csvserver that can be bound to auditsyslogpolicy.
"""
def __init__(self) :
self._boundto = None
self._priority = None
self._activepolicy = None
self._name = None
self.___count = None
@property
def name(self) :
r"""Name of the policy.<br/>Minimum length = 1.
"""
try :
return self._name
except Exception as e:
raise e
@name.setter
def name(self, name) :
r"""Name of the policy.<br/>Minimum length = 1
"""
try :
self._name = name
except Exception as e:
raise e
@property
def boundto(self) :
r"""The entity name to which policy is bound.
"""
try :
return self._boundto
except Exception as e:
raise e
@boundto.setter
def boundto(self, boundto) :
r"""The entity name to which policy is bound.
"""
try :
self._boundto = boundto
except Exception as e:
raise e
@property
def priority(self) :
try :
return self._priority
except Exception as e:
raise e
@property
def activepolicy(self) :
try :
return self._activepolicy
except Exception as e:
raise e
def _get_nitro_response(self, service, response) :
r""" converts nitro response into object and returns the object array in case of get request.
"""
try :
result = service.payload_formatter.string_to_resource(auditsyslogpolicy_csvserver_binding_response, response, self.__class__.__name__)
if(result.errorcode != 0) :
if (result.errorcode == 444) :
service.clear_session(self)
if result.severity :
if (result.severity == "ERROR") :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
else :
raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
return result.auditsyslogpolicy_csvserver_binding
except Exception as e :
raise e
def _get_object_name(self) :
r""" Returns the value of object identifier argument
"""
try :
if self.name is not None :
return str(self.name)
return None
except Exception as e :
raise e
@classmethod
def get(cls, service, name="", option_="") :
r""" Use this API to fetch auditsyslogpolicy_csvserver_binding resources.
"""
try :
if not name :
obj = auditsyslogpolicy_csvserver_binding()
response = obj.get_resources(service, option_)
else :
obj = auditsyslogpolicy_csvserver_binding()
obj.name = name
response = obj.get_resources(service)
return response
except Exception as e:
raise e
@classmethod
def get_filtered(cls, service, name, filter_) :
r""" Use this API to fetch filtered set of auditsyslogpolicy_csvserver_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = auditsyslogpolicy_csvserver_binding()
obj.name = name
option_ = options()
option_.filter = filter_
response = obj.getfiltered(service, option_)
return response
except Exception as e:
raise e
@classmethod
def count(cls, service, name) :
r""" Use this API to count auditsyslogpolicy_csvserver_binding resources configued on NetScaler.
"""
try :
obj = auditsyslogpolicy_csvserver_binding()
obj.name = name
option_ = options()
option_.count = True
response = obj.get_resources(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
@classmethod
def count_filtered(cls, service, name, filter_) :
r""" Use this API to count the filtered set of auditsyslogpolicy_csvserver_binding resources.
Filter string should be in JSON format, e.g. "port:80,servicetype:HTTP".
"""
try :
obj = auditsyslogpolicy_csvserver_binding()
obj.name = name
option_ = options()
option_.count = True
option_.filter = filter_
response = obj.getfiltered(service, option_)
if response :
return response[0].__dict__['___count']
return 0
except Exception as e:
raise e
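# A minimal usage sketch of the classmethods above (assumptions: `ns_session` is an
# already-authenticated nitro_service instance and "syslog_pol1" is an existing policy
# name; both are illustrative, not part of this module):
#
# bindings = auditsyslogpolicy_csvserver_binding.get(ns_session, name="syslog_pol1")
# total = auditsyslogpolicy_csvserver_binding.count(ns_session, name="syslog_pol1")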
class auditsyslogpolicy_csvserver_binding_response(base_response) :
def __init__(self, length=1) :
self.auditsyslogpolicy_csvserver_binding = []
self.errorcode = 0
self.message = ""
self.severity = ""
self.sessionid = ""
self.auditsyslogpolicy_csvserver_binding = [auditsyslogpolicy_csvserver_binding() for _ in range(length)]
|
py
|
1a5c6d601c04f39eb961f4cfc1e4d19c9ca8c2a3
|
from presenter.converter import service, settingkey
ID_NONE = 'none'
ID_NOTIFY_LOADED_VIDEO = 'load_video'
class EventNotifier():
def __init__(self):
self.__callbackList = list()
def notifyEvent(self, notifyId=ID_NONE):
for func in self.__callbackList:
func(notifyId)
def addCallBack(self, func):
self.__callbackList.append(func)
def removeCallback(self, func):
self.__callbackList.remove(func)
class Presenter():
def __init__(self):
self.service = service
self.notifier = EventNotifier()
def setVideoPath(self, path=None):
self.service.setSettingsParam(settingkey.KEY_OF_IN, path)
self.notifier.notifyEvent(ID_NOTIFY_LOADED_VIDEO)
def getPreviewImage(self):
return self.service.doCreatePreviewImage()
def addEventCallback(self, func):
self.notifier.addCallBack(func)
def removeCallback(self, func):
self.notifier.removeCallback(func)
|
py
|
1a5c6da4e86bfacd1997edfdcdfa1957b1cd2818
|
from django.contrib import admin
from .models import SKYUrl
admin.site.register(SKYUrl)
|
py
|
1a5c6de06435410969ca7a2762451a2f2aa6e3d6
|
_NOW = 1151365354
_TIMEFORMAT = '%b %d %I:%M %p'
class DummyOptions:
make_pipes_error = None
fork_error = None
execv_error = None
kill_error = None
minfds = 5
loglevel = 20
def __init__(self):
self.identifier = 'supervisor'
self.childlogdir = '/tmp'
self.uid = 999
self.logger = self.getLogger()
self.backofflimit = 10
self.logfile = '/tmp/logfile'
self.nocleanup = False
self.strip_ansi = False
self.pidhistory = {}
self.process_group_configs = []
self.nodaemon = False
self.socket_map = {}
self.mood = 1
self.mustreopen = False
self.realizeargs = None
self.fds_cleaned_up = False
self.rlimit_set = False
self.setuid_called = False
self.httpservers_opened = False
self.signals_set = False
self.daemonized = False
self.make_logger_messages = None
self.autochildlogdir_cleared = False
self.cleaned_up = False
self.pidfile_written = False
self.directory = None
self.waitpid_return = None, None
self.kills = {}
self._signal = None
self.parent_pipes_closed = None
self.child_pipes_closed = None
self.forkpid = 0
self.pgrp_set = None
self.duped = {}
self.written = {}
self.fds_closed = []
self._exitcode = None
self.execv_args = None
self.setuid_msg = None
self.privsdropped = None
self.logs_reopened = False
self.environment_processed = False
self.select_result = [], [], []
self.select_error = None
self.write_accept = None
self.write_error = None
self.tempfile_name = '/foo/bar'
self.remove_error = None
self.removed = []
self.existing = []
self.openreturn = None
self.readfd_result = ''
self.parse_warnings = []
self.serverurl = 'http://localhost:9001'
self.changed_directory = False
self.chdir_error = None
self.umaskset = None
def getLogger(self, *args, **kw):
logger = DummyLogger()
logger.handlers = [DummyLogger()]
logger.args = args, kw
return logger
def realize(self, args, **kw):
self.realizeargs = args
self.realizekw = kw
def process_config(self, do_usage=True):
pass
def cleanup_fds(self):
self.fds_cleaned_up = True
def set_rlimits(self):
self.rlimits_set = True
return ['rlimits_set']
def set_uid(self):
self.setuid_called = True
return 'setuid_called'
def openhttpservers(self, supervisord):
self.httpservers_opened = True
def daemonize(self):
self.daemonized = True
def setsignals(self):
self.signals_set = True
def get_signal(self):
return self._signal
def get_socket_map(self):
return self.socket_map
def make_logger(self, critical_msgs, warn_msgs, info_msgs):
self.make_logger_messages = critical_msgs, warn_msgs, info_msgs
def clear_autochildlogdir(self):
self.autochildlogdir_cleared = True
def get_autochildlog_name(self, *ignored):
return self.tempfile_name
def cleanup(self):
self.cleaned_up = True
def write_pidfile(self):
self.pidfile_written = True
def waitpid(self):
return self.waitpid_return
def kill(self, pid, sig):
if self.kill_error:
raise OSError(self.kill_error)
self.kills[pid] = sig
def stat(self, filename):
import os
return os.stat(filename)
def get_path(self):
return ["/bin", "/usr/bin", "/usr/local/bin"]
def get_pid(self):
import os
return os.getpid()
def check_execv_args(self, filename, argv, st):
if filename == '/bad/filename':
from supervisor.options import NotFound
raise NotFound('bad filename')
def make_pipes(self, stderr=True):
if self.make_pipes_error:
raise OSError(self.make_pipes_error)
pipes = {}
pipes['child_stdin'], pipes['stdin'] = (3, 4)
pipes['stdout'], pipes['child_stdout'] = (5, 6)
if stderr:
pipes['stderr'], pipes['child_stderr'] = (7, 8)
else:
pipes['stderr'], pipes['child_stderr'] = None, None
return pipes
def write(self, fd, chars):
if self.write_error:
raise OSError(self.write_error)
if self.write_accept:
chars = chars[self.write_accept]
data = self.written.setdefault(fd, '')
data += chars
self.written[fd] = data
return len(chars)
def fork(self):
if self.fork_error:
raise OSError(self.fork_error)
return self.forkpid
def close_fd(self, fd):
self.fds_closed.append(fd)
def close_parent_pipes(self, pipes):
self.parent_pipes_closed = pipes
def close_child_pipes(self, pipes):
self.child_pipes_closed = pipes
def setpgrp(self):
self.pgrp_set = True
def dup2(self, frm, to):
self.duped[frm] = to
def _exit(self, code):
self._exitcode = code
def execve(self, filename, argv, environment):
if self.execv_error:
if self.execv_error == 1:
raise OSError(self.execv_error)
else:
raise RuntimeError(self.execv_error)
self.execv_args = (filename, argv)
self.execv_environment = environment
def dropPrivileges(self, uid):
if self.setuid_msg:
return self.setuid_msg
self.privsdropped = uid
def readfd(self, fd):
return self.readfd_result
def reopenlogs(self):
self.logs_reopened = True
def process_environment(self):
self.environment_processed = True
def mktempfile(self, prefix, suffix, dir):
return self.tempfile_name
def select(self, r, w, x, timeout):
import select
if self.select_error:
raise select.error(self.select_error)
return self.select_result
def remove(self, path):
import os
if self.remove_error:
raise os.error(self.remove_error)
self.removed.append(path)
def exists(self, path):
if path in self.existing:
return True
return False
def open(self, name, mode='r'):
if self.openreturn:
return self.openreturn
return open(name, mode)
def chdir(self, dir):
if self.chdir_error:
raise OSError(self.chdir_error)
self.changed_directory = True
def setumask(self, mask):
self.umaskset = mask
class DummyLogger:
def __init__(self):
self.reopened = False
self.removed = False
self.closed = False
self.data = []
def info(self, msg, **kw):
if kw:
msg = msg % kw
self.data.append(msg)
warn = debug = critical = trace = error = blather = info
def log(self, level, msg, **kw):
if kw:
msg = msg % kw
self.data.append(msg)
def reopen(self):
self.reopened = True
def close(self):
self.closed = True
def remove(self):
self.removed = True
def flush(self):
self.flushed = True
def getvalue(self):
return ''.join(self.data)
class DummySupervisor:
def __init__(self, options=None, state=None, process_groups=None):
if options is None:
self.options = DummyOptions()
else:
self.options = options
if state is None:
from supervisor.supervisord import SupervisorStates
self.options.mood = SupervisorStates.RUNNING
else:
self.options.mood = state
if process_groups is None:
self.process_groups = {}
else:
self.process_groups = process_groups
def get_state(self):
return self.options.mood
class DummySocket:
bind_called = False
bind_addr = None
listen_called = False
listen_backlog = None
close_called = False
def __init__(self, fd):
self.fd = fd
def fileno(self):
return self.fd
def bind(self, addr):
self.bind_called = True
self.bind_addr = addr
def listen(self, backlog):
self.listen_called = True
self.listen_backlog = backlog
def close(self):
self.close_called = True
def __str__(self):
return 'dummy socket'
class DummySocketConfig:
def __init__(self, fd):
self.fd = fd
def addr(self):
return 'dummy addr'
def __eq__(self, other):
return self.fd == other.fd
def __ne__(self, other):
return not self.__eq__(other)
def create_and_bind(self):
return DummySocket(self.fd)
class DummySocketManager:
def __init__(self, config, **kwargs):
self._config = config
def config(self):
return self._config
def get_socket(self):
return DummySocket(self._config.fd)
class DummyProcess:
# Initial state; overridden by instance variables
pid = 0 # Subprocess pid; 0 when not running
laststart = 0 # Last time the subprocess was started; 0 if never
laststop = 0 # Last time the subprocess was stopped; 0 if never
delay = 0 # If nonzero, delay starting or killing until this time
administrative_stop = 0 # true if the process has been stopped by an admin
system_stop = 0 # true if the process has been stopped by the system
killing = 0 # flag determining whether we are trying to kill this proc
backoff = 0 # backoff counter (to backofflimit)
waitstatus = None
exitstatus = None
pipes = None
rpipes = None
dispatchers = None
stdout_logged = ''
stderr_logged = ''
spawnerr = None
stdout_buffer = '' # buffer of characters from child stdout output to log
stderr_buffer = '' # buffer of characters from child stderr output to log
stdin_buffer = '' # buffer of characters to send to child process' stdin
listener_state = None
group = None
def __init__(self, config, state=None):
self.config = config
self.logsremoved = False
self.stop_called = False
self.backoff_secs = None
self.spawned = False
if state is None:
from supervisor.process import ProcessStates
state = ProcessStates.RUNNING
self.state = state
self.error_at_clear = False
self.killed_with = None
self.drained = False
self.stdout_buffer = ''
self.stderr_buffer = ''
self.stdout_logged = ''
self.stderr_logged = ''
self.stdin_buffer = ''
self.pipes = {}
self.rpipes = {}
self.dispatchers = {}
self.finished = None
self.logs_reopened = False
self.execv_arg_exception = None
self.input_fd_drained = None
self.output_fd_drained = None
self.transitioned = False
self.write_error = None
def reopenlogs(self):
self.logs_reopened = True
def removelogs(self):
if self.error_at_clear:
raise IOError('whatever')
self.logsremoved = True
def get_state(self):
return self.state
def stop(self):
self.stop_called = True
self.killing = False
from supervisor.process import ProcessStates
self.state = ProcessStates.STOPPED
def kill(self, signal):
self.killed_with = signal
def spawn(self):
self.spawned = True
from supervisor.process import ProcessStates
self.state = ProcessStates.RUNNING
def drain(self):
self.drained = True
def __cmp__(self, other):
return cmp(self.config.priority, other.config.priority)
def readable_fds(self):
return []
def record_output(self):
self.stdout_logged += self.stdout_buffer
self.stdout_buffer = ''
self.stderr_logged += self.stderr_buffer
self.stderr_buffer = ''
def finish(self, pid, sts):
self.finished = pid, sts
def give_up(self):
from supervisor.process import ProcessStates
self.state = ProcessStates.FATAL
def get_execv_args(self):
if self.execv_arg_exception:
raise self.execv_arg_exception('whatever')
import shlex
commandargs = shlex.split(self.config.command)
program = commandargs[0]
return program, commandargs
def drain_output_fd(self, fd):
self.output_fd_drained = fd
def drain_input_fd(self, fd):
self.input_fd_drained = fd
def write(self, chars):
if self.write_error:
raise OSError(self.write_error)
self.stdin_buffer += chars
def transition(self):
self.transitioned = True
class DummyPConfig:
def __init__(self, options, name, command, directory=None, umask=None,
priority=999, autostart=True,
autorestart=True, startsecs=10, startretries=999,
uid=None, stdout_logfile=None, stdout_capture_maxbytes=0,
stdout_events_enabled=False,
stdout_logfile_backups=0, stdout_logfile_maxbytes=0,
stderr_logfile=None, stderr_capture_maxbytes=0,
stderr_events_enabled=False,
stderr_logfile_backups=0, stderr_logfile_maxbytes=0,
redirect_stderr=False,
stopsignal=None, stopwaitsecs=10, stopasgroup=False, killasgroup=False,
exitcodes=(0,2), environment=None, serverurl=None):
self.options = options
self.name = name
self.command = command
self.priority = priority
self.autostart = autostart
self.autorestart = autorestart
self.startsecs = startsecs
self.startretries = startretries
self.uid = uid
self.stdout_logfile = stdout_logfile
self.stdout_capture_maxbytes = stdout_capture_maxbytes
self.stdout_events_enabled = stdout_events_enabled
self.stdout_logfile_backups = stdout_logfile_backups
self.stdout_logfile_maxbytes = stdout_logfile_maxbytes
self.stderr_logfile = stderr_logfile
self.stderr_capture_maxbytes = stderr_capture_maxbytes
self.stderr_events_enabled = stderr_events_enabled
self.stderr_logfile_backups = stderr_logfile_backups
self.stderr_logfile_maxbytes = stderr_logfile_maxbytes
self.redirect_stderr = redirect_stderr
if stopsignal is None:
import signal
stopsignal = signal.SIGTERM
self.stopsignal = stopsignal
self.stopwaitsecs = stopwaitsecs
self.stopasgroup = stopasgroup
self.killasgroup = killasgroup
self.exitcodes = exitcodes
self.environment = environment
self.directory = directory
self.umask = umask
self.autochildlogs_created = False
self.serverurl = serverurl
def create_autochildlogs(self):
self.autochildlogs_created = True
def make_process(self, group=None):
process = DummyProcess(self)
process.group = group
return process
def make_dispatchers(self, proc):
use_stderr = not self.redirect_stderr
pipes = self.options.make_pipes(use_stderr)
stdout_fd,stderr_fd,stdin_fd = (pipes['stdout'],pipes['stderr'],
pipes['stdin'])
dispatchers = {}
if stdout_fd is not None:
dispatchers[stdout_fd] = DummyDispatcher(readable=True)
if stderr_fd is not None:
dispatchers[stderr_fd] = DummyDispatcher(readable=True)
if stdin_fd is not None:
dispatchers[stdin_fd] = DummyDispatcher(writable=True)
return dispatchers, pipes
def makeExecutable(file, substitutions=None):
import os
import sys
import tempfile
if substitutions is None:
substitutions = {}
data = open(file).read()
last = os.path.split(file)[1]
substitutions['PYTHON'] = sys.executable
for key in substitutions.keys():
data = data.replace('<<%s>>' % key.upper(), substitutions[key])
tmpnam = tempfile.mktemp(prefix=last)
f = open(tmpnam, 'w')
f.write(data)
f.close()
os.chmod(tmpnam, 0755)
return tmpnam
def makeSpew(unkillable=False):
import os
here = os.path.dirname(__file__)
if not unkillable:
return makeExecutable(os.path.join(here, 'fixtures/spew.py'))
return makeExecutable(os.path.join(here, 'fixtures/unkillable_spew.py'))
class DummyMedusaServerLogger:
def __init__(self):
self.logged = []
def log(self, category, msg):
self.logged.append((category, msg))
class DummyMedusaServer:
def __init__(self):
self.logger = DummyMedusaServerLogger()
class DummyMedusaChannel:
def __init__(self):
self.server = DummyMedusaServer()
self.producer = None
def push_with_producer(self, producer):
self.producer = producer
def close_when_done(self):
pass
class DummyRequest:
command = 'GET'
_error = None
_done = False
version = '1.0'
def __init__(self, path, params, query, fragment, env=None):
self.args = path, params, query, fragment
self.producers = []
self.headers = {}
self.header = []
self.outgoing = []
self.channel = DummyMedusaChannel()
if env is None:
self.env = {}
else:
self.env = env
def split_uri(self):
return self.args
def error(self, code):
self._error = code
def push(self, producer):
self.producers.append(producer)
def __setitem__(self, header, value):
self.headers[header] = value
def has_key(self, header):
return self.headers.has_key(header)
def done(self):
self._done = True
def build_reply_header(self):
return ''
def log(self, *arg, **kw):
pass
def cgi_environment(self):
return self.env
def get_server_url(self):
return 'http://example.com'
class DummyRPCInterfaceFactory:
def __init__(self, supervisord, **config):
self.supervisord = supervisord
self.config = config
class DummyRPCServer:
def __init__(self):
self.supervisor = DummySupervisorRPCNamespace()
self.system = DummySystemRPCNamespace()
class DummySystemRPCNamespace:
pass
class DummySupervisorRPCNamespace:
_restartable = True
_restarted = False
_shutdown = False
_readlog_error = False
from supervisor.process import ProcessStates
all_process_info = [
{
'name':'foo',
'group':'foo',
'pid':11,
'state':ProcessStates.RUNNING,
'statename':'RUNNING',
'start':_NOW - 100,
'stop':0,
'spawnerr':'',
'now':_NOW,
'description':'foo description',
},
{
'name':'bar',
'group':'bar',
'pid':12,
'state':ProcessStates.FATAL,
'statename':'FATAL',
'start':_NOW - 100,
'stop':_NOW - 50,
'spawnerr':'screwed',
'now':_NOW,
'description':'bar description',
},
{
'name':'baz_01',
'group':'baz',
'pid':13,
'state':ProcessStates.STOPPED,
'statename':'STOPPED',
'start':_NOW - 100,
'stop':_NOW - 25,
'spawnerr':'',
'now':_NOW,
'description':'baz description',
},
]
def getAPIVersion(self):
return '3.0'
getVersion = getAPIVersion # deprecated
def getPID(self):
return 42
def readProcessStdoutLog(self, name, offset, length):
from supervisor import xmlrpc
import xmlrpclib
if name == 'BAD_NAME':
raise xmlrpclib.Fault(xmlrpc.Faults.BAD_NAME, 'BAD_NAME')
elif name == 'FAILED':
raise xmlrpclib.Fault(xmlrpc.Faults.FAILED, 'FAILED')
elif name == 'NO_FILE':
raise xmlrpclib.Fault(xmlrpc.Faults.NO_FILE, 'NO_FILE')
a = 'output line\n' * 10
return a[offset:]
readProcessLog = readProcessStdoutLog
readProcessStderrLog = readProcessStdoutLog
def getAllProcessInfo(self):
return self.all_process_info
def getProcessInfo(self, name):
from supervisor import xmlrpc
import xmlrpclib
from supervisor.process import ProcessStates
for i in self.all_process_info:
if i['name']==name:
info=i
return info
if name == 'BAD_NAME':
raise xmlrpclib.Fault(xmlrpc.Faults.BAD_NAME, 'BAD_NAME')
if name == 'FAILED':
raise xmlrpclib.Fault(xmlrpc.Faults.FAILED, 'FAILED')
if name == 'NO_FILE':
raise xmlrpclib.Fault(xmlrpc.Faults.NO_FILE, 'NO_FILE')
def startProcess(self, name):
from supervisor import xmlrpc
from xmlrpclib import Fault
if name == 'BAD_NAME:BAD_NAME':
raise Fault(xmlrpc.Faults.BAD_NAME, 'BAD_NAME:BAD_NAME')
if name == 'BAD_NAME':
raise Fault(xmlrpc.Faults.BAD_NAME, 'BAD_NAME')
if name == 'NO_FILE':
raise Fault(xmlrpc.Faults.NO_FILE, 'NO_FILE')
if name == 'NOT_EXECUTABLE':
raise Fault(xmlrpc.Faults.NOT_EXECUTABLE, 'NOT_EXECUTABLE')
if name == 'ALREADY_STARTED':
raise Fault(xmlrpc.Faults.ALREADY_STARTED, 'ALREADY_STARTED')
if name == 'SPAWN_ERROR':
raise Fault(xmlrpc.Faults.SPAWN_ERROR, 'SPAWN_ERROR')
return True
def startProcessGroup(self, name):
from supervisor import xmlrpc
return [
{'name':'foo_00', 'group':'foo',
'status': xmlrpc.Faults.SUCCESS,
'description': 'OK'},
{'name':'foo_01', 'group':'foo',
'status':xmlrpc.Faults.SUCCESS,
'description': 'OK'},
]
def startAllProcesses(self):
from supervisor import xmlrpc
return [
{'name':'foo', 'group':'foo',
'status': xmlrpc.Faults.SUCCESS,
'description': 'OK'},
{'name':'foo2', 'group':'foo2',
'status':xmlrpc.Faults.SUCCESS,
'description': 'OK'},
{'name':'failed', 'group':'failed_group',
'status':xmlrpc.Faults.SPAWN_ERROR,
'description':'SPAWN_ERROR'}
]
def stopProcessGroup(self, name):
from supervisor import xmlrpc
return [
{'name':'foo_00', 'group':'foo',
'status': xmlrpc.Faults.SUCCESS,
'description': 'OK'},
{'name':'foo_01', 'group':'foo',
'status':xmlrpc.Faults.SUCCESS,
'description': 'OK'},
]
def stopProcess(self, name):
from supervisor import xmlrpc
from xmlrpclib import Fault
if name == 'BAD_NAME:BAD_NAME':
raise Fault(xmlrpc.Faults.BAD_NAME, 'BAD_NAME:BAD_NAME')
if name == 'BAD_NAME':
raise Fault(xmlrpc.Faults.BAD_NAME, 'BAD_NAME')
if name == 'NOT_RUNNING':
raise Fault(xmlrpc.Faults.NOT_RUNNING, 'NOT_RUNNING')
if name == 'FAILED':
raise Fault(xmlrpc.Faults.FAILED, 'FAILED')
return True
def stopAllProcesses(self):
from supervisor import xmlrpc
return [
{'name':'foo','group':'foo',
'status': xmlrpc.Faults.SUCCESS,
'description': 'OK'},
{'name':'foo2', 'group':'foo2',
'status':xmlrpc.Faults.SUCCESS,'description': 'OK'},
{'name':'failed', 'group':'failed_group',
'status':xmlrpc.Faults.BAD_NAME,
'description':'FAILED'}
]
def restart(self):
if self._restartable:
self._restarted = True
return
from xmlrpclib import Fault
from supervisor import xmlrpc
raise Fault(xmlrpc.Faults.SHUTDOWN_STATE, '')
def shutdown(self):
if self._restartable:
self._shutdown = True
return
from xmlrpclib import Fault
from supervisor import xmlrpc
raise Fault(xmlrpc.Faults.SHUTDOWN_STATE, '')
def reloadConfig(self):
return [[['added'], ['changed'], ['removed']]]
def addProcessGroup(self, name):
from xmlrpclib import Fault
from supervisor import xmlrpc
if name == 'ALREADY_ADDED':
raise Fault(xmlrpc.Faults.ALREADY_ADDED, '')
if name == 'BAD_NAME':
raise Fault(xmlrpc.Faults.BAD_NAME, '')
if hasattr(self, 'processes'):
self.processes.append(name)
else:
self.processes = [name]
def removeProcessGroup(self, name):
from xmlrpclib import Fault
from supervisor import xmlrpc
if name == 'STILL_RUNNING':
raise Fault(xmlrpc.Faults.STILL_RUNNING, '')
if name == 'BAD_NAME':
raise Fault(xmlrpc.Faults.BAD_NAME, '')
self.processes.remove(name)
def clearProcessStdoutLog(self, name):
from xmlrpclib import Fault
from supervisor import xmlrpc
if name == 'BAD_NAME':
raise Fault(xmlrpc.Faults.BAD_NAME, 'BAD_NAME')
return True
clearProcessLog = clearProcessStdoutLog
clearProcessStderrLog = clearProcessStdoutLog
clearProcessLogs = clearProcessStdoutLog
def clearAllProcessLogs(self):
from supervisor import xmlrpc
return [
{'name':'foo', 'group':'foo',
'status':xmlrpc.Faults.SUCCESS,
'description': 'OK'},
{'name':'foo2', 'group':'foo2',
'status':xmlrpc.Faults.SUCCESS,
'description': 'OK'},
{'name':'failed', 'group':'failed_group',
'status':xmlrpc.Faults.FAILED,
'description':'FAILED'}
]
def raiseError(self):
raise ValueError('error')
def getSupervisorVersion(self):
return '3000'
def readLog(self, whence, offset):
if self._readlog_error:
from xmlrpclib import Fault
raise Fault(self._readlog_error, '')
return 'mainlogdata'
class DummyPGroupConfig:
def __init__(self, options, name='whatever', priority=999, pconfigs=None):
self.options = options
self.name = name
self.priority = priority
if pconfigs is None:
pconfigs = []
self.process_configs = pconfigs
self.after_setuid_called = False
self.pool_events = []
self.buffer_size = 10
def after_setuid(self):
self.after_setuid_called = True
def make_group(self):
return DummyProcessGroup(self)
def __repr__(self):
return '<%s instance at %s named %s>' % (self.__class__, id(self),
self.name)
class DummyFCGIGroupConfig(DummyPGroupConfig):
def __init__(self, options, name='whatever', priority=999, pconfigs=None, socket_config=DummySocketConfig(1)):
DummyPGroupConfig.__init__(self, options, name, priority, pconfigs)
self.socket_config = socket_config
class DummyProcessGroup:
def __init__(self, config):
self.config = config
self.transitioned = False
self.all_stopped = False
self.dispatchers = {}
self.unstopped_processes = []
def transition(self):
self.transitioned = True
def stop_all(self):
self.all_stopped = True
def get_unstopped_processes(self):
return self.unstopped_processes
def get_dispatchers(self):
return self.dispatchers
class DummyFCGIProcessGroup(DummyProcessGroup):
def __init__(self, config):
DummyProcessGroup.__init__(self, config)
self.socket_manager = DummySocketManager(config.socket_config)
class PopulatedDummySupervisor(DummySupervisor):
def __init__(self, options, group_name, *pconfigs):
DummySupervisor.__init__(self, options)
self.process_groups = {}
processes = {}
self.group_name = group_name
gconfig = DummyPGroupConfig(options, group_name, pconfigs=pconfigs)
pgroup = DummyProcessGroup(gconfig)
self.process_groups[group_name] = pgroup
for pconfig in pconfigs:
process = DummyProcess(pconfig)
processes[pconfig.name] = process
pgroup.processes = processes
def set_procattr(self, process_name, attr_name, val, group_name=None):
if group_name is None:
group_name = self.group_name
process = self.process_groups[group_name].processes[process_name]
setattr(process, attr_name, val)
class DummyDispatcher:
write_event_handled = False
read_event_handled = False
error_handled = False
logs_reopened = False
logs_removed = False
closed = False
flush_error = None
flushed = False
def __init__(self, readable=False, writable=False, error=False):
self._readable = readable
self._writable = writable
self._error = error
self.input_buffer = ''
if readable:
# only readable dispatchers should have these methods
def reopenlogs():
self.logs_reopened = True
self.reopenlogs = reopenlogs
def removelogs():
self.logs_removed = True
self.removelogs = removelogs
def readable(self):
return self._readable
def writable(self):
return self._writable
def handle_write_event(self):
if self._error:
raise self._error
self.write_event_handled = True
def handle_read_event(self):
if self._error:
raise self._error
self.read_event_handled = True
def handle_error(self):
self.error_handled = True
def close(self):
self.closed = True
def flush(self):
if self.flush_error:
raise OSError(self.flush_error)
self.flushed = True
class DummyStream:
def __init__(self, error=None):
self.error = error
self.closed = False
self.flushed = False
self.written = ''
def close(self):
if self.error:
raise self.error
self.closed = True
def flush(self):
self.flushed = True
def write(self, msg):
if self.error:
raise self.error
        self.written += msg
def seek(self, num, whence=0):
pass
def tell(self):
return len(self.written)
class DummyEvent:
def __init__(self, serial='abc'):
if serial is not None:
self.serial = serial
def __str__(self):
return 'dummy event'
def dummy_handler(event, result):
pass
def rejecting_handler(event, result):
from supervisor.dispatchers import RejectEvent
raise RejectEvent(result)
def exception_handler(event, result):
raise ValueError(result)
def lstrip(s):
strings = [x.strip() for x in s.split('\n')]
return '\n'.join(strings)
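# Illustrative usage (a hedged sketch, not part of the original fixtures):
# shows how the test doubles above are typically wired together by tests in
# this suite.  The helper name, process name '/bin/cat', group name and pid
# below are all hypothetical; the function is defined only for illustration
# and is never called at import time.
def _example_fixture_usage():
    options = DummyOptions()
    pconfig = DummyPConfig(options, 'cat', '/bin/cat')
    process = DummyProcess(pconfig)
    process.spawn()  # records the spawn and moves the dummy to RUNNING
    assert process.spawned
    supervisord = PopulatedDummySupervisor(options, 'catgroup', pconfig)
    supervisord.set_procattr('cat', 'pid', 42)
    assert supervisord.process_groups['catgroup'].processes['cat'].pid == 42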
|
py
|
1a5c6e8aec6b4c3f483a7683df7e9f62d5e77ba8
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Network is a composition of Layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import weakref
from tensorflow.python.eager import context
from tensorflow.python.estimator import util as estimator_util
from tensorflow.python.framework import ops
from tensorflow.python.layers import base
from tensorflow.python.ops import variable_scope
from tensorflow.python.training import checkpoint_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util
# pylint: disable=protected-access
# Explanation for protected-access disable: Network has lots of same-class and
# parent-class references across different objects, and some to private
# functions in base.py which should be reused.
_DeferredRestoration = collections.namedtuple(
"_DeferredRestoration",
[
# The map_func to use (either user-specified or the default).
"map_func",
# Boolean, True if the user specified an explicit map_func, for error
# messages.
"map_func_is_user",
# A mapping from checkpoint names to initial values of not-yet-created
# variables which should be restored. These values come from parsing a
# checkpoint.
"checkpointed_variables_to_restore",
# A mapping from checkpoint name to variable objects of variables which
# have already been restored, for error checking.
"restored_variables",
# The session to restore with (if in graph mode).
"session",
# Names of the Network where the restore was requested, for error
# messages.
"network_name",
"network_scope_name"
])
def _default_naming_conflict_error_message(
mapped_name, first_variable, second_variable,
network_name, network_scope_name):
return (
("The default checkpoint variable name mapping strategy for Network "
"'%s' resulted in a naming conflict. We attempted to strip off the "
"variable prefix for the Network ('%s'), but this resulted in two "
"variables named '%s' (originally '%s' and '%s'). This should only "
"happen when using variable sharing (i.e. the Network contains Networks "
"or Layers which were first added to another Network, and therefore "
"have that Network's variable prefix). One solution is to pass "
"`map_func=lambda n: n` to Network.save and Network.restore to use "
"fully qualified variable names in the checkpoint, although this will "
"require that the variable prefix of the Network being restored into "
"is also '%s'. You may alternatively write an arbitrary mapping.")
% (
network_name, network_scope_name, mapped_name,
first_variable._shared_name,
second_variable._shared_name, network_scope_name
))
def _restore_custom_map_func_error_message(
mapped_name, first_variable, second_variable,
network_name, network_scope_name):
return (
("The map_func passed to Network.restore for the Network '%s' "
"resulted in two variables named '%s' (originally '%s' and '%s'). Since "
"this is also an error on Network.save, this Network was "
"probably not saved with this map_func. Note that map_func "
"always maps from full variable names to checkpoint names; "
"there is no need to specify an inverse mapping.\n\n"
"Try stripping less from the variable names, or renaming parts "
"of the Network. For reference, variables created by sub-Layers "
"of this Network are prefixed with '%s', but if they are "
"re-used after being added to another Network they will have "
"that Network's full variable prefix instead.") % (
network_name, mapped_name,
first_variable._shared_name,
second_variable._shared_name,
network_scope_name))
def _make_custom_getter_for_deferred_restorations():
"""Returns a custom getter which searches `deferred_restorations`.
Returns: A tuple of (_custom_getter, deferred_restorations)
_custom_getter: The getter which should be added to variable_scopes where
variables will be created.
deferred_restorations: A list for _DeferredRestoration objects. Typically
empty when the getter is set, and expanded as deferred restorations are
requested. All new deferred restorations should be appended to the end of
the list, where they will have priority over older deferred restorations.
"""
deferred_restorations = []
def _custom_getter(getter, name, shape=None, dtype=None,
initializer=None,
*args, **kwargs):
"""A custom getter which processes deferred restorations."""
# Iterate over restorations, newest first (newer restorations will take
# precedence over older restorations, just like with immediate restorations
# into existing variables).
delayed_restoration = None
found_value = False
value_to_restore = None
for delayed_restoration in reversed(
deferred_restorations):
checkpoint_name = delayed_restoration.map_func(name)
if (checkpoint_name
in delayed_restoration.checkpointed_variables_to_restore):
found_value = True
value_to_restore = (
delayed_restoration.checkpointed_variables_to_restore[
checkpoint_name])
if found_value:
break
# value_to_restore may be False because this variable is not in any
# checkpoint we are restoring, or None because we have explicitly set it to
# None when it was previously fetched. In either case, we don't need to
# set an initializer.
if found_value and value_to_restore is not None:
initializer = value_to_restore
shape = None
variable = getter(name, shape=shape, dtype=dtype, initializer=initializer,
*args, **kwargs)
if found_value and value_to_restore is not None:
# Mark as already restored from this checkpoint.
delayed_restoration.checkpointed_variables_to_restore[
checkpoint_name] = None
if context.in_graph_mode():
delayed_restoration.session.run(variable.initializer)
if found_value:
# Error checking should run even if we've already restored a value.
if delayed_restoration.restored_variables.setdefault(
checkpoint_name, variable) is not variable:
# Naming conflict. We've tried to initialize two variables with the
# same value from the checkpoint.
if delayed_restoration.map_func_is_user:
raise ValueError(
_restore_custom_map_func_error_message(
mapped_name=checkpoint_name,
first_variable=delayed_restoration.restored_variables[
checkpoint_name],
second_variable=variable,
network_name=delayed_restoration.network_name,
network_scope_name=delayed_restoration.network_scope_name))
else:
raise ValueError(
_default_naming_conflict_error_message(
mapped_name=checkpoint_name,
first_variable=delayed_restoration.restored_variables[
checkpoint_name],
second_variable=variable,
network_name=delayed_restoration.network_name,
network_scope_name=delayed_restoration.network_scope_name))
return variable
return _custom_getter, deferred_restorations
def _make_prefix_stripping_map_fn(scope_name):
"""Closure for stripping the scope name of a Network.
Implemented as a closure rather than a member function to avoid reference
cycles in deferred restorations (this function should not have a reference to
the Network which created it).
Args:
scope_name: The Network.scope_name to strip from variables.
Returns:
A scope_name-stripping default `map_fn` for the Network.
"""
def _strip_variable_prefix(original_variable_name):
"""The default map_func for saving or restoring variables.
Strips the variable prefix for the Network on which save/restore was called,
and leaves other variable names fully qualified in the checkpoint.
Args:
original_variable_name: The _shared_name of the variable (no :0
suffix) to map.
Returns:
The checkpoint name of the variable.
"""
scope_name_with_slash = scope_name + "/"
if original_variable_name.startswith(scope_name_with_slash):
return original_variable_name[len(scope_name_with_slash):]
else:
return original_variable_name
return _strip_variable_prefix
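# Illustrative only (not part of the original module): with a scope_name of
# "my_network_1", the returned map_fn behaves roughly as follows.  The
# variable names shown are made up for the example.
#
#   strip = _make_prefix_stripping_map_fn("my_network_1")
#   strip("my_network_1/dense/kernel")    # -> "dense/kernel"
#   strip("other_network/dense/kernel")   # -> left unchanged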
class Network(base.Layer):
"""Represents the composition of a set of Layers.
TODO(josh11b,ashankar):
- Should "trainable" be changeable on the Network object?
- Do we allow add_variable in Network?
- Detect layers used in __call__ that weren't registered with track_layer.
- Convert inputs to __call__ to tensors.
- Prevent variables from being created after the first __call__?
(Think about restoring from a checkpoint).
"""
def __init__(self, name=None):
if isinstance(name, variable_scope.VariableScope):
raise ValueError("VariableScopes are not valid Network names.")
if name is not None and "/" in name:
raise ValueError(
"Forward slashes ('/') are not allowed in Network names.")
super(Network, self).__init__(name=name)
self._layers = []
self._sub_layer_name_uids = collections.defaultdict(int)
# Initially None, but set to False for networks which are first built as
# top-level.
self._first_parent = None # A weak reference to our first parent.
self._non_network_sublayers = []
self._owned_layers = {}
# The scope to use if we end up without a parent.
self._default_parent_variable_scope = variable_scope.get_variable_scope()
# Hold on to the variable scope counts from init to check whether a scope
# with the name we want was ever created in our parent scope. Without this
# check we might have name collisions if the parent scope on init gets
# closed before build is called.
self._variable_scope_counts_on_init = (
variable_scope._get_default_variable_store().variable_scopes_count)
self._custom_getter, self._deferred_restorations = (
_make_custom_getter_for_deferred_restorations())
def _init_set_name(self, name):
# Anonymous Networks (name=None) defer setting a final name until they are
# (1) added to another Network, or (2) built/called (where (2) is only used
# for a "top level" network).
#
# However, if we were provided an explicit name (name is not None), that
# will always be the final name of the Network; if it turns out not to be
# unique or if variable names can't be prefixed by it we will throw an
# error.
self._name = name
self._base_name = None
def _finalize_name(self, parent_network):
if not self._name:
      # We were not passed a name explicitly (or it was blank), so this is an
# anonymous Network. We make up a unique name.
if parent_network:
avoid_names = parent_network._owned_layers
name_uid_map = parent_network._sub_layer_name_uids
else:
name_uid_map = base._get_default_graph_uid_map()
# Figure out which names we have to avoid based on which variable scope
# we're nested in.
strip_name = self._default_parent_variable_scope.name
if strip_name:
strip_name += "/"
def _strip_on_init_scope(name):
if name.startswith(strip_name):
return name[len(strip_name):]
else:
return None
avoid_names = set(
_strip_on_init_scope(name)
for name in self._variable_scope_counts_on_init.keys() if name)
self._name, self._base_name = self._make_unique_name(
name_uid_map=name_uid_map, avoid_names=avoid_names,
namespace=self._default_parent_variable_scope.name)
if self._first_parent is None or (self._first_parent # False = no parent
and self._first_parent() is None):
# Save a pointer to the parent Network so that we can later check that the
# scope name we get is correct.
if not parent_network:
self._first_parent = parent_network
else:
self._first_parent = weakref.ref(parent_network)
def _set_scope(self, scope=None):
if self._scope is None:
if not self._first_parent:
first_parent = self._first_parent
else:
first_parent = self._first_parent()
if first_parent is None:
        # If we were never added to another Network, or that Network has been
# garbage collected before being called, then we're a top-level Network.
self._finalize_name(
# Use False to make sure the value sticks and we don't inherit a
# parent if we're added to a network later.
parent_network=False)
if scope is not None:
raise ValueError("Networks may not be created with explicit scopes.")
if first_parent:
first_parent._set_scope()
parent_scope = first_parent._scope
else:
parent_scope = self._default_parent_variable_scope
with variable_scope.variable_scope(parent_scope) as parent_vs:
expected_scope_name = parent_vs.name + "/" + self._name
if expected_scope_name in self._variable_scope_counts_on_init:
raise ValueError(
("A Network named '%s' already exists (or a variable_scope was "
"created with this name). Names must be unique.") % (
self._name,))
# Make sure variables with this prefix will be unique.
with variable_scope.variable_scope(
None, use_resource=True, default_name=self._name) as scope:
self._scope = scope
scope_name = scope.name
suffix_start = scope_name.rfind("/") + 1
# rfind is -1 if there is no slash in the string, in which case the
# suffix starts at the beginning of the string (there is no prefix).
scope_suffix = scope_name[suffix_start:]
scope_prefix = scope_name[:suffix_start]
if scope_suffix != self._name:
raise ValueError(
("A Network named '%s' already exists (or a variable_scope was "
"created with this name). Names must be unique.") % (
self._name,))
if (first_parent
and scope_prefix[:-1] != first_parent.scope_name):
raise ValueError(
("Network variable names must match a nesting of sub-Network "
"names. Expected prefix '%s' from parent network, but got "
"'%s' when attempting to create a variable_scope for Network "
"'%s'. Likely an explicit variable_scope was inserted into "
"the nesting.") % (
first_parent.scope_name,
scope_prefix[:-1],
self._name))
elif not first_parent and scope_prefix:
# For the case when this Network is not nested inside any other
# Network, but is in a variable_scope. This Network's name takes on
# the full variable scope prefix.
self._name = scope_name
for non_network_sublayer in self._non_network_sublayers:
self._set_scope_for_nonnetwork_sublayer(non_network_sublayer)
def _set_scope_for_nonnetwork_sublayer(self, sublayer):
if sublayer._scope is None:
if sublayer._first_parent is None:
constituent_first_parent = None
else:
constituent_first_parent = sublayer._first_parent()
if constituent_first_parent:
constituent_first_parent._set_scope()
parent_scope = constituent_first_parent._scope
else:
self._finalize_name(False)
raise ValueError(
("The parent of a Layer added to Network %s was garbage collected "
"before the Layer was built. If this limitation bothers you "
"please file a feature request.") %
(self.name,))
with variable_scope.variable_scope(parent_scope):
# Horrid hack to make Layer variable names which are direct
# sub-layers of Networks conform to the Network variable naming
# conventions.
with variable_scope.variable_scope(
None, use_resource=True,
default_name=sublayer.name) as sub_scope:
sublayer._scope = sub_scope
@base.Layer.name.getter
def name(self):
if self._name is None:
raise ValueError(
"The network does not yet have a final name, but a name was "
"requested for it. Networks get a name when they are added to "
"another Network via track_layer, or when they are first "
"called/built.")
return self._name
def track_layer(self, layer):
"""Track a Layer in this Network.
`Network` requires that all `Layer`s used in `call()` be tracked so that the
`Network` can export a complete list of variables.
Args:
layer: A `tf.layers.Layer` object.
Returns:
The passed in `layer`.
Raises:
RuntimeError: If __init__ has not been called.
TypeError: If `layer` is the wrong type.
ValueError: If a `Layer` with the same name has already been added.
"""
if not hasattr(self, "_layers"):
raise RuntimeError("Need to call Network.__init__ before adding layers")
if not isinstance(layer, base.Layer):
raise TypeError(
"Network.track_layer() passed type %s, not a tf.layers.Layer" %
(type(layer),))
if isinstance(layer, Network):
layer._finalize_name(parent_network=self)
else:
# `layer` is a non-Network, so it hasn't been named to follow Network
# conventions for contained Layers (i.e. the same conventions as for
# sub-Networks). This renaming is necessary to isolate Network variable
# naming from Layers constructed outside the Network and never added to it
# (because Layers are named globally).
if not layer.built:
if not hasattr(layer, "_first_parent"):
dereferenced_layer_first_parent = None
else:
dereferenced_layer_first_parent = layer._first_parent()
if dereferenced_layer_first_parent is None:
if layer._name != layer._base_name:
# If name and base_name do not match, then this Layer used anonymous
# naming and we have to rename it. Otherwise there's an explicit
# name, and we should respect it (subject to error checking).
layer._name, layer._base_name = layer._make_unique_name(
name_uid_map=self._sub_layer_name_uids,
avoid_names=self._owned_layers
# No namespace required, since we've specified our own UID map.
)
layer._first_parent = weakref.ref(self)
self._non_network_sublayers.append(layer)
if (not layer.built
and layer._first_parent
and self is layer._first_parent()):
if layer.name in self._owned_layers:
if self._owned_layers[layer.name] is layer:
return layer
raise ValueError(
"Attempt to add two Layers with the name '%s' to the same Network."
% (layer.name))
self._owned_layers[layer.name] = layer
self._layers.append(layer)
return layer
def get_layer(self, name=None, index=None):
"""Get a contained `tf.layers.Layer` either by name or index.
Args:
name: String matching one of the names of a contained `Layer`. Note that
the names of `Layer`s added to `Network`s may not be unique when doing
layer sharing (i.e. adding a `Layer` to this `Network` which was already
added to another `Network`). The lowest index `Layer` with a matching
name will be returned.
index: Integer in [0, number of layers). Layers are assigned an index
by the order they are added.
Returns:
A `tf.layers.Layer` object.
Raises:
ValueError: If neither or both of 'index' or 'name' is specified, or the
lookup failed.
"""
if index is not None:
if name is not None:
raise ValueError("Exactly one of 'index' or 'name' must be provided")
if len(self._layers) <= index:
raise ValueError("Was asked to retrieve layer at index " + str(index) +
" but model only has " + str(len(self._layers)) +
" layers.")
else:
return self._layers[index]
else:
if not name:
raise ValueError("Provide either a layer name or layer index.")
for layer in self._layers:
if layer.name == name:
return layer
raise ValueError("No such layer: " + name)
# The following methods are for implementing the Layer interface.
@property
def weights(self):
# TODO(josh11b): Should this return a set or perform de-duplication of
# variables in the case of shared layers/variables that appear in
# multiple places in the Network?
weights = []
for layer in self._layers:
weights += layer.weights
return weights
@property
def trainable_weights(self):
weights = []
for layer in self._layers:
weights += layer.trainable_weights
return weights
@property
def non_trainable_weights(self):
weights = []
for layer in self._layers:
weights += layer.non_trainable_weights
return weights
@property
def trainable(self):
return True
@trainable.setter
def trainable(self, value):
if not value:
# We believe it better to decide which layers & networks are trainable
# at the Trainer level than here. Otherwise you can run into trouble if a
# layer/network is shared between two models, but is trainable in one
# but not the other (like with adversarial networks).
raise AttributeError("cannot mark Network as not trainable")
@property
def layers(self):
return self._layers
def add_variable(self, name, shape, dtype=None, initializer=None,
regularizer=None, trainable=True, constraint=None):
raise RuntimeError(
"add_variable not supported in Network class yet. Please file an issue "
"at https://github.com/tensorflow/tensorflow/issues/new if this is "
"important to you")
def save(self, save_path, global_step=None, map_func=None):
"""Save variables from the Network to a checkpoint.
Args:
save_path: Either a checkpoint prefix or the name of a directory to save
the checkpoint in (in which case the checkpoint will be named based on
the Network name).
global_step: The global step to use when naming the checkpoint. If None
(default), we will first try to get the default global step. If that
fails because no default global step exists, then the checkpoint is
created without a global step suffix.
map_func: A function mapping fully qualified variable names
(e.g. 'my_network_1/dense_1/kernel') to names in the checkpoint. By
default (if `map_func=None`), the variable prefix for the network being
restored (`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped
and all other variable names (shared with other Networks) are left
unchanged.
Returns:
The checkpoint prefix for the saved checkpoint, which may be passed to
`Network.restore`.
Raises:
ValueError: If the Network has not yet been called, or if map_func results
in a name collision.
"""
if not self.built:
raise ValueError(
"Attempt to save the Network before it was first called. This means "
"variables have not yet been created, so there is nothing to save.")
self._set_scope() # scope_name should be available to map_funcs
if global_step is None:
global_step = training_util.get_global_step()
if os.path.isdir(save_path):
# If we were passed a directory, default to naming based on the Network
# name.
save_path = os.path.join(save_path, self.name.replace("/", "_"))
user_map_func = map_func
if map_func is None:
map_func = _make_prefix_stripping_map_fn(self.scope_name)
variable_map = {}
for variable in self.variables:
mapped_name = map_func(variable._shared_name)
if variable_map.setdefault(mapped_name, variable) is not variable:
if user_map_func is None:
# Instead of erroring out, we could just re-try and silently use the
# full variable names in the checkpoint. This could be odd for deeply
# nested sub-Networks (since the full prefix from the nesting would
# get added), so for now we'll let the user deal with this case.
raise ValueError(_default_naming_conflict_error_message(
mapped_name=mapped_name,
first_variable=variable_map[mapped_name],
second_variable=variable,
network_name=self.name,
network_scope_name=self.scope_name))
else:
# The user passed their own problematic map_func.
raise ValueError(
("The map_func passed to Network.save for the Network '%s' "
"resulted in two variables named '%s' ('%s' and '%s'). Try "
"stripping less from the variable names, or renaming parts of "
"the Network. For reference, variables created by sub-Layers of "
"this Network are prefixed with '%s', but if they are re-used "
"after being added to another Network, they will have that "
"Network's full variable prefix instead.") % (
self.name, mapped_name,
variable_map[mapped_name]._shared_name,
variable._shared_name,
self.scope_name))
if context.in_eager_mode():
sess = None
else:
sess = ops.get_default_session()
return saver_lib.Saver(variable_map).save(
sess=sess, save_path=save_path, write_meta_graph=False,
global_step=global_step)
def _restore_existing_variables(self, save_path, map_func, user_map_func):
"""Use a standard Saver to restore existing variables from a checkpoint.
Args:
save_path: The checkpoint prefix or directory to read from.
map_func: The function to use when mapping from variable names to
checkpoint names.
user_map_func: The original map_func passed by the user, for error
checking.
Returns:
A dictionary mapping from checkpoint names to variable objects which have
been restored (for bookkeeping to avoid deferred restorations on these
variables).
Raises:
ValueError: If there is a name collision.
"""
existing_variables_by_checkpoint_name = {}
for variable in self.variables:
checkpoint_name = map_func(variable._shared_name)
if existing_variables_by_checkpoint_name.setdefault(
checkpoint_name, variable) is not variable:
if user_map_func is None:
raise ValueError(_default_naming_conflict_error_message(
mapped_name=checkpoint_name,
first_variable=existing_variables_by_checkpoint_name[
checkpoint_name],
second_variable=variable,
network_name=self.name,
network_scope_name=self.scope_name))
else:
raise ValueError(_restore_custom_map_func_error_message(
mapped_name=checkpoint_name,
first_variable=existing_variables_by_checkpoint_name[
checkpoint_name],
second_variable=variable,
network_name=self.name,
network_scope_name=self.scope_name))
if existing_variables_by_checkpoint_name:
if context.in_eager_mode():
sess = None
else:
sess = ops.get_default_session()
saver_lib.Saver(var_list=existing_variables_by_checkpoint_name).restore(
sess=sess, save_path=save_path)
return existing_variables_by_checkpoint_name
def _set_restore_on_create(self, save_path, map_func, user_map_func,
existing_variables_by_checkpoint_name):
"""If necessary, request deferred restorations of variables."""
checkpoint_reader = checkpoint_utils.load_checkpoint(save_path)
checkpointed_variables_to_restore = {}
for checkpoint_name, _ in checkpoint_utils.list_variables(save_path):
if checkpoint_name in existing_variables_by_checkpoint_name:
# This variable was already created and restored.
continue
# Save the variable for later restoration in a custom getter.
checkpointed_variables_to_restore[checkpoint_name] = (
checkpoint_reader.get_tensor(checkpoint_name))
# Only set a deferred restoration if there are checkpoint variables which
# have not been assigned to existing variables. Note that this loses out on
# some opportunity for error checking, but avoids creating
# _DeferredRestoration objects once a Network has been built (so that
# restoring in a loop does not take increasing amounts of memory).
if checkpointed_variables_to_restore:
if context.in_eager_mode():
sess = None
else:
sess = ops.get_default_session()
# We need a name for error messages. If we haven't been added to another
# Network yet, we're top-level.
self._finalize_name(False)
self._set_scope()
# Save a record of this restoration for use in the custom getter.
deferred_restoration = _DeferredRestoration(
map_func=map_func,
map_func_is_user=(user_map_func is not None),
checkpointed_variables_to_restore=checkpointed_variables_to_restore,
restored_variables={},
session=sess,
network_name=self.name,
network_scope_name=self.scope_name)
self._deferred_restorations.append(deferred_restoration)
# Add the deferred registration to non-Network children, and request that
# Networks propagate the request to their children.
self._add_deferred_restoration(deferred_restoration)
def _add_deferred_restoration(self, deferred_restoration):
"""Add a deferred restoration to this Network and all children.
Restorations which are requested later have higher priority, and the highest
priority matching restoration is applied to a variable when it is created.
Args:
deferred_restoration: A _DeferredRestoration object.
"""
# Networks don't create variables at the moment, so this append isn't
# strictly necessary. We could get by with only adding deferred restorations
# to non-Network Layers.
self._set_scope()
# We use set_custom_getter because it avoids recursively calling up the
# variable_scope tree. We've done the tree traversal ourselves and have
# added the request to each Layer which needs it.
self._scope.set_custom_getter(self._custom_getter)
self._deferred_restorations.append(deferred_restoration)
for layer in self.layers:
if isinstance(layer, Network):
# For Networks, request that they propagate this deferred restoration
# to all of their children recursively.
layer._add_deferred_restoration(deferred_restoration)
else:
# For non-Network Layers, make sure they have a deferred restoration
# queue and a custom getter, then add our request to it.
if not hasattr(layer, "_custom_getter"):
assert not hasattr(layer, "_deferred_restorations")
layer._custom_getter, layer._deferred_restorations = (
_make_custom_getter_for_deferred_restorations())
self._set_scope_for_nonnetwork_sublayer(layer)
layer._scope.set_custom_getter(layer._custom_getter)
layer._deferred_restorations.append(deferred_restoration)
def restore(self, save_path, map_func=None):
"""Restore the Network from a checkpoint.
If variables have already been created (typically when some or all of the
`Network` is built), they are assigned values from the checkpoint
immediately, overwriting any existing values (in graph mode the default
session is used for the assignments).
If there are checkpoint entries which do not correspond to any existing
variables in the `Network`, these values are saved for deferred restoration;
their initial values will be the checkpointed values once they are
created. Requests for multiple deferred restorations behave the same way as
immediate restorations, in that later requests will take priority over
earlier requests relevant to the same variable.
If this `Network` shares `Layer`s with another network, those `Layer`s will
also have their variables restored from the checkpoint.
Args:
save_path: The return value of `Network.save`, or a directory to search
for a checkpoint.
map_func: A function mapping fully qualified variable names
(e.g. 'my_network_1/dense_1/kernel') to names in the checkpoint. By
default (if `map_func=None`), the variable prefix for the network being
restored (`Network.scope_name + '/'`, e.g. 'my_network_1/') is stripped
and all other variable names (shared with other Networks) are left
unchanged. Note that this is the _same_ map_func as `Network.save`, not
an inverse mapping.
"""
self._finalize_name(parent_network=False)
self._set_scope() # scope_name should be available to map_funcs
if os.path.isdir(save_path):
      # If we were passed a directory, default to naming based on the Network
      # name.
save_path = os.path.join(save_path, self.name.replace("/", "_"))
user_map_func = map_func
if map_func is None:
map_func = _make_prefix_stripping_map_fn(self.scope_name)
# Step one is to restore any existing variables from the checkpoint.
existing_variables_by_checkpoint_name = self._restore_existing_variables(
save_path=save_path,
map_func=map_func,
user_map_func=user_map_func)
# Step two is to set a custom getter which restores variables on creation,
# for those variables which have not been added to sub-Layers yet.
self._set_restore_on_create(
save_path=save_path,
map_func=map_func,
user_map_func=user_map_func,
existing_variables_by_checkpoint_name=(
existing_variables_by_checkpoint_name))
# TODO(josh11b): Support other Layer methods needed for graph mode, such as for
# losses and updates
class Sequential(Network):
"""Represents a linear sequence of Layers or functions.
The output of each layer/function is provided as the input to the next.
The inputs passed to `__call__` are passed to the inputs of the first
Layer, and it returns the outputs of the last Layer.
Args:
layers_funcs: An optional sequence where each element is either a
tf.layers.Layer object or a callable.
name: An optional string name to use for this Network.
"""
def __init__(self, layers_funcs=None, name=None):
super(Sequential, self).__init__(name=name)
self._layers_funcs = []
if layers_funcs:
for l in layers_funcs:
self.add(l)
def add(self, layer_func):
if isinstance(layer_func, base.Layer):
args = estimator_util.fn_args(layer_func.call)
self.track_layer(layer_func)
elif callable(layer_func):
args = estimator_util.fn_args(layer_func)
else:
raise TypeError(
"Sequential.add() takes only tf.layers.Layer objects or callables; "
"not '%s' of type '%s'." % (layer_func, type(layer_func)))
self._layers_funcs.append((("training" in args), layer_func))
def call(self, inputs, training=None):
"""Call each Layer in the order they were added."""
# TODO(josh11b): Support "mode" and maybe other arguments
if training is None:
for _, l in self._layers_funcs:
inputs = l(inputs)
else:
for has_training_arg, l in self._layers_funcs:
if has_training_arg:
inputs = l(inputs, training)
else:
inputs = l(inputs)
return inputs
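# --- Illustrative usage (a hedged sketch, not part of the original module) ---
# A minimal Network subclass wired together with track_layer, plus the
# save/restore round trip described in the docstrings above.  The class name,
# layer sizes and checkpoint path are assumptions for illustration; the sketch
# is kept in comments so this module does not import the public `tf` namespace.
#
#   import tensorflow as tf
#   # With eager execution enabled (e.g. tf.contrib.eager.enable_eager_execution()):
#
#   class TwoLayerNet(Network):
#
#     def __init__(self, name=None):
#       super(TwoLayerNet, self).__init__(name=name)
#       self.l1 = self.track_layer(tf.layers.Dense(16, activation=tf.nn.relu))
#       self.l2 = self.track_layer(tf.layers.Dense(1))
#
#     def call(self, inputs):
#       return self.l2(self.l1(inputs))
#
#   net = TwoLayerNet()
#   net(tf.zeros([1, 4]))                   # first call creates the variables
#   prefix = net.save("/tmp/example_ckpt")  # path is illustrative
#   net2 = TwoLayerNet()
#   net2.restore(prefix)                    # deferred until variables exist
#   net2(tf.zeros([1, 4]))                  # variables take checkpointed values
#
#   # Sequential composes Layers/callables in order:
#   seq = Sequential([tf.layers.Dense(16, activation=tf.nn.relu),
#                     tf.layers.Dense(1)])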
|
py
|
1a5c6ed6cbff060cf8510678b5d32f98c60ab885
|
"""
Support for per-request messages to be shown to the user.
These utilities are based upon the Django message framework, and allow
code to register messages to be shown to the user on their next page
view. These messages are shown in a page banner which is supported on
all pages that utilize the main.html template.
There are two common use cases:
- register a message before rendering a view, in which case the message
will be shown on the resulting page
- register a message before posting or redirecting. In these situations
the message will be shown on the subsequent page. This is typically
  used to show a success message to the user.
"""
from abc import abstractmethod
from enum import Enum
from django.contrib import messages
from django.utils.translation import ugettext as _
from openedx.core.djangolib.markup import HTML, Text
class UserMessageType(Enum):
"""
An enumeration of the types of user messages.
"""
INFO = messages.constants.INFO
SUCCESS = messages.constants.SUCCESS
WARNING = messages.constants.WARNING
ERROR = messages.constants.ERROR
CSS_CLASSES = {
UserMessageType.INFO: 'alert-info',
UserMessageType.SUCCESS: 'alert-success',
UserMessageType.WARNING: 'alert-warning',
UserMessageType.ERROR: 'alert-danger',
}
ICON_CLASSES = {
UserMessageType.INFO: 'fa fa-bullhorn',
UserMessageType.SUCCESS: 'fa fa-check-circle',
UserMessageType.WARNING: 'fa fa-warning',
UserMessageType.ERROR: 'fa fa-warning',
}
class UserMessage():
"""
Representation of a message to be shown to a user.
"""
def __init__(self, type, message_html): # lint-amnesty, pylint: disable=redefined-builtin
assert isinstance(type, UserMessageType)
self.type = type
self.message_html = message_html
@property
def css_class(self):
"""
Returns the CSS class to be used on the message element.
"""
return CSS_CLASSES[self.type]
@property
def icon_class(self):
"""
Returns the CSS icon class representing the message type.
"""
return ICON_CLASSES[self.type]
class UserMessageCollection():
"""
A collection of messages to be shown to a user.
"""
@classmethod
@abstractmethod
def get_namespace(self): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Returns the namespace of the message collection.
The name is used to namespace the subset of django messages.
For example, return 'course_home_messages'.
"""
raise NotImplementedError('Subclasses must define a namespace for messages.')
@classmethod
def get_message_html(cls, body_html, title=None, dismissable=False, **kwargs): # pylint: disable=unused-argument
"""
Returns the entire HTML snippet for the message.
Classes that extend this base class can override the message styling
by implementing their own version of this function. Messages that do
not use a title can just pass the body_html.
"""
if title:
return Text(_('{header_open}{title}{header_close}{body}')).format(
header_open=HTML('<div class="message-header">'),
title=title,
body=body_html,
header_close=HTML('</div>')
)
return body_html
@classmethod
def register_user_message(cls, request, message_type, body_html, once_only=False, **kwargs):
"""
Register a message to be shown to the user in the next page.
Arguments:
message_type (UserMessageType): the user message type
body_html (str): body of the message in html format
title (str): optional title for the message as plain text
dismissable (bool): shows a dismiss button (defaults to no button)
once_only (bool): show the message only once per request
"""
assert isinstance(message_type, UserMessageType)
message = Text(cls.get_message_html(body_html, **kwargs))
if not once_only or message not in [m.message for m in messages.get_messages(request)]:
messages.add_message(request, message_type.value, Text(message), extra_tags=cls.get_namespace())
@classmethod
def register_info_message(self, request, message, **kwargs): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Registers an information message to be shown to the user.
"""
self.register_user_message(request, UserMessageType.INFO, message, **kwargs)
@classmethod
def register_success_message(self, request, message, **kwargs): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Registers a success message to be shown to the user.
"""
self.register_user_message(request, UserMessageType.SUCCESS, message, **kwargs)
@classmethod
def register_warning_message(self, request, message, **kwargs): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Registers a warning message to be shown to the user.
"""
self.register_user_message(request, UserMessageType.WARNING, message, **kwargs)
@classmethod
def register_error_message(self, request, message, **kwargs): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Registers an error message to be shown to the user.
"""
self.register_user_message(request, UserMessageType.ERROR, message, **kwargs)
@classmethod
def user_messages(self, request): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Returns any outstanding user messages.
Note: this function also marks these messages as being complete
so they won't be returned in the next request.
"""
def _get_message_type_for_level(level):
"""
Returns the user message type associated with a level.
"""
for __, type in UserMessageType.__members__.items(): # lint-amnesty, pylint: disable=redefined-builtin, no-member
if type.value is level:
return type
raise Exception(f'Unable to find UserMessageType for level {level}')
def _create_user_message(message):
"""
Creates a user message from a Django message.
"""
return UserMessage(
type=_get_message_type_for_level(message.level),
message_html=str(message.message),
)
django_messages = messages.get_messages(request)
return (_create_user_message(message) for message in django_messages if self.get_namespace() in message.tags)
class PageLevelMessages(UserMessageCollection):
"""
This set of messages appears as top page level messages.
"""
NAMESPACE = 'page_level_messages'
@classmethod
def get_message_html(cls, body_html, title=None, dismissable=False, **kwargs):
"""
Returns the entire HTML snippet for the message.
"""
if title:
title_area = Text(_('{header_open}{title}{header_close}')).format(
header_open=HTML('<div class="message-header">'),
title=title,
header_close=HTML('</div>')
)
else:
title_area = ''
if dismissable:
dismiss_button = HTML(
'<div class="message-actions">'
'<button class="btn-link action-dismiss">'
'<span class="sr">{dismiss_text}</span>'
'<span class="icon fa fa-times" aria-hidden="true"></span></button>'
'</div>'
).format(
dismiss_text=Text(_("Dismiss"))
)
else:
dismiss_button = ''
return Text('{title_area}{body_area}{dismiss_button}').format(
title_area=title_area,
body_area=HTML('<div class="message-content">{body_html}</div>').format(
body_html=body_html,
),
dismiss_button=dismiss_button,
)
@classmethod
def get_namespace(self): # lint-amnesty, pylint: disable=bad-classmethod-argument
"""
Returns the namespace of the message collection.
"""
return self.NAMESPACE
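# Illustrative sketch of the first use case from the module docstring above:
# register a message right before rendering a view, so it shows up on the
# resulting page. The view below is hypothetical and not part of this module;
# only PageLevelMessages, Text and _ (already imported/defined above) are real.
def _example_view(request):
    """Hypothetical Django view used only to illustrate the API above."""
    PageLevelMessages.register_info_message(
        request,
        Text(_('This course has not been published yet.')),
        dismissable=True,
    )
    # ...then render a template that extends main.html, which shows the banner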
|
py
|
1a5c6ef0019ffa69fed5a60a4aa28785f6e60409
|
"""
sentry.models.groupshare
~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from uuid import uuid4
from django.conf import settings
from django.db import models
from django.utils import timezone
from sentry.db.models import FlexibleForeignKey, Model, BaseManager, sane_repr
class GroupShare(Model):
"""
A Group that was shared publicly.
"""
__core__ = False
project = FlexibleForeignKey('sentry.Project')
group = FlexibleForeignKey('sentry.Group', unique=True)
uuid = models.CharField(max_length=32, unique=True, default=lambda: uuid4().hex)
# Tracking the user that initiated the share.
user = FlexibleForeignKey(settings.AUTH_USER_MODEL, null=True)
date_added = models.DateTimeField(default=timezone.now)
objects = BaseManager()
class Meta:
app_label = 'sentry'
db_table = 'sentry_groupshare'
__repr__ = sane_repr('project_id', 'group_id', 'uuid')
|
py
|
1a5c6f14289beb2a51ddc098e56787e901254058
|
from amqpstorm.compatibility import json
from amqpstorm.compatibility import quote
from amqpstorm.management.base import ManagementHandler
API_CONNECTION = 'connections/%s'
API_CONNECTIONS = 'connections'
class Connection(ManagementHandler):
def get(self, connection):
"""Get Connection details.
:param str connection: Connection name
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: dict
"""
return self.http_client.get(API_CONNECTION % connection)
def list(self):
"""Get Connections.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: list
"""
return self.http_client.get(API_CONNECTIONS)
def close(self, connection, reason='Closed via management api'):
"""Close Connection.
:param str connection: Connection name
:param str reason: Reason for closing connection.
:raises ApiError: Raises if the remote server encountered an error.
:raises ApiConnectionError: Raises if there was a connectivity issue.
:rtype: None
"""
close_payload = json.dumps({
'name': connection,
'reason': reason
})
connection = quote(connection, '')
return self.http_client.delete(API_CONNECTION % connection,
payload=close_payload,
headers={
'X-Reason': reason
})
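if __name__ == '__main__':
    # Illustrative sketch, not part of the handler above. It assumes a RabbitMQ
    # management API listening at the URL below, and it assumes amqpstorm's
    # ManagementApi client exposes this Connection handler as its `connection`
    # attribute; adjust if your version wires the http_client differently.
    from amqpstorm.management import ManagementApi
    client = ManagementApi('http://localhost:15672', 'guest', 'guest')
    for conn in client.connection.list():
        print(conn.get('name'))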
|
py
|
1a5c6fa1fe12ea66b9fc41bcfac0963b397b49b4
|
"""Headers Support
This module implements support for parsing and handling headers.
"""
import re
from circuits.six import b, iteritems, u
# Regular expression that matches `special' characters in parameters, the
# existance of which force quoting of the parameter value.
tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
q_separator = re.compile(r'; *q *=')
def _formatparam(param, value=None, quote=1):
"""Convenience function to format and return a key=value pair.
This will quote the value if needed or if quote is true.
"""
if value is not None and len(value) > 0:
if quote or tspecials.search(value):
value = value.replace('\\', '\\\\').replace('"', r'\"')
return '%s="%s"' % (param, value)
else:
return '%s=%s' % (param, value)
else:
return param
def header_elements(fieldname, fieldvalue):
"""Return a sorted HeaderElement list.
Returns a sorted HeaderElement list
from a comma-separated header string.
"""
if not fieldvalue:
return []
result = []
for element in fieldvalue.split(","):
if fieldname.startswith("Accept") or fieldname == 'TE':
hv = AcceptElement.from_str(element)
else:
hv = HeaderElement.from_str(element)
result.append(hv)
return list(reversed(sorted(result)))
class HeaderElement(object):
"""An element (with parameters) from an HTTP header's element list."""
def __init__(self, value, params=None):
self.value = value
if params is None:
params = {}
self.params = params
def __eq__(self, other):
return self.value == other.value
def __lt__(self, other):
return self.value < other.value
def __str__(self):
p = [";%s=%s" % (k, v) for k, v in iteritems(self.params)]
return "%s%s" % (self.value, "".join(p))
def __bytes__(self):
return b(self.__str__())
def __unicode__(self):
return u(self.__str__())
def parse(elementstr):
"""Transform 'token;key=val' to ('token', {'key': 'val'})."""
# Split the element into a value and parameters. The 'value' may
# be of the form, "token=token", but we don't split that here.
atoms = [x.strip() for x in elementstr.split(";") if x.strip()]
if not atoms:
initial_value = ''
else:
initial_value = atoms.pop(0).strip()
params = {}
for atom in atoms:
atom = [x.strip() for x in atom.split("=", 1) if x.strip()]
key = atom.pop(0)
if atom:
val = atom[0]
else:
val = ""
params[key] = val
return initial_value, params
parse = staticmethod(parse)
@classmethod
def from_str(cls, elementstr):
"""Construct an instance from a string of the form 'token;key=val'."""
ival, params = cls.parse(elementstr)
return cls(ival, params)
class AcceptElement(HeaderElement):
"""An element (with parameters) from an Accept* header's element list.
AcceptElement objects are comparable; the more-preferred object will be
"less than" the less-preferred object. They are also therefore sortable;
if you sort a list of AcceptElement objects, they will be listed in
priority order; the most preferred value will be first. Yes, it should
have been the other way around, but it's too late to fix now.
"""
@classmethod
def from_str(cls, elementstr):
qvalue = None
# The first "q" parameter (if any) separates the initial
# media-range parameter(s) (if any) from the accept-params.
atoms = q_separator.split(elementstr, 1)
media_range = atoms.pop(0).strip()
if atoms:
# The qvalue for an Accept header can have extensions. The other
# headers cannot, but it's easier to parse them as if they did.
qvalue = HeaderElement.from_str(atoms[0].strip())
media_type, params = cls.parse(media_range)
if qvalue is not None:
params["q"] = qvalue
return cls(media_type, params)
def qvalue(self):
val = self.params.get("q", "1")
if isinstance(val, HeaderElement):
val = val.value
return float(val)
qvalue = property(qvalue, doc="The qvalue, or priority, of this value.")
def __eq__(self, other):
return self.qvalue == other.qvalue
def __lt__(self, other):
if self.qvalue == other.qvalue:
return str(self) < str(other)
else:
return self.qvalue < other.qvalue
class CaseInsensitiveDict(dict):
"""A case-insensitive dict subclass.
Each key is changed on entry to str(key).title().
"""
def __init__(self, *args, **kwargs):
d = dict(*args, **kwargs)
for key, value in iteritems(d):
dict.__setitem__(self, str(key).title(), value)
dict.__init__(self)
def __getitem__(self, key):
return dict.__getitem__(self, str(key).title())
def __setitem__(self, key, value):
dict.__setitem__(self, str(key).title(), value)
def __delitem__(self, key):
dict.__delitem__(self, str(key).title())
def __contains__(self, key):
return dict.__contains__(self, str(key).title())
def get(self, key, default=None):
return dict.get(self, str(key).title(), default)
def update(self, E):
for k in E.keys():
self[str(k).title()] = E[k]
@classmethod
def fromkeys(cls, seq, value=None):
newdict = cls()
for k in seq:
newdict[k] = value
return newdict
def setdefault(self, key, x=None):
key = str(key).title()
try:
return dict.__getitem__(self, key)
except KeyError:
self[key] = x
return x
def pop(self, key, default=None):
return dict.pop(self, str(key).title(), default)
class Headers(CaseInsensitiveDict):
"""
This class implements a storage for headers as key value pairs.
The underlying model of a case insensitive dict matches the requirements
for headers quite well, because usually header keys are unique. If
several values may be associated with a header key, most HTTP headers
represent the values as an enumeration using a comma as item separator.
    There is, however, one exception (currently) to this rule. In order to
set several cookies, there should be multiple headers with the same
key, each setting one cookie ("Set-Cookie: some_cookie").
This is modeled by having either a string (common case) or a list
(cookie case) as value in the underlying dict. In order to allow
easy iteration over all headers as they appear in the HTTP request,
the items() method expands associated lists of values. So if you have
{ "Set-Cookie": [ "cookie1", "cookie2" ] }, the items() method returns
the two pairs ("Set-Cookie", "cookie1") and ("Set-Cookie", "cookie2").
This is convenient for most use cases. The only drawback is that
len(keys()) is not equal to len(items()) for this specialized dict.
"""
def elements(self, key):
"""Return a sorted list of HeaderElements for the given header."""
return header_elements(key, self.get(key))
def get_all(self, name):
"""Return a list of all the values for the named field."""
value = self.get(name, '')
if isinstance(value, list):
return value
return [val.strip() for val in value.split(',')]
def __repr__(self):
return "Headers(%s)" % repr(list(self.items()))
def __str__(self):
headers = ["%s: %s\r\n" % (k, v) for k, v in self.items()]
return "".join(headers) + '\r\n'
def items(self):
for k, v in super(Headers, self).items():
if isinstance(v, list):
for vv in v:
yield (k, vv)
else:
yield (k, v)
def __bytes__(self):
return str(self).encode("latin1")
def append(self, key, value):
"""
If a header with the given name already exists, the value is
normally appended to the existing value separated by a comma.
        If, however, the already existing entry associates the key with a
value of type list (as is the case for "Set-Cookie"),
the new value is appended to that list.
"""
if key not in self:
if key.lower() == "set-cookie":
self[key] = [value]
else:
self[key] = value
else:
if isinstance(self[key], list):
self[key].append(value)
else:
self[key] = ", ".join([self[key], value])
def add_header(self, _name, _value, **_params):
"""Extended header setting.
_name is the header field to add. keyword arguments can be used to set
additional parameters for the header field, with underscores converted
to dashes. Normally the parameter will be added as key="value" unless
value is None, in which case only the key will be added.
Example:
h.add_header('content-disposition', 'attachment', filename='bud.gif')
Note that unlike the corresponding 'email.Message' method, this does
*not* handle '(charset, language, value)' tuples: all values must be
strings or None.
"""
parts = []
if _value is not None:
parts.append(_value)
for k, v in list(_params.items()):
k = k.replace('_', '-')
if v is None:
parts.append(k)
else:
parts.append(_formatparam(k, v))
self.append(_name, "; ".join(parts))
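if __name__ == '__main__':
    # Small sketch of the "Set-Cookie" special case documented on the Headers
    # class above: values for that key are stored as a list, and items()
    # expands the list into separate (key, value) pairs. Values are made up.
    h = Headers()
    h.append("Content-Type", "text/html")
    h.append("Set-Cookie", "cookie1")
    h.append("Set-Cookie", "cookie2")
    print(list(h.items()))  # ("Set-Cookie", "cookie1") and ("Set-Cookie", "cookie2")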
|
py
|
1a5c6fcb4474df47a1f03b8045464ca650ff6523
|
import json
import logging
import requests
from django.conf import settings
NAMESPACES = {"dcim": "dcim"}
class NetBox(object):
"""
Class used to interact with the NetBox API.
"""
logger = logging.getLogger("peering.manager.netbox")
def lookup(self, namespace, search):
"""
Sends a get request to the API given a namespace and some parameters.
"""
# Enforce trailing slash and add namespace
api_url = settings.NETBOX_API.strip("/") + "/" + namespace
# Set token in the headers
headers = {
"accept": "application/json",
"authorization": "Token {}".format(settings.NETBOX_API_TOKEN),
}
# Make the request
self.logger.debug("calling api: %s | %s", api_url, search)
response = requests.get(api_url, headers=headers, params=search)
return response.json() if response.status_code == 200 else None
def get_devices(self):
"""
Return all devices found with the NetBox API.
"""
result = self.lookup(NAMESPACES["dcim"] + "/devices", {})
if not result or result["count"] == 0:
return None
return [
device
for device in result["results"]
if device["device_role"]["slug"] in settings.NETBOX_DEVICE_ROLES
]
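if __name__ == "__main__":
    # Illustrative sketch only: it assumes Django settings are configured with
    # NETBOX_API, NETBOX_API_TOKEN and NETBOX_DEVICE_ROLES, which lookup() and
    # get_devices() above rely on.
    netbox = NetBox()
    for device in netbox.get_devices() or []:
        print(device)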
|
py
|
1a5c70e8315f30953ceb050c28d80960d06f7980
|
import numpy as np
import cv2
def empty(z):
pass
image = np.zeros((300, 512, 3), np.uint8)
cv2.namedWindow('Palette')
cv2.createTrackbar('B', 'Palette', 0, 255, empty)
cv2.createTrackbar('G', 'Palette', 0, 255, empty)
cv2.createTrackbar('R', 'Palette', 0, 255, empty)
while(True):
cv2.imshow('Palette', image)
if cv2.waitKey(1) == 27 :
break
blue = cv2.getTrackbarPos('B', 'Palette')
green = cv2.getTrackbarPos('G', 'Palette')
red = cv2.getTrackbarPos('R', 'Palette')
image[:] = [blue, green, red]
cv2.destroyWindow('Palette')
|
py
|
1a5c71812d610b63dcbd12a5941741a9104b7024
|
"""Support for Modbus Register sensors."""
import logging
import struct
from typing import Any, Union
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME,
CONF_OFFSET,
CONF_SLAVE,
CONF_STRUCTURE,
CONF_UNIT_OF_MEASUREMENT,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from . import CONF_HUB, DEFAULT_HUB, DOMAIN as MODBUS_DOMAIN
_LOGGER = logging.getLogger(__name__)
CONF_COUNT = "count"
CONF_DATA_TYPE = "data_type"
CONF_PRECISION = "precision"
CONF_REGISTER = "register"
CONF_REGISTER_TYPE = "register_type"
CONF_REGISTERS = "registers"
CONF_REVERSE_ORDER = "reverse_order"
CONF_SCALE = "scale"
DATA_TYPE_CUSTOM = "custom"
DATA_TYPE_FLOAT = "float"
DATA_TYPE_INT = "int"
DATA_TYPE_UINT = "uint"
REGISTER_TYPE_HOLDING = "holding"
REGISTER_TYPE_INPUT = "input"
def number(value: Any) -> Union[int, float]:
"""Coerce a value to number without losing precision."""
if isinstance(value, int):
return value
if isinstance(value, str):
try:
value = int(value)
return value
except (TypeError, ValueError):
pass
try:
value = float(value)
return value
except (TypeError, ValueError):
raise vol.Invalid(f"invalid number {value}")
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_REGISTERS): [
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_REGISTER): cv.positive_int,
vol.Optional(CONF_COUNT, default=1): cv.positive_int,
vol.Optional(CONF_DATA_TYPE, default=DATA_TYPE_INT): vol.In(
[DATA_TYPE_INT, DATA_TYPE_UINT, DATA_TYPE_FLOAT, DATA_TYPE_CUSTOM]
),
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Optional(CONF_OFFSET, default=0): number,
vol.Optional(CONF_PRECISION, default=0): cv.positive_int,
vol.Optional(CONF_REGISTER_TYPE, default=REGISTER_TYPE_HOLDING): vol.In(
[REGISTER_TYPE_HOLDING, REGISTER_TYPE_INPUT]
),
vol.Optional(CONF_REVERSE_ORDER, default=False): cv.boolean,
vol.Optional(CONF_SCALE, default=1): number,
vol.Optional(CONF_SLAVE): cv.positive_int,
vol.Optional(CONF_STRUCTURE): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
}
]
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Modbus sensors."""
sensors = []
data_types = {DATA_TYPE_INT: {1: "h", 2: "i", 4: "q"}}
data_types[DATA_TYPE_UINT] = {1: "H", 2: "I", 4: "Q"}
data_types[DATA_TYPE_FLOAT] = {1: "e", 2: "f", 4: "d"}
for register in config.get(CONF_REGISTERS):
structure = ">i"
if register.get(CONF_DATA_TYPE) != DATA_TYPE_CUSTOM:
try:
structure = ">{}".format(
data_types[register.get(CONF_DATA_TYPE)][register.get(CONF_COUNT)]
)
except KeyError:
_LOGGER.error(
"Unable to detect data type for %s sensor, " "try a custom type",
register.get(CONF_NAME),
)
continue
else:
structure = register.get(CONF_STRUCTURE)
try:
size = struct.calcsize(structure)
except struct.error as err:
_LOGGER.error(
"Error in sensor %s structure: %s", register.get(CONF_NAME), err
)
continue
if register.get(CONF_COUNT) * 2 != size:
_LOGGER.error(
"Structure size (%d bytes) mismatch registers count " "(%d words)",
size,
register.get(CONF_COUNT),
)
continue
hub_name = register.get(CONF_HUB)
hub = hass.data[MODBUS_DOMAIN][hub_name]
sensors.append(
ModbusRegisterSensor(
hub,
register.get(CONF_NAME),
register.get(CONF_SLAVE),
register.get(CONF_REGISTER),
register.get(CONF_REGISTER_TYPE),
register.get(CONF_UNIT_OF_MEASUREMENT),
register.get(CONF_COUNT),
register.get(CONF_REVERSE_ORDER),
register.get(CONF_SCALE),
register.get(CONF_OFFSET),
structure,
register.get(CONF_PRECISION),
)
)
if not sensors:
return False
add_entities(sensors)
class ModbusRegisterSensor(RestoreEntity):
"""Modbus register sensor."""
def __init__(
self,
hub,
name,
slave,
register,
register_type,
unit_of_measurement,
count,
reverse_order,
scale,
offset,
structure,
precision,
):
"""Initialize the modbus register sensor."""
self._hub = hub
self._name = name
self._slave = int(slave) if slave else None
self._register = int(register)
self._register_type = register_type
self._unit_of_measurement = unit_of_measurement
self._count = int(count)
self._reverse_order = reverse_order
self._scale = scale
self._offset = offset
self._precision = precision
self._structure = structure
self._value = None
async def async_added_to_hass(self):
"""Handle entity which will be added."""
state = await self.async_get_last_state()
if not state:
return
self._value = state.state
@property
def state(self):
"""Return the state of the sensor."""
return self._value
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
def update(self):
"""Update the state of the sensor."""
if self._register_type == REGISTER_TYPE_INPUT:
result = self._hub.read_input_registers(
self._slave, self._register, self._count
)
else:
result = self._hub.read_holding_registers(
self._slave, self._register, self._count
)
val = 0
try:
registers = result.registers
if self._reverse_order:
registers.reverse()
except AttributeError:
_LOGGER.error(
"No response from hub %s, slave %s, register %s",
self._hub.name,
self._slave,
self._register,
)
return
byte_string = b"".join([x.to_bytes(2, byteorder="big") for x in registers])
val = struct.unpack(self._structure, byte_string)[0]
val = self._scale * val + self._offset
if isinstance(val, int):
self._value = str(val)
if self._precision > 0:
self._value += "." + "0" * self._precision
else:
self._value = f"{val:.{self._precision}f}"
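if __name__ == "__main__":
    # Standalone sketch of the decoding done in update() above: two 16-bit
    # registers are concatenated big-endian and unpacked with the ">f" struct
    # format that setup_platform() picks for a 2-register float sensor. The
    # register values below are made up.
    example_registers = [0x4049, 0x0FDB]
    packed = b"".join(r.to_bytes(2, byteorder="big") for r in example_registers)
    print(struct.unpack(">f", packed)[0])  # ~3.1415927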
|
py
|
1a5c7353dfaeba14d292f8e7891fe56e1819df57
|
"""
Calculates real time burn rate for AWS
"""
import pandas as pd
import pprint
from decisionengine.framework.modules import Transform
import logging
"""
IMPORTANT: Please do not change the order of these keys and always
append new keys rather than prepend or insert.
"""
CONSUMES = ["provisioner_resource_spot_prices",
"AWS_Occupancy"]
PRODUCES = ["AWS_Burn_Rate"]
class AwsBurnRate(Transform.Transform):
def __init__(self, config):
super(AwsBurnRate, self).__init__(config)
self.logger = logging.getLogger()
def transform(self, data_block):
spot_prices = data_block[CONSUMES[0]].fillna(0)
occupancy = data_block[CONSUMES[1]].fillna(0)
burn_df = pd.DataFrame([{"BurnRate": 0.}])
if not occupancy.empty:
df = pd.merge(occupancy,
spot_prices,
how="inner",
on=["AccountName", "AvailabilityZone", "InstanceType"])
if not df.empty:
df["BurnRate"] = pd.to_numeric(
df["RunningVms"]) * pd.to_numeric(df["SpotPrice"])
burn_df = pd.DataFrame([{"BurnRate": df["BurnRate"].sum()}])
return {PRODUCES[0]: burn_df}
def consumes(self, name_list=None):
return CONSUMES
def produces(self, name_schema_id_list=None):
return PRODUCES
def module_config_template():
"""
print a template for this module configuration data
"""
d = {
"AwsBurnRate": {
"module": "modules.AWS.transforms.AwsBurnRate",
"name": "AwsBurnRate",
"parameters": {
}
}
}
print("Entry in channel cofiguration")
pprint.pprint(d)
print("where")
print("\t name - name of the class to be instantiated by task manager")
def module_config_info():
"""
print this module configuration information
"""
print("consumes", CONSUMES)
print("produces", PRODUCES)
module_config_template()
def main():
"""
    Call this as a test unit or use as the CLI of this module
"""
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--configtemplate",
action="store_true",
help="prints the expected module configuration")
parser.add_argument("--configinfo",
action="store_true",
help="prints config template along with produces and consumes info")
args = parser.parse_args()
if args.configtemplate:
module_config_template()
elif args.configinfo:
module_config_info()
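def _burn_rate_demo():
    """
    Illustrative sketch, not used by the module: it reproduces the core of
    AwsBurnRate.transform() above on made-up occupancy and spot-price frames.
    """
    occupancy = pd.DataFrame([{"AccountName": "acct", "AvailabilityZone": "us-east-1a",
                               "InstanceType": "m5.large", "RunningVms": 3}])
    spot_prices = pd.DataFrame([{"AccountName": "acct", "AvailabilityZone": "us-east-1a",
                                 "InstanceType": "m5.large", "SpotPrice": 0.035}])
    df = pd.merge(occupancy, spot_prices, how="inner",
                  on=["AccountName", "AvailabilityZone", "InstanceType"])
    df["BurnRate"] = pd.to_numeric(df["RunningVms"]) * pd.to_numeric(df["SpotPrice"])
    return pd.DataFrame([{"BurnRate": df["BurnRate"].sum()}])  # BurnRate == 0.105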
if __name__ == "__main__":
main()
|
py
|
1a5c74f1b26daccb2de77826152b6e99f9534c79
|
from .decorators import *
from .detail_identifiers import *
from .discord import *
from .file_extensions import *
from .misc import *
|
py
|
1a5c7532b7d9ff145b75422e1c8ddfb2caff126f
|
# -*- coding: utf-8 -*-
from __future__ import print_function
import itertools as it, operator as op, functools as ft
from os.path import join, exists
import os, sys, logging, glob, time, select
class OnDemandLogger(object):
log = None
def __getattr__(self, k):
if not self.log: self.log = logging.getLogger('sht.gpio')
return getattr(self.log, k)
log = OnDemandLogger()
path_gpio = '/sys/class/gpio'
class GPIOAccessFailure(Exception): pass
def gpio_access_wrap(func, checks=12, timeout=1.0):
for n in xrange(checks, -1, -1):
try: return func()
except (IOError, OSError): pass
if checks <= 0: break
if n: time.sleep(timeout / checks)
else:
raise GPIOAccessFailure(func, timeout)
# log.warn('gpio access failed (func: %s, timeout: %s)', func, timeout)
def get_pin_path(n, sub=None, _cache=dict()):
n = int(n)
if n not in _cache:
for try_export in [True, False]:
try:
path = join(path_gpio, 'gpio{}'.format(n))
if not exists(path): path, = glob.glob(path + '_*')
except:
if not try_export:
raise OSError('Failed to find sysfs control path for pin: {}'.format(n))
else: break
log.debug('Exporting pin: %s', n)
with open(join(path_gpio, 'export'), 'wb', 0) as dst:
gpio_access_wrap(ft.partial(dst.write, bytes(n)))
_cache[n] = path
else: path = _cache[n]
return path if not sub else os.path.join(path, sub)
def get_pin_value(n, k='value'):
with gpio_access_wrap(
ft.partial(open, get_pin_path(n, k), 'rb', 0) ) as src:
val = src.read().strip()
if k == 'value':
try: val = int(val)
except ValueError as err:
log.warn('Failed to read/decode pin (n: %s) value %r: %s', n, val, err)
val = None
return val
def set_pin_value(n, v, k='value', force=False, _pin_state=dict()):
if k == 'value' and isinstance(v, bool): v = int(v)
	if not force and _pin_state.get(n) == v: return
# log.debug('Setting parameter of pin-%s: %s = %r ', n, k, v)
with gpio_access_wrap(
ft.partial(open, get_pin_path(n, k), 'wb', 0) ) as dst:
gpio_access_wrap(ft.partial(dst.write, bytes(v)))
_pin_state[n] = v
class PollTimeout(Exception): pass
def poll_pin(n, timeout=1.0, edge='both', _poller_cache=dict()):
if edge: set_pin_value(n, k='edge', v=edge)
try:
if n not in _poller_cache:
_poller_cache[n] = select.poll()
poller = _poller_cache[n]
with gpio_access_wrap(
ft.partial(open, get_pin_path(n, 'value'), 'rb', 0) ) as src:
poller.register(src.fileno(), select.POLLPRI | select.POLLERR)
res = poller.poll(timeout * 1000)
if not res or res[0][1] & select.POLLERR == select.POLLERR:
raise PollTimeout(n, timeout, edge, res)
return get_pin_value(n)
finally:
if edge: set_pin_value(n, k='edge', v='none')
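if __name__ == '__main__':
	# Illustrative sketch only: it assumes the script runs with permission to
	# write /sys/class/gpio and that pin 17 exists on the board; the pin number
	# and direction below are made up for the example.
	set_pin_value(17, 'out', k='direction')
	set_pin_value(17, 1)
	print('pin 17 reads back as: {}'.format(get_pin_value(17)))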
|
py
|
1a5c7611c1d3f34a14501692b6924571da588c21
|
from datetime import datetime
from airflow.models import DAG
from airtunnel import PandasDataAsset
from airtunnel.operators.archival import DataAssetArchiveOperator, IngestArchiveOperator
from airtunnel.operators.ingestion import IngestOperator
from airtunnel.operators.loading import StagingToReadyOperator
from airtunnel.operators.transformation import PandasTransformationOperator
from airtunnel.sensors.ingestion import SourceFileIsReadySensor
student = PandasDataAsset("student")
programme = PandasDataAsset("programme")
enrollment = PandasDataAsset("enrollment")
enrollment_summary = PandasDataAsset("enrollment_summary")
with DAG(
dag_id="university",
schedule_interval=None,
start_date=datetime(year=2019, month=9, day=1),
) as dag:
ingested_ready_tasks = set()
# a common stream of tasks for all ingested assets:
for ingested_asset in (student, programme, enrollment):
source_is_ready = SourceFileIsReadySensor(
# we reduce the poke interval to only 3 seconds so that our test runs complete faster
# do not do in production!! :)
asset=ingested_asset,
poke_interval=3,
no_of_required_static_pokes=2,
)
ingest = IngestOperator(asset=ingested_asset)
transform = PandasTransformationOperator(asset=ingested_asset)
archive = DataAssetArchiveOperator(asset=ingested_asset)
staging_to_ready = StagingToReadyOperator(asset=ingested_asset)
ingest_archival = IngestArchiveOperator(asset=ingested_asset)
dag >> source_is_ready >> ingest >> transform >> archive >> staging_to_ready >> ingest_archival
ingested_ready_tasks.add(staging_to_ready)
# upon having loaded the three ingested assets, connect the aggregation downstream to them:
build_enrollment_summary = PandasTransformationOperator(asset=enrollment_summary)
build_enrollment_summary.set_upstream(ingested_ready_tasks)
staging_to_ready = StagingToReadyOperator(asset=enrollment_summary)
dag >> build_enrollment_summary >> staging_to_ready
|
py
|
1a5c768b2671e695367a9c709814a8a80567c871
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkiot.endpoint import endpoint_data
class QuerySolutionProjectRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Iot', '2018-01-20', 'QuerySolutionProject','iot')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_Type(self):
return self.get_body_params().get('Type')
def set_Type(self,Type):
self.add_body_params('Type', Type)
def get_PageId(self):
return self.get_body_params().get('PageId')
def set_PageId(self,PageId):
self.add_body_params('PageId', PageId)
def get_PageSize(self):
return self.get_body_params().get('PageSize')
def set_PageSize(self,PageSize):
self.add_body_params('PageSize', PageSize)
def get_Name(self):
return self.get_body_params().get('Name')
def set_Name(self,Name):
self.add_body_params('Name', Name)
|
py
|
1a5c77602d0e45ab002c01c60327a744cf05189d
|
#!/usr/bin/env python
from collections import defaultdict
import sys
from hunmisc.corpustools.tsv_tools import sentence_iterator, get_dependencies
from common import sanitize_word
def get_node_id_and_word(token):
word, i = token
word = sanitize_word(word)
node_id = "{0}_{1}".format(word, i)
return node_id, word
def deps_to_sen_dict(deps):
root_token = None
sen_dict = defaultdict(dict)
for dep in deps:
gov = (dep['gov']['word'], dep['gov']['id'])
ddep = (dep['dep']['word'], dep['dep']['id'])
dtype = dep['type']
sen_dict[gov][ddep] = dtype
if dtype == 'root':
root_token = gov
return sen_dict, root_token
def dict_to_graph(sen_dict, token):
    # recursively append this token's subgraph to GRAPH_STRING
global SEEN
global GRAPH_STRING
if token in SEEN:
node_id = SEEN[token]
GRAPH_STRING += node_id
else:
node_id, word = get_node_id_and_word(token)
SEEN[token] = node_id
GRAPH_STRING += "({0} / {1}".format(node_id, word)
for neighbor, edge in sen_dict[token].iteritems():
GRAPH_STRING += ' :{0} '.format(edge.replace(':', '_'))
dict_to_graph(sen_dict, neighbor)
GRAPH_STRING += ')'
HEADER = (
'# IRTG unannotated corpus file, v1.0\n' +
'# interpretation graph: de.up.ling.irtg.algebra.graph.GraphAlgebra')
def main():
print(HEADER)
id_field, word_field, lemma_field, msd_field, gov_field, dep_field = (
0, 1, None, None, -4, -3)
global SEEN
global GRAPH_STRING
with open(sys.argv[1]) as stream:
for sentence in sentence_iterator(stream, comment_tag='#'):
deps = get_dependencies(
sentence, id_field, word_field, lemma_field, msd_field,
gov_field, dep_field)
sentence_dict, root_token = deps_to_sen_dict(deps)
# root token will be the first token if ROOT doesn't exist
if root_token is None:
root_token = sentence_dict.keys()[0]
SEEN = {}
GRAPH_STRING = ''
dict_to_graph(sentence_dict, root_token)
print(GRAPH_STRING)
if __name__ == "__main__":
main()
|
py
|
1a5c777c10e2727ecab3a12b594f2ee551290ac1
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2020 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from collections.abc import Iterable
from datetime import datetime
from enum import Enum
from operator import attrgetter
import numpy as np
from ..core import Entity, HasShapeTileableEnity, ChunkData, Chunk, HasShapeTileableData, \
build_mode, Serializable, OutputType, register_output_types
from ..serialize import ProviderType, ValueType, DataTypeField, ListField, TupleField, \
BoolField, StringField, AnyField
from ..utils import log_unhandled, on_serialize_shape, on_deserialize_shape, is_eager_mode
from .utils import get_chunk_slices, fetch_corner_data
import logging
logger = logging.getLogger(__name__)
class TensorOrder(Enum):
# C order
C_ORDER = 'C'
# Fortran order
F_ORDER = 'F'
class TensorChunkData(ChunkData):
__slots__ = ()
# required fields
_shape = TupleField('shape', ValueType.int64,
on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)
_order = StringField('order', on_serialize=attrgetter('value'), on_deserialize=TensorOrder)
# optional fields
_dtype = DataTypeField('dtype')
def __init__(self, op=None, index=None, shape=None, dtype=None, order=None, **kw):
if isinstance(order, str):
order = getattr(TensorOrder, order)
super().__init__(_op=op, _index=index, _shape=shape, _dtype=dtype, _order=order, **kw)
if self.order is None and self.op is not None:
if len(self.inputs) == 0:
self._order = TensorOrder.C_ORDER
elif all(hasattr(inp, 'order') and inp.order == TensorOrder.F_ORDER
for inp in self.inputs):
self._order = TensorOrder.F_ORDER
else:
self._order = TensorOrder.C_ORDER
@classmethod
def cls(cls, provider):
if provider.type == ProviderType.protobuf:
from ..serialize.protos.tensor_pb2 import TensorChunkDef
return TensorChunkDef
return super().cls(provider)
@property
def params(self):
        # params return the properties which are useful to rebuild a new chunk
return {
'shape': self.shape,
'dtype': self.dtype,
'order': self.order,
'index': self.index,
}
def __len__(self):
try:
return self.shape[0]
except IndexError:
if build_mode().is_build_mode:
return 0
raise TypeError('len() of unsized object')
@property
def shape(self):
return getattr(self, '_shape', None)
@property
def ndim(self):
return len(self.shape)
@property
def size(self):
return np.prod(self.shape).item()
@property
def dtype(self):
return getattr(self, '_dtype', None) or self.op.dtype
@property
def order(self):
return getattr(self, '_order', None)
@property
def nbytes(self):
return np.prod(self.shape) * self.dtype.itemsize
class TensorChunk(Chunk):
__slots__ = ()
_allow_data_type_ = (TensorChunkData,)
def __len__(self):
return len(self._data)
class TensorData(HasShapeTileableData):
__slots__ = ()
# required fields
_order = StringField('order', on_serialize=attrgetter('value'), on_deserialize=TensorOrder)
# optional fields
_dtype = DataTypeField('dtype')
_chunks = ListField('chunks', ValueType.reference(TensorChunkData),
on_serialize=lambda x: [it.data for it in x] if x is not None else x,
on_deserialize=lambda x: [TensorChunk(it) for it in x] if x is not None else x)
def __init__(self, op=None, shape=None, dtype=None, order=None, nsplits=None, chunks=None, **kw):
if isinstance(order, str):
order = getattr(TensorOrder, order)
super().__init__(_op=op, _shape=shape, _dtype=dtype, _order=order, _nsplits=nsplits,
_chunks=chunks, **kw)
if self.order is None and self.op is not None:
if len(self.inputs) == 0:
self._order = TensorOrder.C_ORDER
elif all(hasattr(inp, 'order') and inp.order == TensorOrder.F_ORDER
for inp in self.inputs):
self._order = TensorOrder.F_ORDER
else:
self._order = TensorOrder.C_ORDER
@classmethod
def cls(cls, provider):
if provider.type == ProviderType.protobuf:
from ..serialize.protos.tensor_pb2 import TensorDef
return TensorDef
return super().cls(provider)
def _to_str(self, representation=False):
if build_mode().is_build_mode or len(self._executed_sessions) == 0:
# in build mode, or not executed, just return representation
if representation:
return 'Tensor <op={}, shape={}, key={}'.format(self._op.__class__.__name__,
self._shape,
self._key)
else:
return 'Tensor(op={}, shape={})'.format(self._op.__class__.__name__,
self._shape)
else:
print_options = np.get_printoptions()
threshold = print_options['threshold']
corner_data = fetch_corner_data(self, session=self._executed_sessions[-1])
# if less than default threshold, just set it as default,
            # if not, set to corner_data.size - 1 to make sure ... exists in repr
threshold = threshold if self.size <= threshold else corner_data.size - 1
with np.printoptions(threshold=threshold):
corner_str = repr(corner_data) if representation else str(corner_data)
return corner_str
def __str__(self):
return self._to_str(representation=False)
def __repr__(self):
return self._to_str(representation=True)
@property
def params(self):
        # params return the properties which are useful to rebuild a new tileable object
return {
'shape': self.shape,
'dtype': self.dtype,
'order': self.order
}
@property
def flags(self):
c_order = True if self.ndim <= 1 else self.order == TensorOrder.C_ORDER
f_order = True if self.ndim <= 1 else self.order == TensorOrder.F_ORDER
return {
'C_CONTIGUOUS': c_order,
'F_CONTIGUOUS': f_order
}
@property
def real(self):
from .arithmetic import real
return real(self)
@property
def imag(self):
from .arithmetic import imag
return imag(self)
@property
def dtype(self):
return getattr(self, '_dtype', None) or self.op.dtype
@property
def order(self):
return getattr(self, '_order', None)
@property
def nbytes(self):
return np.prod(self.shape) * self.dtype.itemsize
def get_chunk_slices(self, idx):
return get_chunk_slices(self.nsplits, idx)
def is_scalar(self):
return self.ndim == 0
isscalar = is_scalar
def tosparse(self):
if self.issparse():
return self
from .datasource import fromdense
return fromdense(self)
def todense(self):
if not self.issparse():
return self
from .datasource import fromsparse
return fromsparse(self)
def transpose(self, *axes):
from .base import transpose
if len(axes) == 1 and isinstance(axes[0], Iterable):
axes = axes[0]
return transpose(self, axes)
@property
def T(self):
return self.transpose()
def reshape(self, shape, *shapes, **kw):
from .reshape import reshape
order = kw.pop('order', 'C')
if kw:
raise TypeError(
"'{0}' is an invalid keyword argument for this function".format(tuple(kw)[0]))
if isinstance(shape, Iterable):
shape = tuple(shape)
else:
shape = (shape,)
shape += shapes
return reshape(self, shape, order=order)
def totiledb(self, uri, ctx=None, key=None, timestamp=None):
from .datastore import totiledb
return totiledb(uri, self, ctx=ctx, key=key, timestamp=timestamp)
@staticmethod
def from_dataframe(in_df):
from .datasource import from_dataframe
return from_dataframe(in_df)
def to_dataframe(self, *args, **kwargs):
from ..dataframe.datasource.from_tensor import dataframe_from_tensor
return dataframe_from_tensor(self, *args, **kwargs)
@property
def flat(self):
return flatiter(self)
def to_numpy(self, session=None, **kw):
return self.execute(session=session, **kw).fetch(session=session)
class Tensor(HasShapeTileableEnity):
__slots__ = ()
_allow_data_type_ = (TensorData,)
def __len__(self):
return len(self._data)
@property
def shape(self):
return self._data.shape
@shape.setter
def shape(self, new_shape):
self._data = self._data.reshape(new_shape).data
def _update_shape(self, new_shape):
self._data._update_shape(new_shape)
@property
def real(self):
return self.data.real
@real.setter
def real(self, new_real):
from .arithmetic.setreal import set_real
self._data = set_real(self._data, new_real).data
@property
def imag(self):
return self.data.imag
@imag.setter
def imag(self, new_imag):
from .arithmetic.setimag import set_imag
self._data = set_imag(self._data, new_imag).data
def __array__(self, dtype=None):
if is_eager_mode():
return np.asarray(self.fetch(), dtype=dtype)
else:
return np.asarray(self.execute().fetch(), dtype=dtype)
def __array_function__(self, func, types, args, kwargs):
from .. import tensor as module
for submodule in func.__module__.split('.')[1:]:
try:
module = getattr(module, submodule)
except AttributeError:
return NotImplemented
if not hasattr(module, func.__name__):
return NotImplemented
mars_func = getattr(module, func.__name__)
if mars_func is func:
# avoid Numpy func
return NotImplemented
return mars_func(*args, **kwargs)
def view(self):
return self._view()
@property
def ndim(self):
"""
Number of array dimensions.
Examples
--------
>>> import mars.tensor as mt
>>> x = mt.array([1, 2, 3])
>>> x.ndim
1
>>> y = mt.zeros((2, 3, 4))
>>> y.ndim
3
"""
return super().ndim
def transpose(self, *axes):
"""
Returns a view of the tensor with axes transposed.
For a 1-D tensor, this has no effect. (To change between column and
row vectors, first cast the 1-D tensor into a matrix object.)
For a 2-D tensor, this is the usual matrix transpose.
For an n-D tensor, if axes are given, their order indicates how the
axes are permuted (see Examples). If axes are not provided and
``a.shape = (i[0], i[1], ... i[n-2], i[n-1])``, then
``a.transpose().shape = (i[n-1], i[n-2], ... i[1], i[0])``.
Parameters
----------
axes : None, tuple of ints, or `n` ints
* None or no argument: reverses the order of the axes.
* tuple of ints: `i` in the `j`-th place in the tuple means `a`'s
`i`-th axis becomes `a.transpose()`'s `j`-th axis.
* `n` ints: same as an n-tuple of the same ints (this form is
intended simply as a "convenience" alternative to the tuple form)
Returns
-------
out : Tensor
View of `a`, with axes suitably permuted.
See Also
--------
Tensor.T : Tensor property returning the tensor transposed.
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.array([[1, 2], [3, 4]])
>>> a.execute()
array([[1, 2],
[3, 4]])
>>> a.transpose().execute()
array([[1, 3],
[2, 4]])
>>> a.transpose((1, 0))
array([[1, 3],
[2, 4]])
>>> a.transpose(1, 0).execute()
array([[1, 3],
[2, 4]])
"""
return self._data.transpose(*axes)
@property
def T(self):
"""
Same as self.transpose(), except that self is returned if
self.ndim < 2.
Examples
--------
>>> import mars.tensor as mt
>>> x = mt.array([[1.,2.],[3.,4.]])
>>> x.execute()
array([[ 1., 2.],
[ 3., 4.]])
>>> x.T.execute()
array([[ 1., 3.],
[ 2., 4.]])
>>> x = mt.array([1.,2.,3.,4.])
>>> x.execute()
array([ 1., 2., 3., 4.])
>>> x.T.execute()
array([ 1., 2., 3., 4.])
"""
return self._data.T
def totiledb(self, uri, ctx=None, key=None, timestamp=None):
return self._data.totiledb(uri, ctx=ctx, key=key, timestamp=timestamp)
def copy(self, order='C'):
return super().copy().astype(self.dtype, order=order, copy=False)
def sort(self, axis=-1, kind=None, parallel_kind=None, psrs_kinds=None, order=None):
"""
Sort a tensor, in-place.
Parameters
----------
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, optional
Sorting algorithm. Default is 'quicksort'.
parallel_kind: {'PSRS'}, optional
Parallel sorting algorithm, for the details, refer to:
http://csweb.cs.wfu.edu/bigiron/LittleFE-PSRS/build/html/PSRSalgorithm.html
psrs_kinds: list with 3 elements, optional
Sorting algorithms during PSRS algorithm.
order : str or list of str, optional
When `a` is a tensor with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
numpy.sort : Return a sorted copy of a tensor.
argsort : Indirect sort.
lexsort : Indirect stable sort on multiple keys.
searchsorted : Find elements in sorted tensor.
partition: Partial sort.
Notes
-----
See ``sort`` for notes on the different sorting algorithms.
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.array([[1,4], [3,1]])
>>> a.sort(axis=1)
>>> a.execute()
array([[1, 4],
[1, 3]])
>>> a.sort(axis=0)
>>> a.execute()
array([[1, 3],
[1, 4]])
Use the `order` keyword to specify a field to use when sorting a
structured tensor:
>>> a = mt.array([('a', 2), ('c', 1)], dtype=[('x', 'S1'), ('y', int)])
>>> a.sort(order='y')
>>> a.execute()
array([('c', 1), ('a', 2)],
dtype=[('x', '|S1'), ('y', '<i4')])
"""
from .base import sort
self._data = sort(self, axis=axis, kind=kind, parallel_kind=parallel_kind,
psrs_kinds=psrs_kinds, order=order).data
def partition(self, kth, axis=-1, kind='introselect', order=None, **kw):
"""
Rearranges the elements in the tensor in such a way that the value of the
element in kth position is in the position it would be in a sorted tensor.
All elements smaller than the kth element are moved before this element and
all equal or greater are moved behind it. The ordering of the elements in
the two partitions is undefined.
Parameters
----------
kth : int or sequence of ints
Element index to partition by. The kth element value will be in its
final sorted position and all smaller elements will be moved before it
and all equal or greater elements behind it.
The order of all elements in the partitions is undefined.
If provided with a sequence of kth it will partition all elements
indexed by kth of them into their sorted position at once.
axis : int, optional
Axis along which to sort. Default is -1, which means sort along the
last axis.
kind : {'introselect'}, optional
Selection algorithm. Default is 'introselect'.
order : str or list of str, optional
When `a` is a tensor with fields defined, this argument specifies
which fields to compare first, second, etc. A single field can
be specified as a string, and not all fields need to be specified,
but unspecified fields will still be used, in the order in which
they come up in the dtype, to break ties.
See Also
--------
        mt.partition : Return a partitioned copy of a tensor.
argpartition : Indirect partition.
sort : Full sort.
Notes
-----
See ``mt.partition`` for notes on the different algorithms.
Examples
--------
>>> import mars.tensor as mt
>>> a = mt.array([3, 4, 2, 1])
>>> a.partition(3)
>>> a.execute()
array([2, 1, 3, 4])
>>> a.partition((1, 3))
>>> a.execute()
array([1, 2, 3, 4])
"""
from .base import partition
self._data = partition(self, kth, axis=axis,
kind=kind, order=order, **kw).data
@property
def flat(self):
"""
Flat iterator object to iterate over arrays.
A `flatiter` iterator is returned by ``x.flat`` for any tensor `x`.
It allows iterating over the tensor as if it were a 1-D array,
either in a for-loop or by calling its `next` method.
Iteration is done in row-major, C-style order (the last
index varying the fastest). The iterator can also be indexed using
basic slicing or advanced indexing.
See Also
--------
Tensor.flat : Return a flat iterator over a tensor.
Tensor.flatten : Returns a flattened copy of a tensor.
Examples
--------
>>> import mars.tensor as mt
>>> x = mt.arange(6).reshape(2, 3)
>>> fl = x.flat
>>> fl[2:4].execute()
array([2, 3])
"""
return self._data.flat
def from_dataframe(self, in_df):
return self._data.from_dataframe(in_df)
def to_dataframe(self, *args, **kwargs):
return self._data.to_dataframe(*args, **kwargs)
def to_numpy(self, session=None, **kw):
return self._data.to_numpy(session, **kw)
SparseTensor = Tensor
class flatiter(object):
def __init__(self, tensor):
# flatten creates a copy
self._flatten_tensor = tensor.flatten()
# ravel creates a view
self._ravel_tensor = tensor.ravel()
def __getitem__(self, item):
# a.flat[item] create a copy
return self._flatten_tensor[item]
def __setitem__(self, key, value):
# a.flat[item] = value will apply changes to original tensor
self._ravel_tensor[key] = value
class Indexes(Serializable):
_indexes = AnyField('indexes')
def __init__(self, indexes=None, **kw):
self._indexes = indexes
super().__init__(**kw)
@property
def indexes(self):
return self._indexes
class MutableTensorData(TensorData):
__slots__ = ()
# required fields
_name = StringField('name')
_compression = BoolField("compression")
_chunk_eps = ListField('chunk_eps')
def __init__(self, name=None, op=None, shape=None, dtype=None, key=None, chunk_eps=None,
nsplits=None, chunks=None, **kw):
super().__init__(op=op, shape=shape, dtype=dtype, nsplits=nsplits, chunks=chunks,
_name=name, _key=key, _chunk_eps=chunk_eps, **kw)
@classmethod
def cls(cls, provider):
return super().cls(provider)
def __str__(self):
return 'MutableTensor(op={0}, name={1}, shape={2})'.format(self.op.__class__.__name__,
self.name,
self.shape)
def __repr__(self):
return 'MutableTensor <op={0}, name={1}, shape={2}, key={3}>'.format(self.op.__class__.__name__,
self.name,
self.shape,
self.key)
@property
def params(self):
        # params return the properties which are useful to rebuild a new tileable object
return {
'shape': self.shape,
'dtype': self.dtype,
'name': self.name,
'compression': self.compression,
"chunk_eps": self.chunk_eps,
}
@property
def name(self):
return getattr(self, '_name', None)
@property
def compression(self):
return getattr(self, '_compression', None)
@property
def chunk_eps(self):
return getattr(self, '_chunk_eps', None)
class MutableTensor(Entity):
__slots__ = ("_chunk_to_endpoint", "_chunk_buffers", "_record_type", "_buffer_size")
_allow_data_type_ = (MutableTensorData,)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._chunk_buffers = defaultdict(lambda: [])
self._record_type = np.dtype([("index", np.uint32), ("ts", np.dtype('datetime64[ns]')), ("value", self.dtype)])
if self.chunks:
self._buffer_size = np.prod(self.chunks[0].shape)
else:
            # MutableTensor doesn't hold chunks in LocalSession, thus we don't care about the buffer size
self._buffer_size = 0
if self._data.chunk_eps is not None:
self._chunk_to_endpoint = dict((c.key, ep) for c, ep in zip(self.chunks, self._data.chunk_eps))
else:
self._chunk_to_endpoint = dict()
def __len__(self):
return len(self._data)
@property
def name(self):
return self._data.name
@property
def chunk_to_endpoint(self):
return self._chunk_to_endpoint
def __setitem__(self, index, value):
from ..session import Session
session = Session.default_or_local()
return session.write_mutable_tensor(self, index, value)
def seal(self):
from ..session import Session
session = Session.default_or_local()
return session.seal(self)
@log_unhandled
def _do_write(self, tensor_index, value):
''' Notes [buffer management of mutable tensor]:
Write operations on a mutable tensor are buffered at client. Every chunk has a
corresponding buffer in the form of
{chunk_key: [(index, ts, value)]}
Every time we write to a chunk, we will append the new operation records to
the list
        At the end of a write, if the buffer size exceeds `buffer_size`, the buffer will be sent
to the corresponding worker.
The insights for above design are:
1. `append` on (small) list is fast
        2. We try to flush the (affected) buffer to the worker at the end of every write; the buffer
           size is guaranteed to be less than 2 * chunk_size.
'''
from .indexing.core import process_index, calc_shape
from .indexing.getitem import TensorIndex
from .utils import setitem_as_records
tensor_index = process_index(self.ndim, tensor_index)
output_shape = calc_shape(self.shape, tensor_index)
index_tensor_op = TensorIndex(dtype=self.dtype, sparse=False, indexes=tensor_index)
index_tensor = index_tensor_op.new_tensor([self], tuple(output_shape))._inplace_tile()
output_chunks = index_tensor.chunks
is_scalar = np.isscalar(value) or isinstance(value, tuple) and self.dtype.fields
if not is_scalar:
value = np.broadcast_to(value, output_shape).astype(self.dtype)
nsplits_acc = [np.cumsum((0,) + tuple(c.shape[i] for c in output_chunks
if all(idx == 0 for j, idx in enumerate(c.index) if j != i)))
for i in range(len(output_chunks[0].shape))]
now = np.datetime64(datetime.now())
affected_chunk_keys = []
for output_chunk in output_chunks:
records = self._chunk_buffers[output_chunk.op.input.key]
records += setitem_as_records(nsplits_acc, output_chunk, value, now, is_scalar=is_scalar)
affected_chunk_keys.append(output_chunk.op.input.key)
# Try to flush affected chunks
return self._do_flush(self._buffer_size, affected_chunk_keys)
@log_unhandled
def _do_flush(self, buffer_size_limit=1, affected_chunk_keys=None):
chunk_records_to_send = []
affected_chunk_keys = affected_chunk_keys or self._chunk_buffers.keys()
for chunk_key in affected_chunk_keys:
records = self._chunk_buffers[chunk_key]
if len(records) >= buffer_size_limit:
chunk_records_to_send.append((chunk_key, self._chunk_to_endpoint[chunk_key],
np.array(records, dtype=self._record_type)))
self._chunk_buffers[chunk_key] = []
return chunk_records_to_send
def mutable_tensor(name, shape=None, dtype=np.float_, fill_value=None, chunk_size=None):
"""
Create or get a mutable tensor using the local or default session.
When `shape` is `None`, it will try to get the mutable tensor with name `name`. Otherwise,
it will try to create a mutable tensor using the provided `name` and `shape`.
Parameters
----------
name : str
Name of the mutable tensor.
shape : int or sequence of ints
Shape of the new mutable tensor, e.g., ``(2, 3)`` or ``2``.
dtype : data-type, optional
The desired data-type for the mutable tensor, e.g., `mt.int8`. Default is `mt.float_`.
chunk_size: int or tuple of ints, optional
Specifies chunk size for each dimension.
fill_value: scalar, optional
        The created mutable tensor will be filled with `fill_value` by default; if the parameter is None,
the newly created mutable tensor will be initialized with `np.zeros`. See also `numpy.full`.
"""
from ..session import Session
session = Session.default_or_local()
if shape is None:
return session.get_mutable_tensor(name)
else:
return session.create_mutable_tensor(name, shape=shape, dtype=dtype,
fill_value=fill_value, chunk_size=chunk_size)
TENSOR_TYPE = (Tensor, TensorData)
TENSOR_CHUNK_TYPE = (TensorChunk, TensorChunkData)
register_output_types(OutputType.tensor, TENSOR_TYPE, TENSOR_CHUNK_TYPE)
register_output_types(OutputType.scalar, TENSOR_TYPE, TENSOR_CHUNK_TYPE)
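if __name__ == '__main__':
    # Illustrative sketch of the mutable_tensor() helper documented above. It
    # assumes a default or local Mars session that supports mutable tensors
    # (required by creation, __setitem__ and seal()); the name, shape and the
    # written value are made up.
    t = mutable_tensor('demo', shape=(4, 4), dtype=np.float_, chunk_size=2)
    t[1, 2] = 3.0      # buffered on the client, see MutableTensor._do_write above
    sealed = t.seal()  # flush the buffered writes and finalize the tensor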
|
py
|
1a5c77b9396ad8ef5ebcfbe2a0916c657568f8f5
|
import numpy as np
import requests
import os
from PIL import Image, ImageOps
from pathlib import Path
from retrofy.configs import Filter_Configs
import retrofy.utils as utils
CONFIGS = Filter_Configs()
class Filter():
MAX_SIZE = CONFIGS.MAXS["size"]
def __init__(self, img_src):
if isinstance(img_src, (str, Image.Image)) == False:
raise TypeError("Parameter 'img_src' must be a string or a Pillow Image object.")
self.__img_src = img_src
        self.__last_modifications = []  # list of all modifications that weren't undone
self.__last_undos = []
self.__load_image()
@property
def modified_img(self):
return self.__modified_img
@modified_img.setter
def modified_img(self, img):
if isinstance(img, Image.Image) == False:
raise TypeError("Parameter 'modified_img' must be a Pillow Image object.")
self.__modified_img = img
self.__last_modifications.append(self.__modified_img)
@property
def original_img(self):
return self.__original_img
@property
def last_modifications(self):
return self.__last_modifications
def __load_image(self):
        # if img_src already is a PIL Image object
if isinstance(self.__img_src, Image.Image) == True:
self.__original_img = self.__img_src
else:
if utils.is_url(self.__img_src) == True:
try:
self.__img_src = requests.get(self.__img_src, stream=True).raw
self.__original_img = Image.open(self.__img_src).convert("RGB")
except:
raise ValueError("Could not download image from URL '{}'.".format(self.__img_src))
else:
try:
self.__original_img = Image.open(self.__img_src).convert("RGB")
except:
raise ValueError("Could not access image on file '{}'.".format(self.__img_src))
#resize large images (with same aspect ratio)
self.__original_img.thumbnail(self.MAX_SIZE)
self.__modified_img = self.__original_img
def undo(self, times=1):
if isinstance(times, int) == False:
raise TypeError("Parameter 'times' must be an integer.")
if len(self.__last_modifications) > 0:
for i in range(times):
if len(self.__last_modifications) > 0:
self.__last_undos.append(self.__last_modifications[-1])
self.__last_modifications.pop(-1)
if len(self.__last_modifications) == 0:
self.reset()
else:
self.__modified_img = self.__last_modifications[-1]
def redo(self, times=1):
if isinstance(times, int) == False:
raise TypeError("Parameter 'times' must be an integer.")
if len(self.__last_undos) > 0:
for i in range(times):
if len(self.__last_undos) > 0:
self.modified_img = self.__last_undos[-1]
self.__last_undos.pop(-1)
def reset(self):
self.__modified_img = self.__original_img
def show(self, original=False):
if isinstance(original, bool) == False:
raise TypeError("Parameter 'original' must be a boolean.")
if original == False:
self.__modified_img.show()
else:
self.__original_img.show()
def save(self, path, original=False):
if isinstance(original, bool) == False:
raise TypeError("Parameter 'original' must be a boolean.")
if isinstance(path, str) == False and isinstance(path, Path) == False:
raise TypeError("Parameter 'path' must be a string or a Path object.")
path = Path(path)
if path.suffix == "":
path = path.parent / Path(path.stem + ".png")
if self.__modified_img.mode == "RGBA" and path.suffix != ".png":
raise ValueError("RGBA Image must have 'png' file extension.")
try:
if original == False:
self.__modified_img.save(path)
else:
self.__original_img.save(path)
except:
raise ValueError("Could not save image on especified path.")
|
py
|
1a5c77c31dcde499578f6e927a312a8f3bbce481
|
# coding: utf-8
"""
OpenAPI Petstore */ ' \" =end -- \\r\\n \\n \\r
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ */ ' \" =end -- # noqa: E501
OpenAPI spec version: 1.0.0 */ ' \" =end -- \\r\\n \\n \\r
Contact: [email protected] */ ' \" =end -- \\r\\n \\n \\r
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from petstore_api.api_client import ApiClient
class FakeApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def test_code_inject____end__rn_n_r(self, **kwargs): # noqa: E501
"""To test code injection */ ' \" =end -- \\r\\n \\n \\r # noqa: E501
To test code injection */ ' \" =end -- \\r\\n \\n \\r # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_code_inject____end__rn_n_r(async_req=True)
>>> result = thread.get()
:param async_req bool
:param UNKNOWN_BASE_TYPE unknown_base_type:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.test_code_inject____end__rn_n_r_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.test_code_inject____end__rn_n_r_with_http_info(**kwargs) # noqa: E501
return data
def test_code_inject____end__rn_n_r_with_http_info(self, **kwargs): # noqa: E501
"""To test code injection */ ' \" =end -- \\r\\n \\n \\r # noqa: E501
To test code injection */ ' \" =end -- \\r\\n \\n \\r # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.test_code_inject____end__rn_n_r_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param UNKNOWN_BASE_TYPE unknown_base_type:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
local_var_params = locals()
all_params = ['unknown_base_type'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
for key, val in six.iteritems(local_var_params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method test_code_inject____end__rn_n_r" % key
)
local_var_params[key] = val
del local_var_params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'unknown_base_type' in local_var_params:
body_params = local_var_params['unknown_base_type']
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json', '*/ \" =end -- ']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/fake', 'PUT',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=local_var_params.get('async_req'),
_return_http_data_only=local_var_params.get('_return_http_data_only'), # noqa: E501
_preload_content=local_var_params.get('_preload_content', True),
_request_timeout=local_var_params.get('_request_timeout'),
collection_formats=collection_formats)
|
py
|
1a5c78b4a8702704b0e33b6e831d685ca9d54fe3
|
"""Test the rig module."""
import numpy as np
from opensfm import pygeometry, rig, types
def test_create_instances_with_patterns() -> None:
# A first rig model defined as left/right/top/bottom
instance1 = [
"12345_left.jpg",
"12345_bottom.jpg",
"12345_top.jpg",
"12345_right.jpg",
]
instance2 = [
"1234567_left.jpg",
"1234567_bottom.jpg",
"1234567_top.jpg",
"1234567_right.jpg",
]
patterns_12 = {
"camera_left": "(left)",
"camera_right": "(right)",
"camera_top": "(top)",
"camera_bottom": "(bottom)",
}
# A second one as RED/GREEN/BLUE
instance3 = [
"RED_SENSOR_001-12345678.jpg",
"GREEN_SENSOR_002-12345678.jpg",
"BLUE_SENSOR_003-12345678.jpg",
]
patterns_3 = {
"red": "(RED_SENSOR_001)",
"green": "(GREEN_SENSOR_002)",
"blue": "(BLUE_SENSOR_003)",
}
# Run detection with these two rig model patterns
rig_patterns = patterns_12
rig_patterns.update(patterns_3)
instances = rig.create_instances_with_patterns(
instance1 + instance2 + instance3, rig_patterns
)
# Ensure we have 2 instance for the first rig, and 1 for the second
assert len(instances) == 3
recovered_instance1 = instances["12345_.jpg"]
assert [x[0] for x in recovered_instance1] == instance1
recovered_instance2 = instances["1234567_.jpg"]
assert [x[0] for x in recovered_instance2] == instance2
recovered_instance3 = instances["-12345678.jpg"]
assert [x[0] for x in recovered_instance3] == instance3
def test_compute_relative_pose() -> None:
# 4-cameras rig
camera1 = pygeometry.Camera.create_spherical()
camera1.id = "camera1"
camera2 = pygeometry.Camera.create_spherical()
camera2.id = "camera2"
camera3 = pygeometry.Camera.create_spherical()
camera3.id = "camera3"
camera4 = pygeometry.Camera.create_spherical()
camera4.id = "camera4"
# a bit cumbersome that we need to have some reconstruction
rec = types.Reconstruction()
rec.add_camera(camera1)
rec.add_camera(camera2)
rec.add_camera(camera3)
rec.add_camera(camera4)
# First rig instance
rec.create_shot("shot1", "camera1", pygeometry.Pose([0, 0, 0], [-2, -2, 0]))
rec.create_shot("shot2", "camera2", pygeometry.Pose([0, 0, 0], [-3, -3, 0]))
rec.create_shot("shot3", "camera3", pygeometry.Pose([0, 0, 0], [-1, -3, 0]))
rec.create_shot("shot4", "camera4", pygeometry.Pose([0, 0, 0], [-2, -4, 0]))
# Second rig instance (rotated by pi/2 around Z)
pose_instance = pygeometry.Pose([0, 0, -1.5707963])
pose_instance.set_origin([-6, 0, 0])
rec.create_shot("shot5", "camera1", pose_instance)
pose_instance.set_origin([-7, 1, 0])
rec.create_shot("shot6", "camera2", pose_instance)
pose_instance.set_origin([-7, -1, 0])
rec.create_shot("shot7", "camera3", pose_instance)
pose_instance.set_origin([-8, 0, 0])
rec.create_shot("shot8", "camera4", pose_instance)
pose_instances = [
[
(
rec.shots["shot1"],
"camera_id_1",
),
(
rec.shots["shot2"],
"camera_id_2",
),
(
rec.shots["shot3"],
"camera_id_3",
),
(
rec.shots["shot4"],
"camera_id_4",
),
],
[
(
rec.shots["shot5"],
"camera_id_1",
),
(
rec.shots["shot6"],
"camera_id_2",
),
(
rec.shots["shot7"],
"camera_id_3",
),
(
rec.shots["shot8"],
"camera_id_4",
),
],
]
# Compute rig cameras poses
rig_cameras = rig.compute_relative_pose(pose_instances)
assert np.allclose(
[0, -1, 0], rig_cameras["camera_id_1"].pose.get_origin(), atol=1e-7
)
assert np.allclose(
[1, 0, 0], rig_cameras["camera_id_2"].pose.get_origin(), atol=1e-7
)
assert np.allclose(
[-1, 0, 0], rig_cameras["camera_id_3"].pose.get_origin(), atol=1e-7
)
assert np.allclose(
[0, 1, 0], rig_cameras["camera_id_4"].pose.get_origin(), atol=1e-7
)
|
py
|
1a5c795ac998e642a3668791b8db14a77e6aeca2
|
# class Solution:
# def getSmallestString(self, n: int, k: int) -> str:
# '''
# Solution with heap implementation
# '''
# # create a lookup table
# import string
# # tb1 = {(i+1):char for i,char in enumerate(string.ascii_lowercase)}
# # exception
# if n==1:
# return string.ascii_lowercase[k-1]
# import heapq
# hp = [1 for i in range(n)]
# heapq.heapify(hp)
# total = n
# while total <= k:
# if total == k:
# return ''.join([string.ascii_lowercase[heapq.heappop(hp)-1] for i in range(n)])
# if k - total > 26:
# out = heapq.heappushpop(hp, 26)
# total = total - out + 26
# else:
# out = heapq.heappop(hp)
# total -= out
# if k - total > 26:
# heapq.heappush(hp, 26)
# total += 26
# else:
# heapq.heappush(hp, k - total)
# total += (k - total)
# class Solution:
# def getSmallestString(self, n: int, k: int) -> str:
# '''
# Solution with simple loop, one pass. Memory about the same, run time much shorter
# '''
# source = 'abcdefghijklmnopqrstuvwxyz'
# raw = [1 for i in range(n)]
# total = n
# i = n-1
# while i >= 0:
# if total == k:
# return ''.join([source[v-1] for v in raw])
# if k - total > 26:
# total -= raw[i]
# raw[i] = 26
# total += 26
# else:
# total -= raw[i]
# if k - total > 26:
# raw[i] = 26
# total += raw[i]
# else:
# raw[i] = k - total
# total += k - total
# i -= 1
# return ''.join([source[v-1] for v in raw])
# class Solution:
# def getSmallestString(self, n: int, k: int) -> str:
# '''
# Solution with direct accumulation, much less memory, but still long run time
# '''
# source = 'abcdefghijklmnopqrstuvwxyz'
# output = ''
# total = 0
# i = n-1
# while i >= 0:
# if total == k:
# return output
# if k - i - total > 26:
# output = 'z' + output
# total += 26
# else:
# output = source[k - i - total - 1] + output
# total += k - i - total
# i -= 1
# return output
class Solution:
def getSmallestString(self, n, k):
source = 'abcdefghijklmnopqrstuvwxyz'
num_z = (k - n) // 25
print(num_z)
if (k - n) % 25 == 0:
return 'a' * (n - num_z) + 'z' * num_z
else:
return 'a' * (n - num_z - 1) + source[(k - n) % 25] + 'z' * num_z
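# Quick sanity checks for the closed-form version above: each 'a' -> 'z' upgrade adds 25
# to the total, so num_z = (k - n) // 25 and any remainder becomes one middle letter.
def _example_get_smallest_string():
    s = Solution()
    assert s.getSmallestString(3, 27) == 'aay'    # 1 + 1 + 25 = 27
    assert s.getSmallestString(5, 73) == 'aaszz'  # 1 + 1 + 19 + 26 + 26 = 73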
|
py
|
1a5c79a0328ad1e2b78b543172389686f669a45f
|
a=[[7,0], [4,4], [7,1], [5,0], [6,1], [5,2]]
a.sort(key=lambda x: (x[0],-x[1]))
from collections import deque
from bisect import insort
o=[]
while a:
p=a.pop()
o.insert(p[1],p)
print(o)
|
py
|
1a5c7a7e592ef59bd800f4efbc6a39f7e7a18cbb
|
'''
MIT License
Copyright (c) 2020 Futurewei Cloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import configparser
class Runnable:
def __init__(self):
self.name = ""
self.host = ""
self.image = ""
self.docker_args = ""
self.program_args = ""
def __str__(self):
return "Name: " + self.name + \
"\nHost: " + self.host + \
"\nImage: " + self.image + \
"\nDocker args: " + self.docker_args + \
"\nProgram args: " + self.program_args
def getDockerPull(self):
return "sudo docker pull " + self.image
def getDockerRun(self):
return "sudo docker run " + self.docker_args + " " + self.program_args
def getDockerStop(self):
return "sudo docker stop -t 30 " + self.name
def getDockerRemove(self):
return "sudo docker container rm " + self.name
def getDockerLogs(self):
return "sudo docker logs --tail 5000 " + self.name
def parseRunnableConfig(locals_filename, runnable, config_files, cpus, cpus_base):
binary = ""
parsed_args = []
for filename in config_files.split(' '):
config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
config.read([locals_filename]+[filename])
if "binary" in config["deployment"]:
binary = config["deployment"]["binary"]
if "image" in config["deployment"]:
runnable.image = config["deployment"]["image"]
if "docker_args" in config["deployment"]:
runnable.docker_args += config["deployment"]["docker_args"] + " "
for arg in config["program_args"]:
if arg in parsed_args:
continue
parsed_args.append(arg)
value = config["program_args"][arg]
if value == "$cpus":
value = str(cpus)
if value == "$cpus_expand":
value = str(cpus_base) + "-" + str(cpus_base+cpus-1)
runnable.program_args += "--" + arg + " " + value + " "
runnable.docker_args += "--name " + runnable.name + " " + runnable.image + " " + binary
def parseConfig(locals_filename, config_filename):
config = configparser.ConfigParser(interpolation=configparser.ExtendedInterpolation())
config.read([locals_filename, config_filename])
all_to_run = []
cpuset_base = int(config["LocalConfig"]["cpuset_base"])
for section in config.sections():
if (section == "LocalConfig"):
continue
hosts = config[section]["hosts"].split(' ')
for host in hosts:
runnable = Runnable()
runnable.name = section
runnable.host = host
parseRunnableConfig(locals_filename, runnable, config[section]["configs"], int(config[section]["cpus"]), cpuset_base)
all_to_run.append(runnable)
return all_to_run
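# Hedged usage sketch for the helpers above. The ini file names are hypothetical; they are
# only expected to contain the keys read by parseConfig()/parseRunnableConfig()
# ([LocalConfig] cpuset_base, per-section hosts/configs/cpus, [deployment], [program_args]).
def _example_parse_config():
    for runnable in parseConfig("locals.ini", "cluster.ini"):
        print(runnable)                 # formatted via Runnable.__str__
        print(runnable.getDockerRun())  # full docker run command for that host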
|
py
|
1a5c7b03ae9d7b77613daf937a4c665d90826369
|
#!/usr/bin/env python
# coding: utf-8
# # Data distribution check
# In[14]:
import numpy as np
import os
import pandas as pd
import matplotlib.pyplot as plt
#import umap as umap
"""
# In[24]:
# Read the old and new data
old = pd.read_csv('data_x_old.csv', header=None,sep=' ',dtype='float')
old.info()
old = old.values
new = pd.read_csv('data_x.csv', header=None,sep=' ',dtype='float')
new.info()
new = new.values
# ## Histogram
# In[29]:
# Plot the histogram of data
def histogram_plot(data, dim):
f = plt.figure()
# Determine if this is a new data
if np.shape(data)[0] == 17500:
new_flag = True
name = 'new'
else:
new_flag = False
name = 'old'
# Plot the histogram
plt.hist(data[:, dim],bins=100)
plt.title('histogram of axim {} of {} data '.format(dim, name))
plt.ylabel('cnt')
plt.xlabel('axis {}'.format(dim))
plt.savefig('histogram of axim {} of {} data.png'.format(dim, name))
# In[30]:
for i in range(8):
histogram_plot(new, i)
histogram_plot(old, i)
# ## Clustering
# In[31]:
data_all = np.concatenate([old, new])
reducer = umap.UMAP()
embedding = reducer.fit_transform(data_all)
embedding.shape
# In[37]:
# Plot the umap graph
lo = len(old)
ln = len(new)
label_all = np.zeros([lo + ln, ])
label_all[lo:] = 1
f = plt.figure()
plt.scatter(embedding[:lo, 0], embedding[:lo, 1], label='old',s=1)
plt.legend()
plt.xlabel('u1')
plt.ylabel('u2')
plt.title('umap plot for old data')
plt.savefig('umap plot for old data.png')
f = plt.figure()
plt.scatter(embedding[lo:, 0], embedding[lo:, 1], label='new',s=1)
plt.legend()
plt.xlabel('u1')
plt.ylabel('u2')
plt.title('umap plot for new data')
plt.savefig('umap plot for new data.png')
f = plt.figure()
plt.scatter(embedding[:lo, 0], embedding[:lo, 1], label='old',s=1)
plt.scatter(embedding[lo:, 0], embedding[lo:, 1], label='new',s=1)
plt.legend()
plt.xlabel('u1')
plt.ylabel('u2')
plt.title('umap plot for old data and new data')
plt.savefig('umap plot for old data and new data.png')
# ## Visualization
#
# In[12]:
def plot_scatter(old, new, dim1, dim2):
f = plt.figure()
plt.scatter(old[:, dim1], old[:, dim2], label='old',marker='x')#,s=10)
plt.scatter(new[:, dim1], new[:, dim2], label='new',marker='.')#,s=5)
plt.legend()
plt.xlabel('dim {}'.format(dim1))
plt.ylabel('dim {}'.format(dim2))
plt.title('scatter plot of dim{},{} of old and new data'.format(dim1, dim2))
plt.savefig('scatter plot of dim{},{} of old and new data.png'.format(dim1, dim2))
# In[15]:
for i in range(8):
for j in range(8):
if i == j:
continue
plot_scatter(old, new, i, j)
plt.close('all')
# ## Pair-wise scatter plot
# In[19]:
df_old = pd.DataFrame(old)
df_new = pd.DataFrame(new)
psm = pd.plotting.scatter_matrix(df_old, figsize=(15, 15), s=10)
# ## Find the same and plot spectra
# In[38]:
i = 0
for i in range(len(old)):
#print(old[i,:])
new_minus = np.sum(np.square(new - old[i,:]),axis=1)
#print(np.shape(new_minus))
match = np.where(new_minus==0)
#print(match)
if np.shape(match)[1] != 0: #There is a match
print('we found a match! new index {} and old index {} match'.format(match, i))
# In[39]:
print('old index ', old[11819,:])
print('new index ', new[5444,:])
# In[35]:
np.shape(match)
# ### Plot the matched spectra
# In[6]:
y_old = pd.read_csv('data_y_old.csv',header=None,sep=' ')
# In[42]:
y_new = pd.read_csv('data_y_new.csv',header=None,sep=' ')
# In[7]:
y_old = y_old.values
y_new = y_new.values
# In[45]:
# plot the spectra
old_index = 11819
new_index = 5444
f = plt.figure()
plt.plot(y_old[old_index,:],label='old geometry {}'.format(old[old_index, :]))
plt.plot(y_new[new_index,:],label='new geometry {}'.format(new[new_index, :]))
plt.legend()
plt.ylabel('transmission')
plt.xlabel('THz')
plt.savefig('Spectra plot for identical point')
# # Conclusion, this simulation is not the same as before ...
# ### See what percentage are still within range
# In[36]:
#print(old)
#print(new)
hmax = np.max(old[:,0])
hmin = np.min(old[:,1])
rmax = np.max(old[:,4])
rmin = np.min(old[:,4])
print(hmax, hmin, rmax, rmin)
#hmax = np.max(new[:,0])
#hmin = np.min(new[:,1])
#rmax = np.max(new[:,4])
#rmin = np.min(new[:,4])
#print(hmax, hmin, rmax, rmin)
within_range = np.ones([len(new)])
new_minus = np.copy(new)
new_minus[:,:4] -= hmin
new_minus[:,4:] -= rmin
new_plus = np.copy(new)
new_plus[:, :4] -= hmax
new_plus[:, 4:] -= rmax
small_flag = np.min(new_minus, axis=1) < 0
big_flag = np.max(new_plus, axis=1) > 0
within_range[small_flag] = 0
within_range[big_flag] = 0
print(np.sum(within_range) / len(within_range))
print(type(within_range))
print(np.shape(within_range))
print(within_range)
print(new[np.arange(len(within_range))[within_range.astype('bool')],:])
print(np.sum(within_range))
# # Data augmentation
# ## Since the geometry is symmetric, we can augment the data with permutations
# In[13]:
# Check the assumption that the permutation does indeed give you the same spectra
# Check if there is same spectra
i = 0
for i in range(len(y_old)):
#print(old[i,:])
new_minus = np.sum(np.square(y_old - y_old[i,:]),axis=1)
#print(np.shape(new_minus))
match = np.where(new_minus==0)
#print(match)
#print(np.shape(match))
#print(len(match))
#if match[0]
if len(match) != 1:#np.shape(match)[1] != 0: #There is a match
print('we found a match! new index {} and old index {} match'.format(match, i))
# ### Due to physical periodic boundary condition, we can augment the data by doing permutations
# In[39]:
"""
def permutate_periodicity(geometry_in, spectra_in):
"""
:param: geometry_in: numpy array of geometry [n x 8] dim
:param: spectra_in: spectra of the geometry_in [n x k] dim
:return: output of the augmented geometry, spectra [4n x 8], [4n x k]
"""
# Get the dimension parameters
(n, k) = np.shape(spectra_in)
# Initialize the output
spectra_out = np.zeros([4*n, k])
geometry_out = np.zeros([4*n, 8])
#################################################
# start permutation of geometry (case: 1 - 0123)#
#################################################
# case:2 -- 1032
geometry_c2 = geometry_in[:, [1,0,3,2,5,4,7,6]]
# case:3 -- 2301
geometry_c3 = geometry_in[:, [2,3,0,1,6,7,4,5]]
# case:4 -- 3210
geometry_c4 = geometry_in[:, [3,2,1,0,7,6,5,4]]
geometry_out[0*n:1*n, :] = geometry_in
geometry_out[1*n:2*n, :] = geometry_c2
geometry_out[2*n:3*n, :] = geometry_c3
geometry_out[3*n:4*n, :] = geometry_c4
for i in range(4):
spectra_out[i*n:(i+1)*n,:] = spectra_in
return geometry_out, spectra_out
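# Small self-contained check of the augmentation above: 2 geometries (8 parameters each)
# with 3 spectral points become 8 geometries / 8 spectra, one copy per permutation.
def _example_permutate_periodicity():
    g = np.arange(16, dtype=float).reshape(2, 8)
    s = np.arange(6, dtype=float).reshape(2, 3)
    g_aug, s_aug = permutate_periodicity(g, s)
    assert g_aug.shape == (8, 8) and s_aug.shape == (8, 3)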
# In[40]:
data_folder = '/work/sr365/Christian_data/dataIn'
data_out_folder = '/work/sr365/Christian_data_augmented'
for file in os.listdir(data_folder):
data = pd.read_csv(os.path.join(data_folder, file),header=None,sep=',').values
(l, w) = np.shape(data)
g = data[:,2:10]
s = data[:,10:]
g_aug, s_aug = permutate_periodicity(g, s)
output = np.zeros([l*4, w])
output[:, 2:10] = g_aug
output[:, 10:] = s_aug
np.savetxt(os.path.join(data_out_folder, file+'_augmented.csv'),output,delimiter=',')
# In[41]:
#print(np.shape(g))
# In[ ]:
|
py
|
1a5c7b8a28192ca6d1e86f44d9f825ec08faabf0
|
# -*- coding: utf-8 -*-
# BSD 3-Clause License
#
# Copyright (c) 2017, Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
NUSimVRS
"""
from vspk import v5_0 as vsdk
from nuagevsdsim.simentities.nusimresource import NUSimResource
class NUSimVRS(NUSimResource):
""" Represents a VRS
Notes:
System Monitoring details for VRS connected to VSC or HSC
"""
__vspk_class__ = vsdk.NUVRS
__unique_fields__ = ['externalID']
__mandatory_fields__ = []
__default_fields__ = {
}
__get_parents__ = ['container', 'hsc', 'me', 'vm', 'vport', 'vsc']
__create_parents__ = []
def __init__(self):
super(NUSimVRS, self).__init__()
|
py
|
1a5c7bce4df6ba7fdd16868febc4b8f9158557f0
|
'''
Min, Max, and Sorting Dictionaries:
'''
stocks={
'GOOD':520.24,
'FB':76.45,
'YHOO':39.28,
'AMZN':306.21,
'AAPL':99.76
}
print(sorted(zip(stocks.keys(),stocks.values())))
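# The header above also mentions min and max; zipping values first makes the
# comparison run on price rather than on ticker symbol.
print(min(zip(stocks.values(), stocks.keys())))  # (39.28, 'YHOO') - cheapest
print(max(zip(stocks.values(), stocks.keys())))  # (520.24, 'GOOD') - most expensive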
|
py
|
1a5c7d181cd15c5a9b7336616bb19cb36ebbf1c1
|
# inter-node communication
from collections import defaultdict
from enum import IntEnum, unique
from plenum.common.plenum_protocol_version import PlenumProtocolVersion
from plenum.common.roles import Roles
from plenum.common.transactions import PlenumTransactions
NOMINATE = "NOMINATE"
REELECTION = "REELECTION"
PRIMARY = "PRIMARY"
PRIMDEC = "PRIMARYDECIDED"
BATCH = "BATCH"
REQACK = "REQACK"
REQNACK = "REQNACK"
REJECT = "REJECT"
POOL_LEDGER_TXNS = "POOL_LEDGER_TXNS"
PROPAGATE = "PROPAGATE"
PREPREPARE = "PREPREPARE"
OLD_VIEW_PREPREPARE_REQ = "OLD_VIEW_PREPREPARE_REQ"
OLD_VIEW_PREPREPARE_REP = "OLD_VIEW_PREPREPARE_REP"
PREPARE = "PREPARE"
COMMIT = "COMMIT"
CHECKPOINT = "CHECKPOINT"
CHECKPOINT_STATE = "CHECKPOINT_STATE"
THREE_PC_STATE = "THREE_PC_STATE"
UPDATE_BLS_MULTI_SIG = "UPDATE_BLS_MULTI_SIG"
REPLY = "REPLY"
ORDERED = "ORDERED"
REQKEY = "REQKEY"
INSTANCE_CHANGE = "INSTANCE_CHANGE"
BACKUP_INSTANCE_FAULTY = "BACKUP_INSTANCE_FAULTY"
VIEW_CHANGE_DONE = "VIEW_CHANGE_DONE"
CURRENT_STATE = "CURRENT_STATE"
VIEW_CHANGE = "VIEW_CHANGE"
VIEW_CHANGE_ACK = "VIEW_CHANGE_ACK"
NEW_VIEW = "NEW_VIEW"
LEDGER_STATUS = "LEDGER_STATUS"
CONSISTENCY_PROOF = "CONSISTENCY_PROOF"
CATCHUP_REQ = "CATCHUP_REQ"
CATCHUP_REP = "CATCHUP_REP"
MESSAGE_REQUEST = 'MESSAGE_REQUEST'
MESSAGE_RESPONSE = 'MESSAGE_RESPONSE'
OBSERVED_DATA = 'OBSERVED_DATA'
BATCH_COMMITTED = 'BATCH_COMMITTED'
VIEW_CHANGE_START = 'ViewChangeStart'
VIEW_CHANGE_CONTINUE = 'ViewChangeContinue'
BLACKLIST = "BLACKLIST"
THREE_PC_PREFIX = "3PC: "
MONITORING_PREFIX = "MONITORING: "
VIEW_CHANGE_PREFIX = "VIEW CHANGE: "
CATCH_UP_PREFIX = "CATCH-UP: "
PRIMARY_SELECTION_PREFIX = "PRIMARY SELECTION: "
BLS_PREFIX = "BLS: "
OBSERVER_PREFIX = "OBSERVER: "
PROPOSED_VIEW_NO = "proposed_view_no"
NAME = "name"
VERSION = "version"
IP = "ip"
PORT = "port"
KEYS = "keys"
TYPE = "type"
TXN_TYPE = "type"
TXN_ID = "txnId"
ORIGIN = "origin"
# Use f.IDENTIFIER.nm
IDENTIFIER = "identifier"
TARGET_NYM = "dest"
DATA = "data"
RAW = "raw"
ENC = "enc"
HASH = "hash"
ALIAS = "alias"
PUBKEY = "pubkey"
VERKEY = "verkey"
BLS_KEY = "blskey"
BLS_KEY_PROOF = "blskey_pop"
NYM_KEY = "NYM"
NODE_IP = "node_ip"
NODE_PORT = "node_port"
CLIENT_IP = "client_ip"
CLIENT_PORT = "client_port"
# CHANGE_HA = "CHANGE_HA"
# CHANGE_KEYS = "CHANGE_KEYS"
SERVICES = "services"
VALIDATOR = "VALIDATOR"
CLIENT = "CLIENT"
ROLE = 'role'
NONCE = 'nonce'
ATTRIBUTES = 'attributes'
VERIFIABLE_ATTRIBUTES = 'verifiableAttributes'
PREDICATES = 'predicates'
TXN_TIME = 'txnTime'
TXN_DATA = "txnData"
LAST_TXN = "lastTxn"
TXNS = "Txns"
BY = "by"
FORCE = 'force'
AML_VERSION = 'version'
AML = 'aml'
AML_CONTEXT = 'amlContext'
AUDIT_TXN_VIEW_NO = "viewNo"
AUDIT_TXN_PP_SEQ_NO = "ppSeqNo"
AUDIT_TXN_LEDGERS_SIZE = "ledgerSize"
AUDIT_TXN_LEDGER_ROOT = "ledgerRoot"
AUDIT_TXN_STATE_ROOT = "stateRoot"
AUDIT_TXN_PRIMARIES = "primaries"
AUDIT_TXN_DIGEST = "digest"
AUDIT_TXN_NODE_REG = "nodeReg"
# State proof fields
STATE_PROOF = 'state_proof'
ROOT_HASH = "root_hash"
MULTI_SIGNATURE = "multi_signature"
PROOF_NODES = "proof_nodes"
VALUE = 'value'
MULTI_SIGNATURE_SIGNATURE = 'signature'
MULTI_SIGNATURE_PARTICIPANTS = 'participants'
MULTI_SIGNATURE_VALUE = 'value'
MULTI_SIGNATURE_VALUE_LEDGER_ID = 'ledger_id'
MULTI_SIGNATURE_VALUE_STATE_ROOT = 'state_root_hash'
MULTI_SIGNATURE_VALUE_TXN_ROOT = 'txn_root_hash'
MULTI_SIGNATURE_VALUE_POOL_STATE_ROOT = 'pool_state_root_hash'
MULTI_SIGNATURE_VALUE_TIMESTAMP = 'timestamp'
# ROLES
IDENTITY_OWNER = Roles.IDENTITY_OWNER.value
STEWARD = Roles.STEWARD.value
TRUSTEE = Roles.TRUSTEE.value
IDENTITY_OWNER_STRING = None
STEWARD_STRING = 'STEWARD'
TRUSTEE_STRING = 'TRUSTEE'
# TXNs
NODE = PlenumTransactions.NODE.value
NYM = PlenumTransactions.NYM.value
AUDIT = PlenumTransactions.AUDIT.value
GET_TXN = PlenumTransactions.GET_TXN.value
TXN_AUTHOR_AGREEMENT = PlenumTransactions.TXN_AUTHOR_AGREEMENT.value
TXN_AUTHOR_AGREEMENT_AML = PlenumTransactions.TXN_AUTHOR_AGREEMENT_AML.value
TXN_AUTHOR_AGREEMENT_DISABLE = PlenumTransactions.TXN_AUTHOR_AGREEMENT_DISABLE.value
GET_TXN_AUTHOR_AGREEMENT = PlenumTransactions.GET_TXN_AUTHOR_AGREEMENT.value
GET_TXN_AUTHOR_AGREEMENT_AML = PlenumTransactions.GET_TXN_AUTHOR_AGREEMENT_AML.value
CURRENT_TXN_PAYLOAD_VERSIONS = defaultdict(lambda: "1")
CURRENT_TXN_PAYLOAD_VERSIONS[TXN_AUTHOR_AGREEMENT] = "2"
CURRENT_TXN_VERSION = "1"
# TXN
# TODO: many of these constants will be replaced
# by constants from Request after Request refactoring
TXN_PAYLOAD = "txn"
TXN_PAYLOAD_TYPE = "type"
TXN_PAYLOAD_PROTOCOL_VERSION = "protocolVersion"
TXN_PAYLOAD_DATA = "data"
TXN_PAYLOAD_VERSION = "ver"
TXN_PAYLOAD_METADATA = "metadata"
TXN_PAYLOAD_METADATA_FROM = "from"
TXN_PAYLOAD_METADATA_ENDORSER = "endorser"
TXN_PAYLOAD_METADATA_REQ_ID = "reqId"
TXN_PAYLOAD_METADATA_DIGEST = "digest"
TXN_PAYLOAD_METADATA_PAYLOAD_DIGEST = "payloadDigest"
TXN_PAYLOAD_METADATA_TAA_ACCEPTANCE = "taaAcceptance"
TXN_METADATA = "txnMetadata"
TXN_METADATA_TIME = "txnTime"
TXN_METADATA_ID = "txnId"
TXN_METADATA_SEQ_NO = "seqNo"
TXN_SIGNATURE = "reqSignature"
TXN_VERSION = "ver"
TXN_SIGNATURE_TYPE = "type"
ED25519 = "ED25519"
TXN_SIGNATURE_VALUES = "values"
TXN_SIGNATURE_FROM = "from"
TXN_SIGNATURE_VALUE = "value"
TXN_AUTHOR_AGREEMENT_TEXT = "text"
TXN_AUTHOR_AGREEMENT_VERSION = "version"
TXN_AUTHOR_AGREEMENT_DIGEST = "digest"
TXN_AUTHOR_AGREEMENT_RETIREMENT_TS = "retirement_ts"
TXN_AUTHOR_AGREEMENT_RATIFICATION_TS = "ratification_ts"
GET_TXN_AUTHOR_AGREEMENT_VERSION = "version"
GET_TXN_AUTHOR_AGREEMENT_DIGEST = "digest"
GET_TXN_AUTHOR_AGREEMENT_TIMESTAMP = "timestamp"
GET_TXN_AUTHOR_AGREEMENT_AML_VERSION = "version"
GET_TXN_AUTHOR_AGREEMENT_AML_TIMESTAMP = "timestamp"
class ClientBootStrategy(IntEnum):
Simple = 1
PoolTxn = 2
Custom = 3
class StorageType(IntEnum):
File = 1
Ledger = 2
class KeyValueStorageType(IntEnum):
Leveldb = 1
Memory = 2
Rocksdb = 3
ChunkedBinaryFile = 4
BinaryFile = 5
class PreVCStrategies(IntEnum):
VC_START_MSG_STRATEGY = 1
@unique
class LedgerState(IntEnum):
not_synced = 1 # Still gathering consistency proofs
syncing = 2 # Got sufficient consistency proofs, will be sending catchup
# requests and waiting for their replies
synced = 3 # Got replies for all catchup requests, indicating catchup
# complete for the ledger
OP_FIELD_NAME = "op"
CLIENT_STACK_SUFFIX = "C"
CLIENT_BLACKLISTER_SUFFIX = "BLC"
NODE_BLACKLISTER_SUFFIX = "BLN"
NODE_PRIMARY_STORAGE_SUFFIX = "PS"
NODE_TXN_STORE_SUFFIX = "TS"
NODE_HASH_STORE_SUFFIX = "HS"
HS_FILE = "file"
HS_MEMORY = "memory"
HS_LEVELDB = 'leveldb'
HS_ROCKSDB = 'rocksdb'
LAST_SENT_PRE_PREPARE = 'lastSentPrePrepare'
PLUGIN_BASE_DIR_PATH = "PluginBaseDirPath"
POOL_LEDGER_ID = 0
DOMAIN_LEDGER_ID = 1
CONFIG_LEDGER_ID = 2
AUDIT_LEDGER_ID = 3
# Store labels
BLS_LABEL = 'bls'
TS_LABEL = 'ts'
IDR_CACHE_LABEL = 'idr'
ATTRIB_LABEL = 'attrib'
SEQ_NO_DB_LABEL = 'seq_no_db'
NODE_STATUS_DB_LABEL = 'node_status_db'
LAST_SENT_PP_STORE_LABEL = 'last_sent_pp_store'
VALID_LEDGER_IDS = (POOL_LEDGER_ID, DOMAIN_LEDGER_ID, CONFIG_LEDGER_ID, AUDIT_LEDGER_ID)
CURRENT_PROTOCOL_VERSION = PlenumProtocolVersion.TXN_FORMAT_1_0_SUPPORT.value
OPERATION_SCHEMA_IS_STRICT = False
SCHEMA_IS_STRICT = False
GENERAL_LIMIT_SIZE = 256
|
py
|
1a5c7e07e7007bbd70407f96f56804bfeddbc7d0
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import idl_schema
import unittest
def getFunction(schema, name):
for item in schema['functions']:
if item['name'] == name:
return item
raise KeyError('Missing function %s' % name)
def getParams(schema, name):
function = getFunction(schema, name)
return function['parameters']
def getReturns(schema, name):
function = getFunction(schema, name)
return function['returns']
def getType(schema, id):
for item in schema['types']:
if item['id'] == id:
return item
class IdlSchemaTest(unittest.TestCase):
def setUp(self):
loaded = idl_schema.Load('test/idl_basics.idl')
self.assertEquals(1, len(loaded))
self.assertEquals('idl_basics', loaded[0]['namespace'])
self.idl_basics = loaded[0]
def testSimpleCallbacks(self):
schema = self.idl_basics
expected = [{'type':'function', 'name':'cb', 'parameters':[]}]
self.assertEquals(expected, getParams(schema, 'function4'))
expected = [{'type':'function', 'name':'cb',
'parameters':[{'name':'x', 'type':'integer'}]}]
self.assertEquals(expected, getParams(schema, 'function5'))
expected = [{'type':'function', 'name':'cb',
'parameters':[{'name':'arg', '$ref':'MyType1'}]}]
self.assertEquals(expected, getParams(schema, 'function6'))
def testCallbackWithArrayArgument(self):
schema = self.idl_basics
expected = [{'type':'function', 'name':'cb',
'parameters':[{'name':'arg', 'type':'array',
'items':{'$ref':'MyType2'}}]}]
self.assertEquals(expected, getParams(schema, 'function12'))
def testArrayOfCallbacks(self):
schema = idl_schema.Load('test/idl_callback_arrays.idl')[0]
expected = [{'type':'array', 'name':'callbacks',
'items':{'type':'function', 'name':'MyCallback',
'parameters':[{'type':'integer', 'name':'x'}]}}]
self.assertEquals(expected, getParams(schema, 'whatever'))
def testLegalValues(self):
self.assertEquals({
'x': {'name': 'x', 'type': 'integer', 'enum': [1,2],
'description': 'This comment tests "double-quotes".'},
'y': {'name': 'y', 'type': 'string'},
'z': {'name': 'z', 'type': 'string'},
'a': {'name': 'a', 'type': 'string'},
'b': {'name': 'b', 'type': 'string'},
'c': {'name': 'c', 'type': 'string'}},
getType(self.idl_basics, 'MyType1')['properties'])
def testMemberOrdering(self):
self.assertEquals(
['x', 'y', 'z', 'a', 'b', 'c'],
getType(self.idl_basics, 'MyType1')['properties'].keys())
def testEnum(self):
schema = self.idl_basics
expected = {'enum': [{'name': 'name1', 'description': 'comment1'},
{'name': 'name2'}],
'description': 'Enum description',
'type': 'string', 'id': 'EnumType'}
self.assertEquals(expected, getType(schema, expected['id']))
expected = [{'name':'type', '$ref':'EnumType'},
{'type':'function', 'name':'cb',
'parameters':[{'name':'type', '$ref':'EnumType'}]}]
self.assertEquals(expected, getParams(schema, 'function13'))
expected = [{'items': {'$ref': 'EnumType'}, 'name': 'types',
'type': 'array'}]
self.assertEquals(expected, getParams(schema, 'function14'))
def testNoCompile(self):
schema = self.idl_basics
func = getFunction(schema, 'function15')
self.assertTrue(func is not None)
self.assertTrue(func['nocompile'])
def testNoDocOnEnum(self):
schema = self.idl_basics
enum_with_nodoc = getType(schema, 'EnumTypeWithNoDoc')
self.assertTrue(enum_with_nodoc is not None)
self.assertTrue(enum_with_nodoc['nodoc'])
def testInternalNamespace(self):
idl_basics = self.idl_basics
self.assertEquals('idl_basics', idl_basics['namespace'])
self.assertTrue(idl_basics['internal'])
self.assertFalse(idl_basics['nodoc'])
def testReturnTypes(self):
schema = self.idl_basics
self.assertEquals({'name': 'function19', 'type': 'integer'},
getReturns(schema, 'function19'))
self.assertEquals({'name': 'function20', '$ref': 'MyType1',
'optional': True},
getReturns(schema, 'function20'))
self.assertEquals({'name': 'function21', 'type': 'array',
'items': {'$ref': 'MyType1'}},
getReturns(schema, 'function21'))
self.assertEquals({'name': 'function22', '$ref': 'EnumType',
'optional': True},
getReturns(schema, 'function22'))
self.assertEquals({'name': 'function23', 'type': 'array',
'items': {'$ref': 'EnumType'}},
getReturns(schema, 'function23'))
def testChromeOSPlatformsNamespace(self):
schema = idl_schema.Load('test/idl_namespace_chromeos.idl')[0]
self.assertEquals('idl_namespace_chromeos', schema['namespace'])
expected = ['chromeos']
self.assertEquals(expected, schema['platforms'])
def testAllPlatformsNamespace(self):
schema = idl_schema.Load('test/idl_namespace_all_platforms.idl')[0]
self.assertEquals('idl_namespace_all_platforms', schema['namespace'])
expected = ['chromeos', 'chromeos_touch', 'linux', 'mac', 'win']
self.assertEquals(expected, schema['platforms'])
def testNonSpecificPlatformsNamespace(self):
schema = idl_schema.Load('test/idl_namespace_non_specific_platforms.idl')[0]
self.assertEquals('idl_namespace_non_specific_platforms',
schema['namespace'])
expected = None
self.assertEquals(expected, schema['platforms'])
def testSpecificImplementNamespace(self):
schema = idl_schema.Load('test/idl_namespace_specific_implement.idl')[0]
self.assertEquals('idl_namespace_specific_implement',
schema['namespace'])
expected = 'idl_namespace_specific_implement.idl'
self.assertEquals(expected, schema['compiler_options']['implemented_in'])
def testSpecificImplementOnChromeOSNamespace(self):
schema = idl_schema.Load(
'test/idl_namespace_specific_implement_chromeos.idl')[0]
self.assertEquals('idl_namespace_specific_implement_chromeos',
schema['namespace'])
expected_implemented_path = 'idl_namespace_specific_implement_chromeos.idl'
expected_platform = ['chromeos']
self.assertEquals(expected_implemented_path,
schema['compiler_options']['implemented_in'])
self.assertEquals(expected_platform, schema['platforms'])
def testCallbackComment(self):
schema = self.idl_basics
self.assertEquals('A comment on a callback.',
getParams(schema, 'function16')[0]['description'])
self.assertEquals(
'A parameter.',
getParams(schema, 'function16')[0]['parameters'][0]['description'])
self.assertEquals(
'Just a parameter comment, with no comment on the callback.',
getParams(schema, 'function17')[0]['parameters'][0]['description'])
self.assertEquals(
'Override callback comment.',
getParams(schema, 'function18')[0]['description'])
def testFunctionComment(self):
schema = self.idl_basics
func = getFunction(schema, 'function3')
self.assertEquals(('This comment should appear in the documentation, '
'despite occupying multiple lines.'),
func['description'])
self.assertEquals(
[{'description': ('So should this comment about the argument. '
'<em>HTML</em> is fine too.'),
'name': 'arg',
'$ref': 'MyType1'}],
func['parameters'])
func = getFunction(schema, 'function4')
self.assertEquals(('This tests if "double-quotes" are escaped correctly.'
'<br/><br/> It also tests a comment with two newlines.'),
func['description'])
def testReservedWords(self):
schema = idl_schema.Load('test/idl_reserved_words.idl')[0]
foo_type = getType(schema, 'Foo')
self.assertEquals([{'name': 'float'}, {'name': 'DOMString'}],
foo_type['enum'])
enum_type = getType(schema, 'enum')
self.assertEquals([{'name': 'callback'}, {'name': 'namespace'}],
enum_type['enum'])
dictionary = getType(schema, 'dictionary')
self.assertEquals('integer', dictionary['properties']['long']['type'])
mytype = getType(schema, 'MyType')
self.assertEquals('string', mytype['properties']['interface']['type'])
params = getParams(schema, 'static')
self.assertEquals('Foo', params[0]['$ref'])
self.assertEquals('enum', params[1]['$ref'])
def testObjectTypes(self):
schema = idl_schema.Load('test/idl_object_types.idl')[0]
foo_type = getType(schema, 'FooType')
self.assertEquals('object', foo_type['type'])
self.assertEquals('integer', foo_type['properties']['x']['type'])
self.assertEquals('object', foo_type['properties']['y']['type'])
self.assertEquals(
'any',
foo_type['properties']['y']['additionalProperties']['type'])
self.assertEquals('object', foo_type['properties']['z']['type'])
self.assertEquals(
'any',
foo_type['properties']['z']['additionalProperties']['type'])
self.assertEquals('Window', foo_type['properties']['z']['isInstanceOf'])
bar_type = getType(schema, 'BarType')
self.assertEquals('object', bar_type['type'])
self.assertEquals('any', bar_type['properties']['x']['type'])
def testObjectTypesInFunctions(self):
schema = idl_schema.Load('test/idl_object_types.idl')[0]
params = getParams(schema, 'objectFunction1')
self.assertEquals('object', params[0]['type'])
self.assertEquals('any', params[0]['additionalProperties']['type'])
self.assertEquals('ImageData', params[0]['isInstanceOf'])
params = getParams(schema, 'objectFunction2')
self.assertEquals('any', params[0]['type'])
def testObjectTypesWithOptionalFields(self):
schema = idl_schema.Load('test/idl_object_types.idl')[0]
baz_type = getType(schema, 'BazType')
self.assertEquals(True, baz_type['properties']['x']['optional'])
self.assertEquals('integer', baz_type['properties']['x']['type'])
self.assertEquals(True, baz_type['properties']['foo']['optional'])
self.assertEquals('FooType', baz_type['properties']['foo']['$ref'])
if __name__ == '__main__':
unittest.main()
|
py
|
1a5c7f249b79f747065c2ab70064269ab4be419d
|
from django.contrib.auth import get_user_model
from django.contrib.staticfiles.templatetags.staticfiles import static
from django.db import models
from django.urls import reverse
from meta.models import ModelMeta
class Club(ModelMeta, models.Model):
'''
Club
'''
name = models.CharField(
'nombre', max_length=64
)
slug = models.SlugField(
'slug', max_length=64, unique=True
)
description = models.TextField(
'descripción', max_length=300
)
document = models.FileField(
'documento', upload_to='clubs/docs/', blank=True,
help_text='Útil para bases de concursos o información de actividades.'
)
document_name = models.CharField(
'nombre del documento', max_length=120, default='', blank=True,
help_text='Texto que aparecerá en el enlace del documento.'
)
image = models.ImageField(
'imagen', upload_to='clubs/', blank=True,
help_text='Imagen para mostrar en la lista de clubes'
)
telegram_group = models.CharField(
'grupo de telegram', max_length=64, blank=True, default=''
)
telegram_group_link = models.CharField(
'enlace al grupo de telegram', max_length=64, blank=True, default=''
)
managers = models.ManyToManyField(
get_user_model(), 'managed_clubs', verbose_name='gestores'
)
members = models.ManyToManyField(
get_user_model(), 'clubs', verbose_name='miembros'
)
_metadata = {
'title': 'name',
'description': 'description',
'image': 'get_image',
}
class Meta:
verbose_name = 'club'
verbose_name_plural = 'clubes'
permissions = [
('can_link_club', 'Puede vincular un grupo de Telegram con un club')
]
def __str__(self):
return self.name
def get_absolute_url(self):
return reverse('clubs:detail', args=[self.slug])
def get_image(self):
return self.image.url if self.image else static('images/favicon.png')
class ClubMeeting(models.Model):
'''
Club meeting
'''
club = models.ForeignKey(
Club, models.CASCADE, 'meetings', verbose_name='club'
)
title = models.CharField(
'título', max_length=200, blank=True
)
place = models.CharField(
'lugar', max_length=120
)
moment = models.DateTimeField('fecha')
class Meta:
verbose_name = 'quedada'
ordering = ['moment']
def __str__(self):
return '{} en {} ({})'.format(
self.club.name, self.place, self.moment.strftime('%d %b %Y %H:%M')
)
|
py
|
1a5c7f3d4f967618e1bd5e6b306395e6fd8505db
|
from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class Font(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "cone.legendgrouptitle"
_path_str = "cone.legendgrouptitle.font"
_valid_props = {"color", "family", "size"}
# color
# -----
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
# family
# ------
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser will only be able to apply a font if
it is available on the system which it operates. Provide
multiple font families, separated by commas, to indicate the
preference in which to apply fonts if they aren't available on
the system. The Chart Studio Cloud (at https://chart-
studio.plotly.com or on-premise) generates images on a server,
where only a select number of fonts are installed and
supported. These include "Arial", "Balto", "Courier New",
"Droid Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
# size
# ----
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
"""
def __init__(self, arg=None, color=None, family=None, size=None, **kwargs):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.cone.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser will only be able to
apply a font if it is available on the system which it
operates. Provide multiple font families, separated by
commas, to indicate the preference in which to apply
fonts if they aren't available on the system. The Chart
Studio Cloud (at https://chart-studio.plotly.com or on-
premise) generates images on a server, where only a
select number of fonts are installed and supported.
These include "Arial", "Balto", "Courier New", "Droid
Sans",, "Droid Serif", "Droid Sans Mono", "Gravitas
One", "Old Standard TT", "Open Sans", "Overpass", "PT
Sans Narrow", "Raleway", "Times New Roman".
size
Returns
-------
Font
"""
super(Font, self).__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.cone.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.cone.legendgrouptitle.Font`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("color", None)
_v = color if color is not None else _v
if _v is not None:
self["color"] = _v
_v = arg.pop("family", None)
_v = family if family is not None else _v
if _v is not None:
self["family"] = _v
_v = arg.pop("size", None)
_v = size if size is not None else _v
if _v is not None:
self["size"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
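# Minimal usage sketch for the generated class above; the property values are arbitrary
# examples restricted to the three valid props (color, family, size).
def _example_legendgrouptitle_font():
    font = Font(color="#444", family="Arial", size=12)
    return font.to_plotly_json()  # serializer inherited from the plotly base types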
|
py
|
1a5c7f959b3398d00ce5f339a28a1d394fb82bca
|
'''
Utility functions.
'''
from datetime import datetime, timezone
def timezone_aware(date: datetime) -> datetime:
'''
Convert naive to timezone-aware datetime (UTC timezone).
Parameters:
date (datetime): Datetime object.
Returns:
datetime: A timezone-aware datetime.
'''
return date.replace(tzinfo=timezone.utc) if date.tzinfo is None else date
__all__ = [
'timezone_aware'
]
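# Tiny usage example for the helper above: a naive datetime gains UTC tzinfo,
# while an already-aware datetime is returned unchanged.
def _example_timezone_aware():
    naive = datetime(2021, 1, 1, 12, 0)
    aware = timezone_aware(naive)
    assert aware.tzinfo == timezone.utc
    assert timezone_aware(aware) is aware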
|
py
|
1a5c8021090b45349e01892b8e6c9b5298f75827
|
import pandas as pd
import trackpy as tp
def remove_short_tracks(tracks, threshold_length=7):
"""Filter les tracks qui font plus de X pixels.
Parameters
----------
tracks: pd.DataFrame
contient les traces brutes des nanoparticle
threshold_length: float
le nombre de pixels minimum pour avoir une trace valide
Returns
-------
t_neurons: pd.DataFrame
seulement une partie des traces obtenues en entréee.
"""
print("[filter.py] Je filtre les tracks qui sont trop courtes")
# t_neurons will contain the trajectories of interest
t_neurons = pd.DataFrame()
Ntraj = 0
for item in set(tracks.particle):
sub = tracks[tracks.particle == item] # selection of the item-th particle trajectory
distance = tp.motion.diagonal_size(sub)
# distance is an estimation of the particle displacement if the displacement
# is roughly linear
if distance > threshold_length:
Ntraj += 1
t_neurons = pd.concat([t_neurons, sub])  # DataFrame.append was removed in recent pandas
print(str(Ntraj) + ' trajectories kept')
return t_neurons
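# Hedged usage sketch: `linked` is assumed to be the output of trackpy's linking step
# (e.g. tp.link_df), i.e. a DataFrame with x, y, frame and particle columns.
def _example_remove_short_tracks(linked):
    return remove_short_tracks(linked, threshold_length=10)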
|
bzl
|
1a5c803f2b122ccd88d4fd87efc4d8ab5c30abc7
|
# Copyright 2019 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of tvOS rules."""
load(
"@build_bazel_rules_apple//apple/internal:apple_product_type.bzl",
"apple_product_type",
)
load(
"@build_bazel_rules_apple//apple/internal:linking_support.bzl",
"linking_support",
)
load(
"@build_bazel_rules_apple//apple/internal:outputs.bzl",
"outputs",
)
load(
"@build_bazel_rules_apple//apple/internal:partials.bzl",
"partials",
)
load(
"@build_bazel_rules_apple//apple/internal:platform_support.bzl",
"platform_support",
)
load(
"@build_bazel_rules_apple//apple/internal:processor.bzl",
"processor",
)
load(
"@build_bazel_rules_apple//apple/internal:rule_factory.bzl",
"rule_factory",
)
load(
"@build_bazel_rules_apple//apple/internal:run_support.bzl",
"run_support",
)
load(
"@build_bazel_rules_apple//apple:providers.bzl",
"TvosApplicationBundleInfo",
"TvosExtensionBundleInfo",
"TvosFrameworkBundleInfo",
)
def _tvos_application_impl(ctx):
"""Experimental implementation of tvos_application."""
top_level_attrs = [
"app_icons",
"launch_images",
"strings",
]
binary_descriptor = linking_support.register_linking_action(ctx)
binary_artifact = binary_descriptor.artifact
debug_outputs_provider = binary_descriptor.debug_outputs_provider
bundle_id = ctx.attr.bundle_id
embeddable_targets = ctx.attr.extensions + ctx.attr.frameworks
swift_dylib_dependencies = ctx.attr.extensions + ctx.attr.frameworks
processor_partials = [
partials.app_assets_validation_partial(
app_icons = ctx.files.app_icons,
launch_images = ctx.files.launch_images,
),
partials.apple_bundle_info_partial(bundle_id = bundle_id),
partials.binary_partial(binary_artifact = binary_artifact),
partials.bitcode_symbols_partial(
binary_artifact = binary_artifact,
debug_outputs_provider = debug_outputs_provider,
dependency_targets = embeddable_targets,
package_bitcode = True,
),
partials.clang_rt_dylibs_partial(binary_artifact = binary_artifact),
partials.debug_symbols_partial(
debug_dependencies = embeddable_targets,
debug_outputs_provider = debug_outputs_provider,
),
partials.embedded_bundles_partial(
bundle_embedded_bundles = True,
embeddable_targets = embeddable_targets,
),
partials.framework_import_partial(
targets = ctx.attr.deps + embeddable_targets,
),
partials.resources_partial(
bundle_id = bundle_id,
bundle_verification_targets = [struct(target = ext) for ext in ctx.attr.extensions],
plist_attrs = ["infoplists"],
targets_to_avoid = ctx.attr.frameworks,
top_level_attrs = top_level_attrs,
),
partials.settings_bundle_partial(),
partials.swift_dylibs_partial(
binary_artifact = binary_artifact,
dependency_targets = swift_dylib_dependencies,
bundle_dylibs = True,
package_swift_support_if_needed = True,
),
]
if platform_support.is_device_build(ctx):
processor_partials.append(
partials.provisioning_profile_partial(profile_artifact = ctx.file.provisioning_profile),
)
processor_result = processor.process(ctx, processor_partials)
executable = outputs.executable(ctx)
run_support.register_simulator_executable(ctx, executable)
return [
DefaultInfo(
executable = executable,
files = processor_result.output_files,
runfiles = ctx.runfiles(
files = [
outputs.archive(ctx),
ctx.file._std_redirect_dylib,
],
),
),
TvosApplicationBundleInfo(),
# Propagate the binary provider so that this target can be used as bundle_loader in test
# rules.
binary_descriptor.provider,
] + processor_result.providers
def _tvos_framework_impl(ctx):
"""Experimental implementation of tvos_framework."""
binary_descriptor = linking_support.register_linking_action(ctx)
binary_artifact = binary_descriptor.artifact
binary_provider = binary_descriptor.provider
debug_outputs_provider = binary_descriptor.debug_outputs_provider
bundle_id = ctx.attr.bundle_id
processor_partials = [
partials.apple_bundle_info_partial(bundle_id = bundle_id),
partials.binary_partial(binary_artifact = binary_artifact),
partials.bitcode_symbols_partial(
binary_artifact = binary_artifact,
debug_outputs_provider = debug_outputs_provider,
dependency_targets = ctx.attr.frameworks,
),
# TODO(kaipi): Check if clang_rt dylibs are needed in Frameworks, or if
# they can be skipped.
partials.clang_rt_dylibs_partial(binary_artifact = binary_artifact),
partials.debug_symbols_partial(
debug_dependencies = ctx.attr.frameworks,
debug_outputs_provider = debug_outputs_provider,
),
partials.embedded_bundles_partial(
frameworks = [outputs.archive(ctx)],
embeddable_targets = ctx.attr.frameworks,
),
partials.extension_safe_validation_partial(is_extension_safe = ctx.attr.extension_safe),
partials.framework_headers_partial(hdrs = ctx.files.hdrs),
partials.framework_provider_partial(binary_provider = binary_provider),
partials.resources_partial(
bundle_id = bundle_id,
plist_attrs = ["infoplists"],
targets_to_avoid = ctx.attr.frameworks,
version_keys_required = False,
),
partials.swift_dylibs_partial(
binary_artifact = binary_artifact,
dependency_targets = ctx.attr.frameworks,
),
]
processor_result = processor.process(ctx, processor_partials)
return [
DefaultInfo(files = processor_result.output_files),
TvosFrameworkBundleInfo(),
] + processor_result.providers
def _tvos_extension_impl(ctx):
"""Experimental implementation of tvos_extension."""
top_level_attrs = [
"app_icons",
"strings",
]
binary_descriptor = linking_support.register_linking_action(ctx)
binary_artifact = binary_descriptor.artifact
debug_outputs_provider = binary_descriptor.debug_outputs_provider
bundle_id = ctx.attr.bundle_id
processor_partials = [
partials.apple_bundle_info_partial(bundle_id = bundle_id),
partials.binary_partial(binary_artifact = binary_artifact),
partials.bitcode_symbols_partial(
binary_artifact = binary_artifact,
debug_outputs_provider = debug_outputs_provider,
dependency_targets = ctx.attr.frameworks,
),
partials.clang_rt_dylibs_partial(binary_artifact = binary_artifact),
partials.debug_symbols_partial(
debug_dependencies = ctx.attr.frameworks,
debug_outputs_provider = debug_outputs_provider,
),
partials.embedded_bundles_partial(
plugins = [outputs.archive(ctx)],
embeddable_targets = ctx.attr.frameworks,
),
partials.extension_safe_validation_partial(is_extension_safe = True),
partials.resources_partial(
bundle_id = bundle_id,
plist_attrs = ["infoplists"],
targets_to_avoid = ctx.attr.frameworks,
top_level_attrs = top_level_attrs,
),
partials.swift_dylibs_partial(
binary_artifact = binary_artifact,
dependency_targets = ctx.attr.frameworks,
),
]
if platform_support.is_device_build(ctx):
processor_partials.append(
partials.provisioning_profile_partial(profile_artifact = ctx.file.provisioning_profile),
)
processor_result = processor.process(ctx, processor_partials)
return [
DefaultInfo(
files = processor_result.output_files,
),
TvosExtensionBundleInfo(),
] + processor_result.providers
tvos_application = rule_factory.create_apple_bundling_rule(
implementation = _tvos_application_impl,
platform_type = "tvos",
product_type = apple_product_type.application,
doc = "Builds and bundles a tvOS Application.",
)
tvos_extension = rule_factory.create_apple_bundling_rule(
implementation = _tvos_extension_impl,
platform_type = "tvos",
product_type = apple_product_type.app_extension,
doc = "Builds and bundles a tvOS Extension.",
)
tvos_framework = rule_factory.create_apple_bundling_rule(
implementation = _tvos_framework_impl,
platform_type = "tvos",
product_type = apple_product_type.framework,
doc = "Builds and bundles a tvOS Dynamic Framework.",
)
|
py
|
1a5c80ab6f0c06783f4d0e9551701ed39a95c25b
|
import tensorflow as tf
from sklearn.model_selection import train_test_split
import unicodedata
import re
import io
'''
Handles data loading, pre-processing and tokenizing
'''
class DataHandler():
'''
Creates a tf.data.Dataset object to feed the network
with batched tensors
X: (tensor) with input data
y: (tensor) with data labels (X paired)
batch_size: (int) representing desired batch size
returns: (tf.data.Dataset) dataset object
(int) length of the dataset
'''
def create_tf_dataset(self, X, y, batch_size=64):
data_length = len(X)
dataset = tf.data.Dataset.from_tensor_slices((X, y)).shuffle(data_length)
dataset = dataset.batch(batch_size, drop_remainder=True)
return dataset, data_length
'''
Loads data from a file with the following format:
<sentence_1_lang_A> <tab> <sentence_1_lang_B>
<sentence_2_lang_A> <tab> <sentence_2_lang_B>
<sentence_3_lang_A> <tab> <sentence_3_lang_B>
...
<sentence_N_lang_A> <tab> <sentence_N_lang_B>
path: (string) path to the dataset file
num_examples: (int) 0 for full load or N to limit max load
'''
def load_from_file(self, path, num_examples=0):
lines = io.open(path, encoding='UTF-8').read().strip().split('\n')
if num_examples == 0:
num_examples = len(lines)
word_pairs = [[ self.process_sentence(w) for w in l.split('\t')] for l in lines[:num_examples]]
return zip(*word_pairs)
'''
Splits a dataset represented by X, y data pairs by a factor
X: (tensor) with input data
y: (tensor) with data labels (X paired)
split_size: (float) number between 0 and 1
returns: (tensor) splitted X, y pairs
'''
def split_data(self, X, y, split_size=0.25):
X, X_test, y, y_test = train_test_split(X, y, test_size=split_size)
return X, X_test, y, y_test
'''
Given a tensor of sentences, returns the maximum number of words found
in a sentence
tensor: (tensor) an array of sentences where each sentence is a tensor
returns: (int) max number of words found
'''
def max_length(self, tensor):
return max(len(t) for t in tensor)
'''
Cleans a given text by removing accents and unwanted characters
and adds start/stop tokens
'''
def process_sentence(self, w):
w = self.unicode_to_ascii(w.lower().strip())
# Creates a space between a word and the punctuation following it
w = re.sub(r"([?.!,¿])", r" \1 ", w)
w = re.sub(r'[" "]+', " ", w)
# Replace everything with a space except (a-z, A-Z, ".", "?", "!", ",")
w = re.sub(r"[^a-zA-Z?.!,¿]+", " ", w)
w = w.rstrip().strip()
w = '<start> ' + w + ' <end>'
return w
'''
Creates a vocabulary for a given text
Returns a list of word_id sequences for each sentence and a tokenizer
'''
def tokenize(self, text):
vocab_tokenizer = tf.keras.preprocessing.text.Tokenizer(filters='')
vocab_tokenizer.fit_on_texts(text)
tensor = vocab_tokenizer.texts_to_sequences(text)
tensor = tf.keras.preprocessing.sequence.pad_sequences(tensor, padding='post')
return tensor, vocab_tokenizer
def unicode_to_ascii(self, s):
return ''.join(c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn')
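# Illustrative usage sketch tying the helpers above together; the corpus path
# 'data/eng-spa.txt' is a hypothetical placeholder for a tab-separated
# sentence-pair file in the format documented in load_from_file.
# handler = DataHandler()
# source_sentences, target_sentences = handler.load_from_file('data/eng-spa.txt', num_examples=1000)
# source_tensor, source_tokenizer = handler.tokenize(source_sentences)
# target_tensor, target_tokenizer = handler.tokenize(target_sentences)
# X_train, X_test, y_train, y_test = handler.split_data(source_tensor, target_tensor, split_size=0.2)
# train_dataset, train_length = handler.create_tf_dataset(X_train, y_train, batch_size=64)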
|
py
|
1a5c81951348eeb5f3aefd337fbcf826abc0d5b7
|
from abc import ABCMeta, abstractproperty
class Generator(object):
__metaclass__ = ABCMeta
def __init__(self, conanfile):
self.conanfile = conanfile
self._deps_build_info = conanfile.deps_cpp_info
self._build_info = conanfile.cpp_info
self._deps_env_info = conanfile.deps_env_info
self._env_info = conanfile.env_info
self._deps_user_info = conanfile.deps_user_info
@property
def deps_build_info(self):
return self._deps_build_info
@property
def build_info(self):
return self._build_info
@property
def deps_env_info(self):
return self._deps_env_info
@property
def deps_user_info(self):
return self._deps_user_info
@property
def env_info(self):
return self._env_info
@property
def settings(self):
return self.conanfile.settings
@abstractproperty
def content(self):
raise NotImplementedError()
@abstractproperty
def filename(self):
raise NotImplementedError()
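# Illustrative sketch of a concrete generator (hypothetical, not defined by this
# module): a subclass only needs to supply the abstract `filename` and `content`
# properties, everything else is inherited from Generator.
# class ExampleGenerator(Generator):
#     @property
#     def filename(self):
#         return "example_deps.txt"
#     @property
#     def content(self):
#         return "\n".join(self.deps_build_info.include_paths)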
|
py
|
1a5c81bcab9b4bd6057814738a5c7ce36892f6a5
|
import json
import sys
import zipfile
with zipfile.ZipFile(sys.argv[1]) as z:
with z.open("result.json", "r") as f:
r = json.load(f)
print(r)
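# Illustrative invocation (script and archive names are hypothetical examples);
# the zip passed as the first argument is expected to contain a top-level result.json:
# python read_result_zip.py results.zip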
|
py
|
1a5c8215ae9b0990b1cd3a66a05dde8a9d319a55
|
from lib import *
def get_tick_labels(bins, ticks):
ticklabels = []
for i in ticks:
if i < len(bins):
ticklabels.append('%.2f'%(bins[int(i)]))
else:
ticklabels.append('%.2f'%(bins[-1])+'+')
return ticklabels
class Visualizer:
def __init__(self, action_labels):
self.n_action = len(action_labels)
self.action_labels = action_labels
def plot_a_episode(self,
env, model,
explored_cum_rewards, explored_actions,
safe_cum_rewards, safe_actions,
fig_path):
f, axs = plt.subplots(3,1,sharex=True, figsize=(14,14))
ax_price, ax_action, ax_Q = axs
ls = ['-','--']
for i in range(min(2,env.prices.shape[1])):
p = env.prices[:,i]/env.prices[0,i]*100 - 100
ax_price.plot(p, 'k'+ls[i], label='input%i - 100'%i)
ax_price.plot(explored_cum_rewards, 'b', label='explored P&L')
ax_price.plot(safe_cum_rewards, 'r', label='safe P&L')
ax_price.legend(loc='best', frameon=False)
ax_price.set_title(env.title+', ideal: %.1f, safe: %.1f, explored: %1.f'%(
env.max_profit, safe_cum_rewards[-1], explored_cum_rewards[-1]))
ax_action.plot(explored_actions, 'b', label='explored')
ax_action.plot(safe_actions, 'r', label='safe', linewidth=2)
ax_action.set_ylim(-0.4, self.n_action-0.6)
ax_action.set_ylabel('action')
ax_action.set_yticks(range(self.n_action))
ax_action.legend(loc='best', frameon=False)
style = ['k','r','b']
qq = []
for t in xrange(env.t0):
qq.append([np.nan] * self.n_action)
for t in xrange(env.t0, env.t_max):
qq.append(model.predict(env.get_state(t)))
for i in xrange(self.n_action):
ax_Q.plot([float(qq[t][i]) for t in xrange(len(qq))],
style[i], label=self.action_labels[i])
ax_Q.set_ylabel('Q')
ax_Q.legend(loc='best', frameon=False)
ax_Q.set_xlabel('t')
plt.subplots_adjust(wspace=0.4)
plt.savefig(fig_path)
plt.close()
def plot_episodes(self,
explored_total_rewards, safe_total_rewards, explorations,
fig_path, MA_window=100):
f = plt.figure(figsize=(14,10)) # width, height in inch (100 pixel)
if explored_total_rewards is None:
f, ax_reward = plt.subplots()
else:
figshape = (3,1)
ax_reward = plt.subplot2grid(figshape, (0, 0), rowspan=2)
ax_exploration = plt.subplot2grid(figshape, (2, 0), sharex=ax_reward)
tt = range(len(safe_total_rewards))
if explored_total_rewards is not None:
ma = pd.rolling_median(np.array(explored_total_rewards), window=MA_window, min_periods=1)
std = pd.rolling_std(np.array(explored_total_rewards), window=MA_window, min_periods=3)
ax_reward.plot(tt, explored_total_rewards,'bv', fillstyle='none')
ax_reward.plot(tt, ma, 'b', label='explored ma', linewidth=2)
ax_reward.plot(tt, std, 'b--', label='explored std', linewidth=2)
ma = pd.rolling_median(np.array(safe_total_rewards), window=MA_window, min_periods=1)
std = pd.rolling_std(np.array(safe_total_rewards), window=MA_window, min_periods=3)
ax_reward.plot(tt, safe_total_rewards,'ro', fillstyle='none')
ax_reward.plot(tt, ma,'r', label='safe ma', linewidth=2)
ax_reward.plot(tt, std,'r--', label='safe std', linewidth=2)
ax_reward.axhline(y=0, color='k', linestyle=':')
#ax_reward.axhline(y=60, color='k', linestyle=':')
ax_reward.set_ylabel('total reward')
ax_reward.legend(loc='best', frameon=False)
ax_reward.yaxis.tick_right()
ylim = ax_reward.get_ylim()
ax_reward.set_ylim((max(-100,ylim[0]), min(100,ylim[1])))
if explored_total_rewards is not None:
ax_exploration.plot(tt, np.array(explorations)*100., 'k')
ax_exploration.set_ylabel('exploration')
ax_exploration.set_xlabel('episode')
plt.savefig(fig_path)
plt.close()
def test_visualizer():
f = plt.figure()#figsize=(5,8))
axs_action = []
ncol = 3
nrow = 2
clim = (0,1)
ax = plt.subplot2grid((nrow, ncol), (0,ncol-1))
ax.matshow(np.random.random((2,2)), cmap='RdYlBu_r', clim=clim)
for action in range(3):
row = 1 + action/ncol
col = action%ncol
ax = plt.subplot2grid((nrow, ncol), (row,col))
cax = ax.matshow(np.random.random((2,2)), cmap='RdYlBu_r', clim=clim)
ax = plt.subplot2grid((nrow, ncol), (0,0), colspan=ncol-1)
cbar = f.colorbar(cax, ax=ax)
plt.show()
class VisualizerSequential:
def config(self):
pass
def __init__(self, model):
self.model = model
self.layers = []
for layer in self.model.layers:
self.layers.append(str(layer.name))
self.inter_models = dict()
model_input = self.model.input
for layer in self.layers:
self.inter_models[layer] = keras.models.Model(
inputs=model_input,
outputs=self.model.get_layer(layer).output)
self.config()
class VisualizerConv1D(VisualizerSequential):
def config(self):
self.n_channel = self.model.input.shape[2]
n_col = self.n_channel
for layer in self.layers:
shape = self.inter_models[layer].output.shape
if len(shape) == 3:
n_col = max(n_col, shape[2])
self.figshape = (len(self.layers)+1, int(n_col))
def plot(self, x):
f = plt.figure(figsize=(30,30))
for i in range(self.n_channel):
ax = plt.subplot2grid(self.figshape, (0,i))
ax.plot(x[0,:,i], '.-')
ax.set_title('input, channel %i'%i)
for i_layer in range(len(self.layers)):
layer = self.layers[i_layer]
z = self.inter_models[layer].predict(x)
print('plotting '+layer)
if len(z.shape) == 3:
for i in range(z.shape[2]):
ax = plt.subplot2grid(self.figshape, (i_layer+1, i))
ax.plot(z[0,:,i], '.-')
ax.set_title(layer+' filter %i'%i)
else:
ax = plt.subplot2grid(self.figshape, (i_layer+1, 0))
ax.plot(z[0,:], '.-')
ax.set_title(layer)
ax.set_ylim(-100,100)
def print_w(self):
layer = self.layers[0]
ww = self.inter_models[layer].get_weights()
for w in ww:
print(w.shape)
print(w)
"""
def test_VisualizerConv1D():
from agents import *
from sampler import *
fld = os.path.join('models','SinSampler(2, 5, 60) large')
qmodel = QModelConv(None, None)
qmodel.load(fld)
vis_conv = VisualizerConv1D(qmodel.model)
print vis_conv.layers
#return
vis_conv.print_w()
return
sampler = SinSampler(2, 5, 60)
x_all, title = sampler.sample(['fake'])
fld_fig = os.path.join(fld,'vis_conv', title.replace('/','|'))
makedirs(fld_fig)
for t in range(20, 60):
state = x_all[t-20:t,:]
state = (state.copy()/state[-1]-1.)*100
vis_conv.plot(np.reshape(state, (1, 20, 1)))
plt.savefig(os.path.join(fld_fig, 't%i.pdf'%t))
plt.close()
"""
if __name__ == '__main__':
#test()
#print np.isnan(float('nan'))
#test_VisualizerConv1D()
#histgram()
plot_price()
|
py
|
1a5c82f84f1b187a85762bd653928f204925f5fd
|
import agents as ag
import envgui as gui
import random
# ______________________________________________________________________________
loc_A, loc_B = (1, 1), (2, 1) # The two locations for the Vacuum world
def RandomVacuumAgent():
"Randomly choose one of the actions from the vacuum environment."
p = ag.RandomAgentProgram(['Right', 'Left', 'Up', 'Down', 'Suck', 'NoOp'])
return ag.Agent(p)
def TableDrivenVacuumAgent():
"[Figure 2.3]"
table = {((loc_A, 'Clean'),): 'Right',
((loc_A, 'Dirty'),): 'Suck',
((loc_B, 'Clean'),): 'Left',
((loc_B, 'Dirty'),): 'Suck',
((loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Clean')): 'Right',
((loc_A, 'Clean'), (loc_A, 'Clean'), (loc_A, 'Dirty')): 'Suck',
# ...
}
p = ag.TableDrivenAgentProgram(table)
return ag.Agent(p)
def ReflexVacuumAgent():
"A reflex agent for the two-state vacuum environment. [Figure 2.8]"
def program(percept):
location, status = percept
if status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return ag.Agent(program)
def ModelBasedVacuumAgent() -> object:
"An agent that keeps track of what locations are clean or dirty."
model = {loc_A: None, loc_B: None}
def program(percept):
"Same as ReflexVacuumAgent, except if everything is clean, do NoOp."
location, status = percept
model[location] = status # Update the model here
if model[loc_A] == model[loc_B] == 'Clean':
return 'NoOp'
elif status == 'Dirty':
return 'Suck'
elif location == loc_A:
return 'Right'
elif location == loc_B:
return 'Left'
return ag.Agent(program)
# ______________________________________________________________________________
# Vacuum environment
class Dirt(ag.Thing):
pass
# class Floor(ag.Thing):
# pass
class VacuumEnvironment(ag.XYEnvironment):
"""The environment of [Ex. 2.12]. Agent perceives dirty or clean,
and bump (into obstacle) or not; 2D discrete world of unknown size;
performance measure is 100 for each dirt cleaned, and -1 for
each turn taken."""
def __init__(self, width=4, height=3):
super(VacuumEnvironment, self).__init__(width, height)
self.add_walls()
def thing_classes(self):
return [ag.Wall, Dirt, ReflexVacuumAgent, RandomVacuumAgent,
TableDrivenVacuumAgent, ModelBasedVacuumAgent]
def percept(self, agent):
"""The percept is a tuple of ('Dirty' or 'Clean', 'Bump' or 'None').
Unlike the TrivialVacuumEnvironment, location is NOT perceived."""
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
bump = ('Bump' if agent.bump else 'None')
return (bump, status)
def execute_action(self, agent, action):
if action == 'Suck':
dirt_list = self.list_things_at(agent.location, Dirt)
if dirt_list != []:
dirt = dirt_list[0]
agent.performance += 100
self.delete_thing(dirt)
else:
super(VacuumEnvironment, self).execute_action(agent, action)
if action != 'NoOp':
agent.performance -= 1
class TrivialVacuumEnvironment(VacuumEnvironment):
"""This environment has two locations, A and B. Each can be Dirty
or Clean. The agent perceives its location and the location's
status. This serves as an example of how to implement a simple
Environment."""
def __init__(self):
super(TrivialVacuumEnvironment, self).__init__()
choice = random.randint(0, 3)
if choice % 2: # 1 or 3
self.add_thing(Dirt(), loc_A)
if choice > 1: # 2 or 3
self.add_thing(Dirt(), loc_B)
def percept(self, agent):
"Returns the agent's location, and the location status (Dirty/Clean)."
status = ('Dirty' if self.some_things_at(
agent.location, Dirt) else 'Clean')
return (agent.location, status)
#
# def execute_action(self, agent, action):
# """Change agent's location and/or location's status; track performance.
# Score 10 for each dirt cleaned; -1 for each move."""
# if action == 'Right':
# agent.location = loc_B
# agent.performance -= 1
# elif action == 'Left':
# agent.location = loc_A
# agent.performance -= 1
# elif action == 'Suck':
# if self.status[agent.location] == 'Dirty':
# agent.performance += 10
# self.status[agent.location] = 'Clean'
#
def add_agent(self, a):
"Agents start in either location at random."
super().add_thing(a, random.choice([loc_A, loc_B]))
# _________________________________________________________________________
# >>> a = ReflexVacuumAgent()
# >>> a.program((loc_A, 'Clean'))
# 'Right'
# >>> a.program((loc_B, 'Clean'))
# 'Left'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
# >>> a.program((loc_A, 'Dirty'))
# 'Suck'
#
# >>> e = TrivialVacuumEnvironment()
# >>> e.add_thing(ModelBasedVacuumAgent())
# >>> e.run(5)
# Produces text-based status output
# v = TrivialVacuumEnvironment()
# a = ModelBasedVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# v.run(10)
# Launch GUI of Trivial Environment
# v = TrivialVacuumEnvironment()
# a = RandomVacuumAgent()
# a = ag.TraceAgent(a)
# v.add_agent(a)
# g = gui.EnvGUI(v, 'Vaccuum')
# c = g.getCanvas()
# c.mapImageNames({
# Dirt: 'images/dirt.png',
# ag.Wall: 'images/wall.jpg',
# # Floor: 'images/floor.png',
# ag.Agent: 'images/vacuum.png',
# })
# c.update()
# g.mainloop()
# Launch GUI of more complex environment
v = VacuumEnvironment(5, 4)
#a = ModelBasedVacuumAgent()
a = RandomVacuumAgent()
a = ag.TraceAgent(a)
loc = v.random_location_inbounds()
v.add_thing(a, location=loc)
v.scatter_things(Dirt)
g = gui.EnvGUI(v, 'Vaccuum')
c = g.getCanvas()
c.mapImageNames({
ag.Wall: 'submissions/Zemgulys/myFace.jpg',
# Floor: 'images/floor.png',
Dirt: 'images/dirt.png',
ag.Agent: 'images/vacuum.png',
})
c.update()
g.mainloop()
|
py
|
1a5c83597a081381bceae05a60e22852cc65830c
|
"""
Copyright 2017 Pani Networks Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
#
# Functions for the calculation of a full Romana topology from the simplified
# user configuration.
#
from copy import copy
def calculate_num_groups(conf, num_networks=None):
"""
Calculates how many prefix groups we can have per AWS zone. Takes into
account that we need a route for each prefix group and we can't have more
than 48 routes in total.
"""
num_zones = len(conf['aws']['zones'])
num_nets = len(conf['networks']) if num_networks is None else \
num_networks
num_groups = 32
while num_groups * num_zones * num_nets > 48:
if num_groups == 1:
raise Exception("Too many networks and/or zones, reaching "
"50 route limit for AWS.")
num_groups //= 2
return num_groups
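# Worked example (derived from the loop above): with 3 zones and 2 networks the
# search starts at 32 groups (32*3*2 = 192 routes), halves to 16 (96 routes) and
# then to 8, where 8*3*2 = 48 no longer exceeds 48, so calculate_num_groups returns 8.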
def _build_aws_topology(conf):
"""
Build a topology for an AWS VPC deployment.
"""
# - If just one zone, we need one group, since it's a flat network.
# - If it's more than one zone, we want many groups per zone, but
# the total number of groups should not exceed 50 or even 40.
# - We only have one topology if in VPC.
t = {
"networks" : [n['name'] for n in conf['networks']],
"map" : []
}
num_zones = len(conf['aws']['zones'])
if num_zones == 1:
t["map"].append({
"name" : conf['aws']['zones'][0],
"groups" : []
})
else:
num_groups = calculate_num_groups(conf)
for zone in conf['aws']['zones']:
m = {
"name" : zone,
"assignment" : {"failure-domain" : zone},
"groups" : []
}
for i in range(num_groups):
m["groups"].append({
"name" : "%s-%02d" % (zone, i),
"groups" : []
})
t["map"].append(m)
return t
def _build_dc_topology(conf):
"""
Build a topology for a routed data center network.
"""
t = {
"networks" : [n['name'] for n in conf['networks']],
}
top_level_group_label = None
cd = conf['datacenter']
if cd['flat_network']:
if cd['prefix_per_host']:
num_groups = cd['num_hosts']
top_level_group_label = "host-%d"
else:
num_groups = 1
else:
num_groups = cd['num_racks']
top_level_group_label = "rack-%d"
m = []
for i in range(num_groups):
g = {"groups" : []}
if top_level_group_label:
g["name"] = top_level_group_label % i
if not cd['flat_network']:
g["assignment"] = {"rack" : g["name"]}
m.append(g)
if not cd['flat_network']:
if cd['prefix_per_host']:
for top_level_group in m:
for i in range(cd['num_hosts_per_rack']):
g = {
"name" : "host-%d" % i,
"groups" : []
}
top_level_group["groups"].append(g)
t["map"] = m
return t
def build_topology(conf):
"""
From the user provided configuration, calculate the full topology config.
"""
topo = {"networks": [], "topologies" : []}
for n in conf['networks']:
net = copy(n)
# If block mask wasn't defined, we add a default value for it
if "block_mask" not in net:
net["block_mask"] = 29
topo["networks"].append(net)
if conf.get('aws'):
t = _build_aws_topology(conf)
else:
t = _build_dc_topology(conf)
topo["topologies"].append(t)
return topo
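# Illustrative input/output sketch (hypothetical config, flat routed DC case):
# conf = {
#     "networks": [{"name": "net0"}],
#     "datacenter": {"flat_network": True, "prefix_per_host": False},
# }
# build_topology(conf) then yields a single topology whose map holds one unnamed
# group, and adds the default "block_mask": 29 to net0.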
|
py
|
1a5c8445b56bce44b9f2dc1af2182dd62d80e2b2
|
#! /usr/bin/env python
import time
from redis import StrictRedis
from flask_sse import Message
import json
count = 0
redis=StrictRedis.from_url("redis://localhost")
print "Sending SSE events..."
while True:
msgBody = {"message":"I can count to %s" % count}
messageObj = Message(msgBody, type='greeting')
msg_json = json.dumps(messageObj.to_dict())
print(msg_json)
subCount = redis.publish(channel='sse', message=msg_json)
count += 1
time.sleep(0.25)
|
py
|
1a5c857203944685bc766fee2a1190523a14ead4
|
from __future__ import absolute_import, print_function, unicode_literals
from builtins import dict, str
import io
import os
import json
import time
import requests
import logging
import ndex2.client
from indra import get_config
logger = logging.getLogger(__name__)
ndex_base_url = 'http://52.37.175.128'
def get_default_ndex_cred(ndex_cred):
"""Gets the NDEx credentials from the dict, or tries the environment if None"""
if ndex_cred:
username = ndex_cred.get('user')
password = ndex_cred.get('password')
if username is not None and password is not None:
return username, password
username = get_config('NDEX_USERNAME')
password = get_config('NDEX_PASSWORD')
return username, password
def send_request(ndex_service_url, params, is_json=True, use_get=False):
"""Send a request to the NDEx server.
Parameters
----------
ndex_service_url : str
The URL of the service to use for the request.
params : dict
A dictionary of parameters to send with the request. Parameter keys
differ based on the type of request.
is_json : bool
True if the response is in json format, otherwise it is assumed to be
text. Default: True
use_get : bool
True if the request needs to use GET instead of POST.
Returns
-------
res : str
Depending on the type of service and the is_json parameter, this
function either returns a text string or a json dict.
"""
if use_get:
res = requests.get(ndex_service_url, json=params)
else:
res = requests.post(ndex_service_url, json=params)
status = res.status_code
# If response is immediate, we get 200
if status == 200:
if is_json:
return res.json()
else:
return res.text
# If there is a continuation of the message we get status 300, handled below.
# Otherwise we return None.
elif status != 300:
logger.error('Request returned with code %d' % status)
return None
# In case the response is not immediate, a task ID can be used to get
# the result.
task_id = res.json().get('task_id')
logger.info('NDEx task submitted...')
time_used = 0
try:
while status != 200:
res = requests.get(ndex_base_url + '/task/' + task_id)
status = res.status_code
if status != 200:
time.sleep(5)
time_used += 5
except KeyError:
next
return None
logger.info('NDEx task complete.')
if is_json:
return res.json()
else:
return res.text
def create_network(cx_str, ndex_cred=None, private=True):
"""Creates a new NDEx network of the assembled CX model.
To upload the assembled CX model to NDEx, you need to have
a registered account on NDEx (http://ndexbio.org/) and have
the `ndex` python package installed. The uploaded network
is private by default.
Parameters
----------
cx_str : str
String containing the CX content.
ndex_cred : dict
A dictionary with the following entries:
'user': NDEx user name
'password': NDEx password
private : Optional[bool]
If True (default), the uploaded network remains private.
Returns
-------
network_id : str
The UUID of the NDEx network that was created by uploading
the assembled CX model.
"""
username, password = get_default_ndex_cred(ndex_cred)
nd = ndex2.client.Ndex2('http://public.ndexbio.org',
username=username,
password=password)
cx_stream = io.BytesIO(cx_str.encode('utf-8'))
try:
logger.info('Uploading network to NDEx.')
network_uri = nd.save_cx_stream_as_new_network(cx_stream)
except Exception as e:
logger.error('Could not upload network to NDEx.')
logger.error(e)
return
network_id = network_uri.rsplit('/')[-1]
# Set the network to public. This often fails due to time-out issues,
# therefore we implement a wait and retry approach here.
if not private:
nretries = 3
for retry_idx in range(nretries):
time.sleep(3)
try:
logger.info('Making network public.')
nd.make_network_public(network_id)
break
except Exception:
msg = 'Setting network to public failed, '
if retry_idx + 1 < nretries:
logger.info(msg + 'retrying %d more times.' %
(nretries - (retry_idx + 1)))
else:
logger.info(msg + 'the network will remain private.')
logger.info('The UUID for the uploaded network is: %s' % network_id)
logger.info('View at: http://ndexbio.org/#/network/%s' % network_id)
return network_id
def add_to_network_set(network_id, set_id, ndex_cred=None):
username, password = get_default_ndex_cred(ndex_cred)
nd = ndex2.client.Ndex2('http://public.ndexbio.org',
username=username,
password=password)
logger.info('Adding network %s to network set %s' % (network_id, set_id))
nd.add_networks_to_networkset(set_id, [network_id])
def set_network_name(network_id, name, ndex_cred=None):
username, password = get_default_ndex_cred(ndex_cred)
nd = ndex2.client.Ndex2('http://public.ndexbio.org',
username=username,
password=password)
nd.set_network_properties(network_id,
[{'predicateString': 'name', 'value': name}])
def update_network(cx_str, network_id, ndex_cred=None):
"""Update an existing CX network on NDEx with new CX content.
Parameters
----------
cx_str : str
String containing the CX content.
network_id : str
UUID of the network on NDEx.
ndex_cred : dict
A dictionary with the following entries:
'user': NDEx user name
'password': NDEx password
"""
server = 'http://public.ndexbio.org'
username, password = get_default_ndex_cred(ndex_cred)
nd = ndex2.client.Ndex2(server, username, password)
try:
logger.info('Getting network summary...')
summary = nd.get_network_summary(network_id)
except Exception as e:
logger.error('Could not get NDEx network summary.')
logger.error(e)
return
# Update network content
try:
logger.info('Updating network...')
cx_stream = io.BytesIO(cx_str.encode('utf-8'))
nd.update_cx_network(cx_stream, network_id)
except Exception as e:
logger.error('Could not update NDEx network.')
logger.error(e)
return
# Update network profile
ver_str = summary.get('version')
new_ver = _increment_ndex_ver(ver_str)
profile = {'name': summary.get('name'),
'description': summary.get('description'),
'version': new_ver,
}
logger.info('Updating NDEx network (%s) profile to %s',
network_id, profile)
time.sleep(5)
profile_retries = 3
for _ in range(profile_retries):
try:
nd.update_network_profile(network_id, profile)
logger.info('Updated NDEx network profile.')
break
except Exception as e:
logger.error('Could not update NDEx network profile.')
logger.error(e)
time.sleep(30)
set_style(network_id, ndex_cred)
def set_style(network_id, ndex_cred=None, template_id=None):
"""Set the style of the network to a given template network's style
Parameters
----------
network_id : str
The UUID of the NDEx network whose style is to be changed.
ndex_cred : dict
A dictionary of NDEx credentials.
template_id : Optional[str]
The UUID of the NDEx network whose style is used on the
network specified in the first argument.
"""
if not template_id:
template_id = "ea4ea3b7-6903-11e7-961c-0ac135e8bacf"
logger.info('Setting network style based on template: %s' %
template_id)
server = 'http://public.ndexbio.org'
username, password = get_default_ndex_cred(ndex_cred)
retries = 3
for _ in range(retries):
try:
time.sleep(5)
source_network = ndex2.create_nice_cx_from_server(
username=username, password=password, uuid=network_id,
server=server)
source_network.apply_template(server, template_id)
source_network.update_to(network_id, server=server,
username=username, password=password)
logger.info('Set network style.')
break
except Exception as e:
logger.error('Could not set style of NDEx network.')
logger.error(e)
def set_provenance(provenance, network_id, ndex_cred=None):
server = 'http://public.ndexbio.org'
username, password = get_default_ndex_cred(ndex_cred)
nd = ndex2.client.Ndex2(server, username, password)
try:
logger.info('Setting network provenance...')
nd.set_provenance(network_id, provenance)
except Exception as e:
logger.error('Could not set network provenance')
logger.exception(e)
def _increment_ndex_ver(ver_str):
if not ver_str:
new_ver = '1.0'
else:
major_ver, minor_ver = ver_str.split('.')
new_minor_ver = str(int(minor_ver) + 1)
new_ver = major_ver + '.' + new_minor_ver
return new_ver
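# Examples (derived from the logic above): _increment_ndex_ver(None) -> '1.0',
# _increment_ndex_ver('1.0') -> '1.1', _increment_ndex_ver('2.9') -> '2.10'.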
|
py
|
1a5c858edfd51f9d58befaa18f097b6dd207c69a
|
# Generated by Django 2.2.9 on 2020-01-06 20:45
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Book',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=250)),
('subtitle', models.CharField(max_length=250)),
('author', models.CharField(max_length=100)),
('isbn', models.CharField(max_length=13)),
],
),
]
|
py
|
1a5c86f41021170b5e818efe2df84cc3be481ad4
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 5
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import isi_sdk_8_1_0
from isi_sdk_8_1_0.models.auth_access_access_item_share_share_permissions import AuthAccessAccessItemShareSharePermissions # noqa: E501
from isi_sdk_8_1_0.rest import ApiException
class TestAuthAccessAccessItemShareSharePermissions(unittest.TestCase):
"""AuthAccessAccessItemShareSharePermissions unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testAuthAccessAccessItemShareSharePermissions(self):
"""Test AuthAccessAccessItemShareSharePermissions"""
# FIXME: construct object with mandatory attributes with example values
# model = isi_sdk_8_1_0.models.auth_access_access_item_share_share_permissions.AuthAccessAccessItemShareSharePermissions() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py
|
1a5c87c355f5178f24cac3fa36b3b692003aebbf
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Dec 5 12:26:18 2020
@author: dmattox
"""
# import numpy as np
import collections
import numpy as np
from anytree import NodeMixin, RenderTree
def binary2decimal(binary):
# Accepts binary number as a string, returns decimal number as integer
out = 0
binary = binary[::-1] # Reverse direction of binary string
for i, b in enumerate(binary):
out += int(b) * (2 ** i)
return out
def decimal2binary(decimal):
# Accepts decimal number as integer, returns binary number as a string
out = ''
while decimal > 0:
out += str(decimal % 2) # bit equal to remainder of dividing by 2
decimal //= 2 # update value to integer quotient
return out[::-1] # Reverse order of binary string and return it
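# Quick sanity check of the two helpers above (matching the encoding example used
# later in SugarBase.buildEncoding): binary2decimal('100001000') == 264 and
# decimal2binary(264) == '100001000'.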
def validParenth(gly):
# Returns true if all parentheses and brackets are paired and closed
stack = []
comp = {')': '(',
']': '[',
'}': '{'}
for p in gly:
if p in ['(',')','[',']','{','}']:
if p in comp.values():
stack.insert(0,p)
else:
if not stack:
return False
elif stack.pop(0) != comp[p]:
return False
if stack:
return False
else:
return True
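# Example: validParenth('Man(a1-6)[Man(a1-3)]Man') is True, while a string with an
# unclosed bracket such as 'Man(a1-6)[Man(a1-3)Man' returns False.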
def getSug(iupacGly):
mono = ''
for l in iupacGly[::-1]: # loop through string backwards and pop off first complete monosaccharide
if l not in ['[', ']', '(', ')']:
mono += l
else:
break
if mono == '':
raise SyntaxError('ERROR: Linkage punctuation found before any sugars in the IUPAC glycan string')
mono = mono[::-1] # correct order for sugar iupac code
return(iupacGly[:(-1*len(mono))], mono)
def getLink(iupacGly):
link = ''
for l in iupacGly[::-1]: # loop through string backwards and pop off the link defined within the last closed parenthesises
if l != '(':
link += l
else:
link += l
break
link = link[::-1] # correct order for sugar iupac code
return(iupacGly[:(-1*len(link))], link[1:-1]) # Return iupac string without link, as well as the link without the parentheses
def getBranch(iupacGly):
# Pop an entire bracketed branch from the iupac string
branch = ''
nested = []
for l in iupacGly[::-1]:
if l == '[':
if nested == []:
branch += l
break
else:
branch += l
nested.pop(0)
elif l == ']' and branch != '': # If it hits a nested branch (BESIDES THE FIRST BRACKET)
branch += l
nested.append(l)
else:
branch += l
branch = branch[::-1] # Reverse back to correct order
return(iupacGly[:(-1*len(branch))], branch[1:-1]) # Return iupac string without branch, as well as the branch without the parentheses
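# Example of how these helpers peel a glycan string from the right (illustrative
# IUPAC fragment): getSug('Gal(b1-4)Glc') returns ('Gal(b1-4)', 'Glc'), and
# getLink('Gal(b1-4)') then returns ('Gal', 'b1-4').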
# def decode(self, maxB, maxC, monoDict, anoDict = {'u': 0, 'a': 1, 'b': 2}):
# """
# Parameters (parameters used to generate encoding)
# ----------
# maxB : int
# Max value contained in a subway line in the root nodes of all glycans being considered (maximum number of observed branches).
# maxC : int
# Highest carbon number observed to participate in a glycosidic bond (from all glycans being considered).
# monoDict : dict
# Dictionary linking all observed monosaccharides to a corresponding integer (integers based on monosaccharide frequency rank).
# anoDict : dict, optional
# Dictionary encoding anomeric conformation information as integers. The default is {'u': 0, 'a': 1, 'b': 2}.
# Returns
# -------
# IUPAC string from encoding
# """
# pass
class GlyNode(NodeMixin):
# Class to hold nodes (rows) of tree representations
def __init__(self, base, ind, link = 'u0-0', parent = None):
super(GlyNode, self).__init__()
self.base = base
# self.depth = depth # Depth is an existing property of parent class
self.ind = ind
link = link.replace('(','') # clean extra parentheses
link = link.replace(')','')
self.anomeric = link[0] if link[0] in ['u','a','b'] else 'u' # state of anomeric carbon if indicated, otherwise assume unknown
self.parent = parent # parent GlyNode
self.name = '%s:%d*%d' % (self.base, self.depth, self.ind)
link = link[1:] if link[0] in ['u','a','b'] else link[:] # drop anomeric character if present
link = link.split('-') # Split linkage into child Carbon connection [0] and parent carbon connection [1]
self.Clinks = collections.Counter() # Holds the attachments at different carbon positions (based on formal carbon numbering)
if self.parent is None:
self.parentLink = 0
else:
self.Clinks[link[0]] = 2
self.parent.Clinks[link[1]] = 1
self.parentLink = link[1]
# self.parent.children.append(self) # Children handled by anytree parent class
# self.children = [] # handled by anytree parent class
self.subway = [] # Holds the indices of the subway lines that pass through the node
def __repr__(self):
return self.name
def __str__(self):
return self.name
def treePrint(self):
for pre, fill, node in RenderTree(self):
print("%s%s" % (pre, node.name))
def drawSubway(self, sbwyStp):
# Called by SugarBase.drawSbwyMap
self.subway.append(sbwyStp) # Add subway line for the terminal node
if self.parent is not None:
self.parent.drawSubway(sbwyStp) # Recursively pass the subway line up the tree towards the root node, adding the information to each node along the way
class SugarBase:
# Class to hold entries in SugarBase v2 database
def __init__(self, sbID, iupac, link, species, immunogenic):
self.sbID = sbID
self.iupac = iupac
self.link = link
self.species = [s.strip() for s in species.split(',')]
self.taxonomy = []
self.immunogenic = immunogenic
self.id = int(sbID.replace('SBID', ''))
self.tree = {}
# self.buildTree()
self.encoding = None
def __repr__(self):
return self.sbID
def __str__(self):
return self.sbID
def __int__(self):
return(self.id)
def print(self):
print('SugarBase ID:\n\t', self.id)
print('N/O Linked:\n\t', self.link)
if self.species[0] == '':
print('Species of origin:\n\tUnknown')
elif len(self.species) <= 2:
print('Species of origin: [', len(self.species),']\n\t', self.species)
else:
print('Species of origin: [', len(self.species),']\n\t', self.species[:3], '... (first 3, try print(SugarBase.species) to see the rest )')
print('Immunogenicity:\n\t', self.immunogenic)
print('IUPAC glycan:\n\t', self.iupac)
def treePrint(self):
if self.tree == {}:
print('ERROR: sugar tree not yet constructed')
else:
self.tree[(0,0)].treePrint()
def treeDepthCnt(self, depth):
# Returns the number of nodes in the tree at the given depth
cnt = 0
for k in self.tree.keys():
if k[0] == depth:
cnt += 1
return cnt
def buildTree(self):
if self.tree != {}:
print('WARNING: Tree already constructed, not rebuilding it')
elif (validParenth(self.iupac) == False):
raise SyntaxError('Unmatched parentheses or brackets detected in supplied IUPAC glyan string')
else:
gly = self.iupac
# add start token
par = (0,0)
self.tree[par] = GlyNode(base = 'START', ind = par[1])
# Process the root node
gly, base = getSug(gly)
chi = (1,0)
self.tree[chi] = GlyNode(base = base, ind = par[1], parent = self.tree[par])
par = chi
if gly: # if glycan is a monosaccharide, sets the queue to empty to avoid the while loop
branchQueue = [[gly,par]]
else:
branchQueue = []
while branchQueue:
if branchQueue[0][0][-1] != ')' and branchQueue[0][0][-1] != ']':
print('ERROR: no linkage or branch found for glycan ', self.sbID)
break
if branchQueue[0][0][-1] == ']':
par = branchQueue[0][1]
childLst = [] # Branching, at least 2 children from current parent node
while branchQueue[0][0][-1] == ']':
branchQueue[0][0], branch = getBranch(branchQueue[0][0])
childLst.append(branch)
childLst.append(branchQueue[0][0]) # add "main" branch to the list of branches as well
branchQueue.pop(0) # and remove it from the original queue
childLst.sort(key = lambda x: int(x[-2]), reverse = True) # sort all of the branches from the parent node by descending parentlink carbon number
for branch in childLst:
branchQueue.insert(0,[branch, par]) # Add braches to branch queue such that the lower numbered branches are on the top of the queue
chi = par # Since no monosaccharides are removed, set chi to par to preserve true parent
if branchQueue[0][0][-1] == ')':
par = branchQueue[0][1]
chi = (par[0]+1, self.treeDepthCnt(par[0]+1)) # depth & index of child
branchQueue[0][0], link = getLink(branchQueue[0][0])
branchQueue[0][0], base = getSug(branchQueue[0][0])
self.tree[chi] = GlyNode(base, ind=chi[1], link=link, parent = self.tree[par])
if branchQueue[0][0] == '':
branchQueue.pop(0) # If a branch has been fully processed, remove it from the queue
else:
branchQueue[0][1] = chi # otherwise, update the parent for the remainder of the branch
# Add stop tokens to terminal monosaccharides
termNodes = []
for k,v in self.tree.items():
if v.children == ():
termNodes.append(k)
termNodes.sort(key= lambda x: x[1])
termNodes.sort(key= lambda x: x[0], reverse=True)
for par in termNodes:
chi = (par[0]+1, self.treeDepthCnt(par[0]+1)) # depth & index of child
self.tree[chi] = GlyNode('END', ind=chi[1], parent = self.tree[par])
def drawSbwyMap(self):
sbwyStps = []
for k,v in self.tree.items():
if v.children == ():
sbwyStps.append(k)
sbwyStps.sort(reverse = True)
for i,stp in enumerate(sbwyStps):
self.tree[stp].drawSubway(i)
def buildEncoding(self, maxB, maxC, monoDict, anoDict = {'u': 0, 'a': 1, 'b': 2}):
"""
Parameters
----------
maxB : int
Max value contained in a subway line in the root nodes of all glycans being considered (maximum number of observed branches).
maxC : int
Highest carbon number observed to participate in a glycosidic bond (from all glycans being considered).
monoDict : dict
Dictionary linking all observed monosaccharides to a corresponding integer (integers based on monosaccharide frequency rank).
anoDict : dict, optional
Dictionary encoding anomeric conformation information as integers. The default is {'u': 0, 'a': 1, 'b': 2}.
Returns
-------
None, builds encoding in self.encoding as a numpy array, where each row corresponds to a node/monosaccharide and each column corresponds to a descriptor for that node:
1 -- monosaccharide identity (represented by integer from monoDict)
2 -- anomeric conformation of the saccharide (0:unknown, 1: alpha, 2: beta)
3 -- Carbon positions on the saccharide participating in glycosidic bonds as a binary converted to a decimal number
Ex 100001000 (C1 & C6 occupied) --> 264
4 -- "Subway lines" passing through the node as a binary converted to a decimal number
Ex 11111100000 (Root node on a glycan with 6 terminal non-reducing saccharides, all 6 subway lines pass through) --> 2016
5 -- The carbon position of the parent saccharide the node is connected to
6 -- depth
7 -- index (differentiate saccharides at the same depth on different branches)
"""
colNames = ['sugar'] + ['anomeric'] + ['C_links'] + ['B_lines'] + ['parLink', 'sDepth', 'sInd']
self.encoding = np.zeros((len(self.tree.keys()), len(colNames)), dtype = int) # Initialize 2D array to store encoding
for i,nodeKey in enumerate(list(self.tree.keys())):
base = self.tree[nodeKey] # GlyNode object for the current saccharide
# Prep col 3 value (occupied carbons)
carbLinks = [str(base.Clinks[str(i)]) for i in range(1,maxC+1)]
carbLinks = ['1' if c != '0' else '0' for c in carbLinks] # Drop parent/child linked info from each carbon position
C_binary = ''.join(carbLinks)
# Prep col 4 value (subway lines)
sbwyLines = ['1' if i in base.subway else '0' for i in range(maxB+1)]
B_binary = ''.join(sbwyLines)
# Columns 5-7
liDeIn = [int(base.parentLink), base.depth, base.ind] # link, depth, index info for sugar & parent
self.encoding[i,] = [monoDict[base.base], anoDict[base.anomeric], binary2decimal(C_binary), binary2decimal(B_binary)] + liDeIn
|
py
|
1a5c87dace7aee4ce151b37ffd820734ccce1bed
|
'''Utilities relating to interaction with Marathon
************************************************************************
FOR THE TIME BEING WHATEVER MODIFICATIONS ARE APPLIED TO THIS FILE
SHOULD ALSO BE APPLIED TO sdk_marathon IN ANY OTHER PARTNER REPOS
************************************************************************
'''
import logging
import json
import os
import tempfile
import retrying
import shakedown
import sdk_cmd
import sdk_metrics
TIMEOUT_SECONDS = 15 * 60
log = logging.getLogger(__name__)
def _get_config_once(app_name):
return sdk_cmd.cluster_request('GET', _api_url('apps/{}'.format(app_name)), retry=False)
def get_app_id(service_name):
# service_name may already contain a leading slash.
return '/' + service_name.lstrip('/')
def wait_for_deployment_and_app_removal(app_id, timeout=TIMEOUT_SECONDS):
"""
Waits for application to be gone, according to Marathon.
"""
log.info('Waiting for no deployments for {}'.format(app_id))
shakedown.deployment_wait(timeout, app_id)
client = shakedown.marathon.create_client()
def marathon_dropped_app():
app_ids = [app['id'] for app in client.get_apps()]
log.info('Marathon app IDs: {}'.format(app_ids))
matching_app_ids = list(filter(lambda x: x == app_id, app_ids))
if len(matching_app_ids) > 1:
log.warning('Found multiple apps with id {}'.format(app_id))
return len(matching_app_ids) == 0
log.info('Waiting for no {} Marathon app'.format(app_id))
shakedown.time_wait(marathon_dropped_app, timeout_seconds=timeout)
@retrying.retry(stop_max_attempt_number=5,
wait_fixed=5000,
retry_on_exception=lambda e: isinstance(e, Exception))
def retried_wait_for_deployment_and_app_removal(*args, **kwargs):
wait_for_deployment_and_app_removal(*args, **kwargs)
def app_exists(app_name):
try:
_get_config_once(app_name)
return True
except Exception:
return False
def get_config(app_name, timeout=TIMEOUT_SECONDS):
# Be permissive of flakes when fetching the app content:
@retrying.retry(
wait_fixed=1000,
stop_max_delay=timeout * 1000)
def wait_for_response():
return _get_config_once(app_name).json()['app']
config = wait_for_response()
# The configuration JSON that marathon returns doesn't match the configuration JSON it accepts,
# so we have to remove some offending fields to make it re-submittable, since it's not possible to
# submit a partial config with only the desired fields changed.
if 'uris' in config:
del config['uris']
if 'version' in config:
del config['version']
return config
def is_app_running(app: dict) -> bool:
return (app.get('tasksStaged', 0) == 0 and app.get('tasksUnhealthy', 0) == 0 and app.get('tasksRunning', 0) > 0)
def wait_for_app_running(app_name: str, timeout: int) -> None:
@retrying.retry(stop_max_delay=timeout,
wait_fixed=5000,
retry_on_result=lambda result: not result)
def _wait_for_app_running(app_name: str) -> bool:
cmd = 'marathon app show {}'.format(app_name)
log.info('Running %s', cmd)
app = sdk_cmd.get_json_output(cmd)
return is_app_running(app)
_wait_for_app_running(app_name)
def wait_for_deployment_and_app_running(app_name: str, timeout: int) -> None:
shakedown.deployment_wait(timeout, app_name)
wait_for_app_running(app_name, timeout)
def install_app_from_file(app_name: str, app_def_path: str) -> (bool, str):
"""
Installs a marathon app using the path to an app definition.
Args:
app_name: Name (Marathon app id) of the app being installed
app_def_path: Path to app definition
Returns:
(bool, str) tuple: Boolean indicates success of install attempt. String indicates
error message if install attempt failed.
"""
cmd = "marathon app add {}".format(app_def_path)
log.info("Running %s", cmd)
rc, stdout, stderr = sdk_cmd.run_raw_cli(cmd)
if rc or stderr:
log.error("returncode=%s stdout=%s stderr=%s", rc, stdout, stderr)
return False, stderr
if "Created deployment" not in stdout:
stderr = "'Created deployment' not in STDOUT"
log.error(stderr)
return False, stderr
log.info('Waiting for app %s to be deployed and running...', app_name)
wait_for_deployment_and_app_running(app_name, TIMEOUT_SECONDS)
return True, ''
def install_app(app_definition: dict) -> (bool, str):
"""
Installs a marathon app using the given `app_definition`.
Args:
app_definition: The definition of the app to pass to marathon.
Returns:
(bool, str) tuple: Boolean indicates success of install attempt. String indicates
error message if install attempt failed.
"""
app_name = app_definition["id"]
with tempfile.TemporaryDirectory() as d:
app_def_file = "{}.json".format(app_name.replace('/', '__'))
log.info("Launching {} marathon app".format(app_name))
app_def_path = os.path.join(d, app_def_file)
log.info("Writing app definition to %s", app_def_path)
with open(app_def_path, "w") as f:
json.dump(app_definition, f)
return install_app_from_file(app_name, app_def_path)
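# Illustrative usage sketch (hypothetical app definition; any valid Marathon app
# JSON with an "id" field works here):
# app_def = {"id": "/example-sleeper", "cmd": "sleep 1000", "cpus": 0.1, "mem": 32, "instances": 1}
# ok, err = install_app(app_def)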
def update_app(app_name, config, timeout=TIMEOUT_SECONDS, wait_for_completed_deployment=True, force=True):
if "env" in config:
log.info("Environment for marathon app {} ({} values):".format(app_name, len(config["env"])))
for k in sorted(config["env"]):
log.info(" {}={}".format(k, config["env"][k]))
query_string = "?force=true" if force else ""
# throws on failure:
sdk_cmd.cluster_request('PUT', _api_url('apps/{}{}'.format(app_name, query_string)), log_args=False, json=config)
if wait_for_completed_deployment:
log.info("Waiting for Marathon deployment of {} to complete...".format(app_name))
shakedown.deployment_wait(app_id=app_name, timeout=timeout)
def destroy_app(app_name):
shakedown.delete_app_wait(app_name)
def restart_app(app_name):
log.info("Restarting {}...".format(app_name))
# throws on failure:
sdk_cmd.cluster_request('POST', _api_url('apps/{}/restart'.format(app_name)))
log.info("Restarted {}.".format(app_name))
def _api_url(path):
return '/marathon/v2/{}'.format(path)
def get_scheduler_host(service_name):
# Marathon mangles foldered paths as follows: "/path/to/svc" => "svc.to.path"
task_name_elems = service_name.lstrip('/').split('/')
task_name_elems.reverse()
app_name = '.'.join(task_name_elems)
ips = shakedown.get_service_ips('marathon', app_name)
if len(ips) == 0:
raise Exception('No IPs found for marathon task "{}". Available tasks are: {}'.format(
app_name, [task['name'] for task in shakedown.get_service_tasks('marathon')]))
return ips.pop()
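# Example of the mangling handled above: service_name '/data-services/hdfs'
# becomes the Marathon task name 'hdfs.data-services'.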
def bump_cpu_count_config(service_name, key_name, delta=0.1):
config = get_config(service_name)
updated_cpus = float(config['env'][key_name]) + delta
config['env'][key_name] = str(updated_cpus)
update_app(service_name, config)
return updated_cpus
def bump_task_count_config(service_name, key_name, delta=1):
config = get_config(service_name)
updated_node_count = int(config['env'][key_name]) + delta
config['env'][key_name] = str(updated_node_count)
update_app(service_name, config)
def get_mesos_api_version(service_name):
return get_config(service_name)['env']['MESOS_API_VERSION']
def set_mesos_api_version(service_name, api_version, timeout=600):
'''Sets the mesos API version to the provided value, and then verifies that the scheduler comes back successfully'''
config = get_config(service_name)
config['env']['MESOS_API_VERSION'] = api_version
update_app(service_name, config, timeout=timeout)
# wait for scheduler to come back and successfully receive/process offers:
sdk_metrics.wait_for_scheduler_counter_value(service_name, 'offers.processed', 1, timeout_seconds=timeout)
|
py
|
1a5c87f18b01822ec32c651c5da44c45ac9c69b4
|
from .lock import Lock # noqa
from .semaphore import Semaphore # noqa
from .services import get_local_checks, get_failed_cluster_checks # noqa
|
py
|
1a5c88bfd8a265711ab46f914ab88203ec3a1883
|
from unittest.mock import MagicMock
import pytest
from noti import GithubPR
from fixtures import github as mock
class TestGithubPR:
@pytest.fixture
def default_pr(self):
return GithubPR(MagicMock(), mock.DummyPR())
def test_title(self, default_pr):
assert default_pr.title == '_title_'
def test_ci_status(self, default_pr):
default_pr._repo.get_commit.return_value.get_combined_status.return_value.statuses = [1]
default_pr._repo.get_commit.return_value.get_combined_status.return_value.state = 'success'
assert default_pr.ci_status == 'success'
default_pr._repo.get_commit.assert_called_with('_sha_')
default_pr._repo.get_commit.return_value.get_combined_status.assert_called_once()
def test_ci_status_no_statuses(self, default_pr):
default_pr._repo.get_commit.return_value.get_combined_status.return_value.state = 'pending'
assert default_pr.ci_status == ''
default_pr._repo.get_commit.assert_called_with('_sha_')
default_pr._repo.get_commit.return_value.get_combined_status.assert_called_once()
def test_failed_pipeline_jobs(self, default_pr):
default_pr._status = MagicMock()
default_pr._status.statuses = [
mock.DummyBuild('failure'),
mock.DummyBuild(),
mock.DummyBuild('failure'),
mock.DummyBuild(),
mock.DummyBuild('failure')
]
assert len(default_pr.failed_pipeline_jobs) == 3
def test_approved(self, default_pr):
assert default_pr.approved
def test_not_approved(self):
pr = GithubPR(MagicMock(), mock.DummyPR(mergeable_state='blocked'))
assert not pr.approved
def test_url(self, default_pr):
assert default_pr.url == '_url_'
def test_branch(self, default_pr):
assert default_pr.branch == '_branch_'
def test_reviews(self):
comments = [
mock.DummyComment(),
mock.DummyComment(),
mock.DummyComment()
]
pr = GithubPR(MagicMock(), mock.DummyPR(comments=comments))
assert len(pr.reviews) == 3
|
py
|
1a5c89019a0f63aeb417c7df1ba918f31fdef4ce
|
import serial
import time
ports = {
"fpga_out": "COM16",
"fpga_in": "COM15",
"laser2": "COM14",
"laser1": "COM12",
"x_motor": "COM19",
"y_motor": "COM18",
}
fpga_out = serial.Serial(ports["fpga_out"], 115200)
fpga_in = serial.Serial(ports["fpga_in"], 115200)
y_motor = serial.Serial(ports["y_motor"], 9600)
fpga_out.name = "fpga_out"
fpga_in.name = "fpga_in"
y_motor.name = "y_motor"
def send(interface, cmd, receive_interface=None):
print(interface.name, cmd.encode('utf8'))
if receive_interface is None:
receive_interface = interface
interface.write(cmd.encode('utf8'))
time.sleep(0.1)
ret = b""
while receive_interface.in_waiting == 0:
pass
while receive_interface.in_waiting > 0:
ret += receive_interface.readline().rstrip() + b"\n"
return ret.decode('utf8')
res = send(y_motor, "1G\r")
print(res)
time.sleep(2)
res = send(y_motor, "1D\r")
print(res)
y_motor_pos = int(res.split('*')[1])
res = send(fpga_out, "TDIYERD\n", fpga_in)
print(res)
encoder_pos = int(res.split(' ')[1])
res = send(fpga_out, "TDIYPOS {}\n".format(encoder_pos - 100000), fpga_in)
print(res)
res = send(fpga_out, "TDIYARM2 8000 1\n", fpga_in)
print(res)
send(fpga_out, "TDIYWAIT\n")
time.sleep(1)
res = send(y_motor, "1D{}\r1G\r".format(y_motor_pos - 420000))
print(res)
time.sleep(10)
res = send(fpga_out, "TDIYERD\n", fpga_in)
print(res)
res = send(y_motor, "1D{}\r1G\r".format(y_motor_pos))
print(res)
res = send(fpga_out, "TDIYERD\n", fpga_in)
print(res)
|
py
|
1a5c8adbe09f50f70df6fa586af133403eb168d9
|
#!/usr/bin/env python
from nose.tools import *
from networkx import *
from networkx.generators.random_graphs import *
class TestGeneratorsRandom():
def smoke_test_random_graph(self):
seed = 42
G=gnp_random_graph(100,0.25,seed)
G=binomial_graph(100,0.25,seed)
G=erdos_renyi_graph(100,0.25,seed)
G=fast_gnp_random_graph(100,0.25,seed)
G=gnm_random_graph(100,20,seed)
G=dense_gnm_random_graph(100,20,seed)
G=watts_strogatz_graph(10,2,0.25,seed)
assert_equal(len(G), 10)
assert_equal(G.number_of_edges(), 10)
G=connected_watts_strogatz_graph(10,2,0.1,seed)
assert_equal(len(G), 10)
assert_equal(G.number_of_edges(), 10)
G=watts_strogatz_graph(10,4,0.25,seed)
assert_equal(len(G), 10)
assert_equal(G.number_of_edges(), 20)
G=newman_watts_strogatz_graph(10,2,0.0,seed)
assert_equal(len(G), 10)
assert_equal(G.number_of_edges(), 10)
G=newman_watts_strogatz_graph(10,4,0.25,seed)
assert_equal(len(G), 10)
assert_true(G.number_of_edges() >= 20)
G=barabasi_albert_graph(100,1,seed)
G=barabasi_albert_graph(100,3,seed)
assert_equal(G.number_of_edges(),(97*3))
G=powerlaw_cluster_graph(100,1,1.0,seed)
G=powerlaw_cluster_graph(100,3,0.0,seed)
assert_equal(G.number_of_edges(),(97*3))
G=random_regular_graph(10,20,seed)
assert_raises(networkx.exception.NetworkXError,
random_regular_graph, 3, 21)
constructor=[(10,20,0.8),(20,40,0.8)]
G=random_shell_graph(constructor,seed)
G=nx.random_lobster(10,0.1,0.5,seed)
def test_random_zero_regular_graph(self):
"""Tests that a 0-regular graph has the correct number of nodes and
edges.
"""
G = random_regular_graph(0, 10)
assert_equal(len(G), 10)
assert_equal(len(G.edges()), 0)
def test_gnp(self):
for generator in [gnp_random_graph, binomial_graph, erdos_renyi_graph,
fast_gnp_random_graph]:
G = generator(10, -1.1)
assert_equal(len(G), 10)
assert_equal(len(G.edges()), 0)
G = generator(10, 0.1)
assert_equal(len(G), 10)
G = generator(10, 0.1, seed=42)
assert_equal(len(G), 10)
G = generator(10, 1.1)
assert_equal(len(G), 10)
assert_equal(len(G.edges()), 45)
G = generator(10, -1.1, directed=True)
assert_true(G.is_directed())
assert_equal(len(G), 10)
assert_equal(len(G.edges()), 0)
G = generator(10, 0.1, directed=True)
assert_true(G.is_directed())
assert_equal(len(G), 10)
G = generator(10, 1.1, directed=True)
assert_true(G.is_directed())
assert_equal(len(G), 10)
assert_equal(len(G.edges()), 90)
# assert that random graphs generate all edges for p close to 1
edges = 0
runs = 100
for i in range(runs):
edges += len(generator(10, 0.99999, directed=True).edges())
assert_almost_equal(edges/float(runs), 90, delta=runs*2.0/100)
def test_gnm(self):
G=gnm_random_graph(10,3)
assert_equal(len(G),10)
assert_equal(len(G.edges()),3)
G=gnm_random_graph(10,3,seed=42)
assert_equal(len(G),10)
assert_equal(len(G.edges()),3)
G=gnm_random_graph(10,100)
assert_equal(len(G),10)
assert_equal(len(G.edges()),45)
G=gnm_random_graph(10,100,directed=True)
assert_equal(len(G),10)
assert_equal(len(G.edges()),90)
G=gnm_random_graph(10,-1.1)
assert_equal(len(G),10)
assert_equal(len(G.edges()),0)
def test_watts_strogatz_big_k(self):
assert_raises(networkx.exception.NetworkXError,
watts_strogatz_graph, 10, 10, 0.25)
assert_raises(networkx.exception.NetworkXError,
newman_watts_strogatz_graph, 10, 10, 0.25)
# could create an infinite loop, now doesn't
# infinite loop used to occur when a node has degree n-1 and needs to rewire
watts_strogatz_graph(10, 9, 0.25, seed=0)
newman_watts_strogatz_graph(10, 9, 0.5, seed=0)
|
py
|
1a5c8af2a928b1dec4b9f66dfb7426fe46060537
|
# Copyright 2018-2019 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
py
|
1a5c8b0628295a373af9938e3b7fae3b1182922c
|
from fastapi import FastAPI
from main_types import ModelName
app = FastAPI()
@app.get("/")
async def welcome():
return {"welcome": "To have here or take away?. Don't forget the cutlery!"}
@app.get("/items/{item_id}")
async def read_item(item_id: int):
return {"item_id": item_id}
@app.get("/float/{f_num}")
async def read_float(f_num: float):
return {"float_num": f_num}
@app.get("/model/{model_name}")
async def get_model(model_name: ModelName):
res = dict()
res["model_name"] = model_name
res["message"] = "Have some residuals"
if model_name == ModelName.alexnet:
res["message"] = "Deep Learning FTW!"
elif model_name.value == "lenet":
res["message"] = "LeCNN all the images"
return res
|
py
|
1a5c8b1bb8b2e2eac7eaef8dc0c5e32651db3e4f
|
# Import libraries
import pandas
import matplotlib.pyplot as graficar
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA as ACP
from sklearn.model_selection import train_test_split as separar  # sklearn.cross_validation was removed; model_selection is the current module
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error as ECM
from sklearn.ensemble import RandomForestRegressor
# Global variable
GRAFICAR = True
juegos = pandas.read_csv("games.csv")
# Print the columns that were read
print("-"*5 + "Columnas" + "-"*5)
print(juegos.columns)
print()
# Print how many rows and columns we have
print("-"*5 + "Tamaño de DataSet (Filas, Columnas)" + "-"*5)
print(juegos.shape)
print()
# Suppose we want to predict the average rating that users would give
# to a game that has not been released yet. This information
# is in the average_rating column.
# We make a histogram of this column to see the distribution
# of the average ratings across all games.
# Read the DataSet
# (indexing by column name returns the whole column)
if GRAFICAR:
graficar.title("Distribucion de puntuacion promedio")
graficar.xlabel("Puntuacion promedio")
graficar.ylabel("# de Juegos")
graficar.hist(juegos["average_rating"])
graficar.show()
graficar.clf()
# juegos[juegos["average_rating"] == 0] returns a dataframe with only the
# rows where the average_rating column is 0.
# Indexing by position, we can get a whole row:
# juegos.iloc[0] returns the entire first row of the juegos dataframe,
# juegos.iloc[0,0] returns the first column of the first row of the dataframe.
print("-"*5 + "Diff entre juego con puntaje de 0 y con puntaje superior a 0" + "-"*5)
print(juegos[juegos["average_rating"] == 0].iloc[0])
print(juegos[juegos["average_rating"] > 0].iloc[0])
print()
# It turns out there must be many games with 0 user ratings,
# which is why their average rating is 0.
# This information is considered noise, so we choose to drop the games
# that have not been rated by any user.
juegos = juegos[juegos["users_rated"] > 0]
# Remove any row that has missing values
juegos = juegos.dropna(axis=0)
# Distribution of the average rating
if GRAFICAR:
graficar.title("Distribucion de puntuacion promedio")
graficar.xlabel("Puntuacion promedio")
graficar.ylabel("# de Juegos")
graficar.hist(juegos["average_rating"])
graficar.show()
graficar.clf()
# Print how many rows and columns we have
print("-"*5 + "Tamaño de DataSet (Filas, Columnas)" + "-"*5)
print(juegos.shape)
print()
# Cluster analysis (clustering)
# is the task of grouping a set of objects so that members
# of the same group (called a cluster) are more similar to each other, in some sense.
# EXPO
# It is the main task of exploratory data mining and a common technique in
# statistical data analysis. It is also used in many fields such as machine
# learning, pattern recognition, image analysis, information retrieval,
# bioinformatics, data compression and computer graphics.
# An example of a group is the set of games that had no ratings.
# We will use K-means, a clustering method where each element belongs to the
# cluster whose mean value is closest to it.
# EXPO
# K-means is a clustering method whose goal is to partition a set of n
# observations into k groups, where each observation belongs to the group with
# the nearest mean value. It is a method used in data mining.
# Create the model with 5 clusters and a random seed of 1
modelo_kmeans = KMeans(n_clusters=5, random_state=1)
# Drop every data type that is not numeric
columnas_numero = juegos._get_numeric_data()
# Feed the data into the model
modelo_kmeans.fit(columnas_numero)
# Get the cluster labels
etiquetas = modelo_kmeans.labels_
# To visualize the clusters, we need to reduce the number of
# columns, because every column adds one dimension to the plot,
# so we use Principal Component Analysis (ACP in Spanish, PCA in English),
# a technique for reducing the dimensionality of a dataset using
# the correlation between columns.
# Create the PCA model
acp_2 = ACP(2)
# Get the columns to plot
columnas_a_graficar = acp_2.fit_transform(columnas_numero)
if GRAFICAR:
    # Create the plot
graficar.title("Agrupacion de juegos en 5 clusters con ACP")
graficar.scatter(x=columnas_a_graficar[:,0], y=columnas_a_graficar[:,1], c=etiquetas)
graficar.show()
graficar.clf()
# Artificial intelligence
# For this we have to decide how the error will be measured and what will be predicted.
# PREDICT -> average_rating, the average rating of a game
# ERROR
# Here we take into account what kind of task this is:
# regression & continuous variables != classification & discrete variables.
# In this case we will use the Mean Squared Error (ECM in Spanish, MSE in English)
# because it is fast to compute and measures, on average, how far
# the predictions are from the real values.
# CORRELATION
# Knowing that we want to predict average_rating, the average rating of a game,
# it is time to decide which columns are most relevant for this.
# To do so we compute the correlation between average_rating and the rest of the columns.
print("-"*5 + "Correlacion de average_rating" + "-"*5)
print(juegos.corr()["average_rating"])
print()
# From this we can say that id and average_weight have the highest correlation.
# [ID]
# Assuming this value is assigned when a game is added,
# it is possible that newer games get better ratings;
# maybe in the early days of BoardGameGeek users were less kind,
# or older games simply had lower quality.
# [average_weight]
# It is possible that more complex games were rated higher.
# Columns used for prediction:
# we have to remove the non-numeric columns,
# and we have to remove the columns that are computed from the target column average_rating,
# so "bayes_average_rating", "average_rating", "type", "name" are dropped.
# Get the list of columns
columnas = juegos.columns.tolist()
# Filter the columns, which gives us the predictors
columnas = [columna for columna in columnas if columna not in ["bayes_average_rating", "average_rating", "type", "name"]]
# Store the column we will try to predict
columna_a_predecir = "average_rating"
# We need to split the DataSet into a training set and a test set.
# If we do not, we get overfitting, which means over-training a learning
# algorithm on a specific set for which it already knows the answers.
# E.g.: if you learn 1+1=2 and 2+2=4, you will be able to answer with 0 errors,
# but if you are asked 3+3, you will not be able to solve it.
# That is why it is necessary to learn in a general way.
# As a rule of thumb, if the learning algorithm produces a very low error,
# it is advisable to check that overfitting is not happening.
# In this case we will use 80% of the DataSet for training and 20% for testing.
# Create the training and test sets
set_entrenamiento = juegos.sample(frac=0.8, random_state=1)
set_test = juegos.loc[~juegos.index.isin(set_entrenamiento.index)]
# Print the size of both sets
print("-"*5 + "Tamaño de set_entrenamiento (Filas, Columnas)" + "-"*5)
print(set_entrenamiento.shape)
print()
print("-"*5 + "Tamaño de set_test (Filas, Columnas)" + "-"*5)
print(set_test.shape)
print()
# Create the model
modelo = LinearRegression()
# Fit the model with the DataSets: the first argument is the predictors, the second the target
modelo.fit(set_entrenamiento[columnas], set_entrenamiento[columna_a_predecir])
# Generate predictions
predicciones = modelo.predict(set_test[columnas])
print("-"*5 + "Predicciones" + "-"*5)
print(predicciones)
print("-"*5 + "VS" + "-"*5)
print(juegos.tail(1)["average_rating"])
print()
# Compute the error between the predictions and the real values
print("-"*5 + "Error en prediccion" + "-"*5)
print(ECM(predicciones, set_test[columna_a_predecir]))
print()
# Plot predictions vs real values for the linear regression
if GRAFICAR:
graficar.figure("lineal")
graficar.title("Regresion lineal")
graficar.xlabel("ID Juego")
graficar.ylabel("Puntuacion promedio")
graficar.scatter(set_test["id"], set_test["average_rating"], label="Real")
graficar.scatter(set_test["id"], predicciones, label="Prediccion")
graficar.legend(loc="upper left")
# END OF LINEAR REGRESSION
# Although Scikit-learn lets us use other algorithms, we will use
# random forest, which is able to find non-linear correlations in the DataSet,
# something linear regression cannot do.
# E.g.: how minage (minimum age for a game) might affect the rating:
# age < 5, the rating is low
# age 5-10, the rating is high
# age 10-15, the rating is low
print("-"*5 + "Usando RANDOM FOREST" + "-"*5)
# Create the model
modelo = RandomForestRegressor(n_estimators=100, min_samples_leaf=10, random_state=1)
# Pass in the DataSets
modelo.fit(set_entrenamiento[columnas], set_entrenamiento[columna_a_predecir])
# Make the prediction
predicciones = modelo.predict(set_test[columnas])
print("-"*5 + "Predicciones" + "-"*5)
print(predicciones)
print("-"*5 + "VS" + "-"*5)
print(juegos.tail(1)["average_rating"])
print()
# Compute the error
print("-"*5 + "Error en prediccion" + "-"*5)
print(ECM(predicciones, set_test[columna_a_predecir]))
print()
# Plot predictions vs real values for the random forest regression
if GRAFICAR:
graficar.figure("random")
graficar.title("Regresion Random Forest")
graficar.xlabel("ID Juego")
graficar.ylabel("Puntuacion promedio")
graficar.scatter(set_test["id"], set_test["average_rating"], label="Real")
graficar.scatter(set_test["id"], predicciones, label="Prediccion")
graficar.legend(loc="upper left")
# Show all the plots that have been created
graficar.show()
|
py
|
1a5c8b38460a2737d6f6a0fc69f62bf60e4c1858
|
#!/usr/bin/python
from paillier import *
from candidate import *
from voter import *
from election_board import *
from bulletin_board import *
import sys
import os
from Tkinter import *
import traceback
def submitVote():
global userPick
global userPIN
# print voters
if str(userPIN.get()).strip() in voters and (userPick.get()) != '':
if not voters[str(userPIN.get()).strip()].voted:
u_vote = []
for c in range(len(candidates)):
v = 0
if int(userPick.get()) == c:
v = 1
u_vote.append(encrypt(eb.public_key, v))
blind_signed_vote = []
for v in u_vote:
## We want to blind sign each vote. So blind it,
blinding_factor, blinded_msg = bs.blind(v, eb.public_signing_key)
signed = eb.blind_sign(blinded_msg)
unblinded = bs.unblind(signed, blinding_factor, eb.public_signing_key)
blind_signed_vote.append((unblinded, blinding_factor))
if not eb.has_voter_voted(str(userPIN.get().strip())):
bb.addVote(userPIN.get().strip(), u_vote, blind_signed_vote)
voters[str(userPIN.get()).strip()].voted = True
userPick = StringVar()
userPIN = StringVar()
toplevel.destroy()
def castVote():
global canCast
if canCast:
global toplevel
toplevel = Toplevel()
toplevel.geometry("600x800+200+200")
toplevel.focus_force()
label = Label(toplevel, text="Enter your voting ID", height=0, width=100)
label.pack()
e = Entry(toplevel,textvariable=userPIN)
e.pack()
for c in range(len(candidates)):
b = Radiobutton(toplevel, text=candidates[c].name, variable=userPick, value=c)
b.pack(anchor=W)
toplevel.focus_force()
b = Button(toplevel, text="Submit Vote", width=20, command=submitVote)
b.pack(side='bottom',padx=0,pady=0)
def endVoting():
global isOver
global canCast
global b
global button1
if not isOver:
isOver = True
canCast = False
e = bb.endElection()
final = ''
global resultsLabel
for candidate in e:
final += 'Number of votes for %s is %d\n'%(candidate.name, candidate.numVotes)
resultsLabel = Label(app, text=final, height=0, width=100)
resultsLabel.pack()
b.pack_forget()
button1.pack_forget()
if __name__ == "__main__":
## Get an instance of the election board
isOver = False
canCast = True
eb = ElectionBoard.Instance()
bb = BulletinBoard.Instance()
ca = CountingAuthority.Instance()
## Register voters and candidates
voters = {}
for line in open('voters.txt'):
parsed = line.strip().split(',')
voters[parsed[1].strip()] = Voter(parsed[0].strip(),parsed[1].strip())
candidates = []
for line in open("candidates.txt"):
candidates.append(Candidate(line.strip(), encrypt(eb.public_key, 0)))
eb.register_voters(voters)
eb.register_candidates(candidates)
app = Tk()
toplevel = None
app.title("Totally Secure and Legit Voting Machine 3000")
app.geometry("300x200+200+200")
userPick = StringVar()
userPIN = StringVar()
resultsLabel = None
b = Button(app, text="End Voting", width=20, command=endVoting)
button1 = Button(app, text="Cast Your Vote", width=20, command=castVote)
b.pack(side='bottom',padx=0,pady=0)
button1.pack(side='bottom',padx=5,pady=5)
app.mainloop()
|
py
|
1a5c8b623307f7248f06cdcb5ee0228fa28b1a15
|
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=10)
evaluation = dict(interval=20, metric='mAP', key_indicator='AP')
optimizer = dict(
type='Adam',
lr=0.0015,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=0.001,
step=[200, 260])
total_epochs = 100 # 300
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook')
])
channel_cfg = dict(
num_output_channels=14,
dataset_joints=14,
dataset_channel=[
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
],
inference_channel=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13])
data_cfg = dict(
image_size=512,
base_size=256,
base_sigma=2,
heatmap_size=[128, 256],
num_joints=channel_cfg['dataset_joints'],
dataset_channel=channel_cfg['dataset_channel'],
inference_channel=channel_cfg['inference_channel'],
num_scales=2,
scale_aware_sigma=False,
)
# model settings
model = dict(
type='BottomUp',
pretrained='https://download.openmmlab.com/mmpose/'
'pretrain_models/hrnet_w32-36af842e.pth',
backbone=dict(
type='HRNet',
in_channels=3,
extra=dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)),
stage4=dict(
num_modules=3,
num_branches=4,
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))),
),
keypoint_head=dict(
type='BottomUpHigherResolutionHead',
in_channels=32,
num_joints=14,
tag_per_joint=True,
extra=dict(final_conv_kernel=1, ),
num_deconv_layers=1,
num_deconv_filters=[32],
num_deconv_kernels=[4],
num_basic_blocks=4,
cat_output=[True],
with_ae_loss=[True, False]),
train_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
img_size=data_cfg['image_size']),
test_cfg=dict(
num_joints=channel_cfg['dataset_joints'],
max_num_people=30,
scale_factor=[1],
with_heatmaps=[True, True],
with_ae=[True, False],
project2image=True,
nms_kernel=5,
nms_padding=2,
tag_per_joint=True,
detection_threshold=0.1,
tag_threshold=1,
use_detection_val=True,
ignore_too_much=False,
adjust=True,
refine=True,
flip_test=True),
loss_pose=dict(
type='MultiLossFactory',
num_joints=14,
num_stages=2,
ae_loss_type='exp',
with_ae_loss=[True, False],
push_loss_factor=[0.001, 0.001],
pull_loss_factor=[0.001, 0.001],
with_heatmaps_loss=[True, True],
heatmaps_loss_factor=[1.0, 1.0],
),
)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='BottomUpRandomAffine',
rot_factor=30,
scale_factor=[0.75, 1.5],
scale_type='short',
trans_factor=40),
dict(type='BottomUpRandomFlip', flip_prob=0.5),
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
dict(
type='BottomUpGenerateTarget',
sigma=2,
max_num_people=30,
),
dict(
type='Collect',
keys=['img', 'joints', 'targets', 'masks'],
meta_keys=[]),
]
val_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='BottomUpGetImgSize', test_scale_factor=[1]),
dict(
type='BottomUpResizeAlign',
transforms=[
dict(type='ToTensor'),
dict(
type='NormalizeTensor',
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]),
]),
dict(
type='Collect',
keys=['img'],
meta_keys=[
'image_file', 'aug_data', 'test_scale_factor', 'base_size',
'center', 'scale', 'flip_index'
]),
]
test_pipeline = val_pipeline
data_root = 'data/aic'
data = dict(
samples_per_gpu=4, # 24
workers_per_gpu=2,
train=dict(
type='BottomUpAicDataset',
ann_file=f'{data_root}/annotations/aic_train.json',
img_prefix=f'{data_root}/ai_challenger_keypoint_train_20170902/'
'keypoint_train_images_20170902/',
data_cfg=data_cfg,
pipeline=train_pipeline),
val=dict(
type='BottomUpAicDataset',
ann_file=f'{data_root}/annotations/aic_val.json',
img_prefix=f'{data_root}/ai_challenger_keypoint_validation_20170911/'
'keypoint_validation_images_20170911/',
data_cfg=data_cfg,
pipeline=val_pipeline),
test=dict(
type='BottomUpAicDataset',
ann_file=f'{data_root}/annotations/aic_val.json',
img_prefix=f'{data_root}/ai_challenger_keypoint_validation_20170911/'
'keypoint_validation_images_20170911/',
data_cfg=data_cfg,
pipeline=val_pipeline),
)
# data_root3 = 'data/image_keypoint'
# data_root = 'data/aic'
# data = dict(
# samples_per_gpu=8, # 24
# workers_per_gpu=2,
# train=dict(
# type='BottomUpAicDataset',
# ann_file=f'{data_root}/annotations/aic_train.json',
# img_prefix=f'{data_root}/ai_challenger_keypoint_train_20170902/'
# 'keypoint_train_images_20170902/',
# data_cfg=data_cfg,
# pipeline=train_pipeline),
# val=dict(
# type='BottomUpAicDataset',
# ann_file=f'{data_root3}/annotations/train2.json',
# img_prefix=f'{data_root3}/images/',
# data_cfg=data_cfg,
# pipeline=val_pipeline),
# test=dict(
# type='BottomUpAicDataset',
# ann_file=f'{data_root3}/annotations/train2.json',
# img_prefix=f'{data_root3}/images/',
# data_cfg=data_cfg,
# pipeline=val_pipeline),
# )
|
py
|
1a5c8c03c42873b1d9198aaf15e8c169615039b6
|
# -*- coding: utf-8 -*-
"""
:author @CAB233
:url https://github.com/CAB233/everphoto_checkin
cron: 3 22 * * *
new Env('时光相册');
"""
import json
import requests
from notify_mtr import send
from utils import get_data
class EverPhoto:
def __init__(self, check_items):
self.check_items = check_items
def main(self):
msg_all = ""
for check_item in self.check_items:
mobile = check_item.get("mobile")
password = check_item.get("password")
header = {}
url = "https://api.everphoto.cn/users/self/checkin/v2"
login_url = "https://web.everphoto.cn/api/auth"
login_key = f"mobile={mobile}&password={password}"
login_res = requests.post(login_url, data=login_key, headers=header)
login_data = json.loads(login_res.text)["data"]
header["authorization"] = "Bearer " + login_data["token"]
response = requests.post(url, headers=header)
data = json.loads(response.text)
checkin_result = data["data"]["checkin_result"]
continuity = data["data"]["continuity"]
msg = (
"是否为今日第一次签到:" + str(checkin_result) + "\n" + "累积签到天数:" + str(continuity)
)
msg_all += msg + "\n\n"
return msg_all
if __name__ == "__main__":
data = get_data()
_check_items = data.get("EVERPHOTO", [])
res = EverPhoto(check_items=_check_items).main()
send("时光相册", res)
|
py
|
1a5c8d4522e581325f7cb1c240c60a091ed80d6f
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function
import os
import numpy as np
from numpy.testing import assert_allclose
from astropy.tests.helper import pytest
from astropy.table import Table
from fermipy.tests.utils import requires_dependency, requires_st_version
from fermipy import spectrum
try:
from fermipy import gtanalysis
except ImportError:
pass
# Skip tests in this file if Fermi ST aren't available
pytestmark = requires_dependency('Fermi ST')
@pytest.fixture(scope='module')
def create_draco_analysis(request, tmpdir_factory):
path = tmpdir_factory.mktemp('draco')
url = 'https://raw.githubusercontent.com/fermiPy/fermipy-extras/master/data/fermipy_test_draco.tar.gz'
outfile = path.join('fermipy_test_draco.tar.gz')
dirname = path.join()
os.system('curl -o %s -OL %s' % (outfile, url))
os.system('cd %s;tar xzf %s' % (dirname, outfile))
request.addfinalizer(lambda: path.remove(rec=1))
cfgfile = path.join('fermipy_test_draco', 'config.yaml')
gta = gtanalysis.GTAnalysis(str(cfgfile))
gta.setup()
return gta
@pytest.fixture(scope='module')
def create_pg1553_analysis(request, tmpdir_factory):
path = tmpdir_factory.mktemp('pg1553')
url = 'https://raw.githubusercontent.com/fermiPy/fermipy-extras/master/data/fermipy_test_pg1553.tar.gz'
outfile = path.join('fermipy_test_pg1553.tar.gz')
dirname = path.join()
os.system('curl -o %s -OL %s' % (outfile, url))
os.system('cd %s;tar xzf %s' % (dirname, outfile))
ft2_files = ['P8_P302_TRANSIENT020E_239557414_242187214_ft2.fits',
'P8_P302_TRANSIENT020E_247446814_250076614_ft2.fits',
'P8_P302_TRANSIENT020E_255336214_257966014_ft2.fits',
'P8_P302_TRANSIENT020E_242187214_244817014_ft2.fits',
'P8_P302_TRANSIENT020E_250076614_252706414_ft2.fits',
'P8_P302_TRANSIENT020E_257966014_260595814_ft2.fits',
'P8_P302_TRANSIENT020E_244817014_247446814_ft2.fits',
'P8_P302_TRANSIENT020E_252706414_255336214_ft2.fits',
'P8_P302_TRANSIENT020E_260595814_263225614_ft2.fits']
for f in ft2_files:
url = 'https://raw.githubusercontent.com/fermiPy/fermipy-extras/master/data/ft2/%s' % f
outfile = path.join('fermipy_test_pg1553', f)
os.system('curl -o %s -OL %s' % (outfile, url))
#request.addfinalizer(lambda: path.remove(rec=1))
cfgfile = path.join('fermipy_test_pg1553', 'config.yaml')
gta = gtanalysis.GTAnalysis(str(cfgfile))
gta.setup()
return gta
def test_gtanalysis_setup(create_draco_analysis):
gta = create_draco_analysis
gta.print_roi()
def test_print_model(create_draco_analysis):
gta = create_draco_analysis
gta.print_model()
def test_print_params(create_draco_analysis):
gta = create_draco_analysis
gta.print_params(True)
def test_gtanalysis_write_roi(create_draco_analysis):
gta = create_draco_analysis
gta.write_roi('test', make_plots=True)
def test_gtanalysis_load_roi(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit0')
src = gta.roi['3FGL J1725.3+5853']
prefactor = src.spectral_pars['Prefactor']
index = src.spectral_pars['Index']
assert_allclose(prefactor['value'] * prefactor['scale'],
1.6266779e-13, rtol=1E-3)
assert_allclose(index['value'] * index['scale'], -2.17892, rtol=1E-3)
assert_allclose(src['flux'], 4.099648e-10, rtol=1E-3)
assert_allclose(src['flux_err'], np.nan, rtol=1E-3)
assert_allclose(src['eflux'], 9.76762e-07, rtol=1E-3)
assert_allclose(src['eflux_err'], np.nan, rtol=1E-3)
gta.load_roi('fit1')
src = gta.roi['3FGL J1725.3+5853']
prefactor = src.spectral_pars['Prefactor']
index = src.spectral_pars['Index']
assert_allclose(prefactor['value'] *
prefactor['scale'], 2.0878036e-13, rtol=1E-3)
assert_allclose(index['value'] * index['scale'], -2.053723, rtol=1E-3)
assert_allclose(src['flux'], 5.377593e-10, rtol=1E-3)
assert_allclose(src['flux_err'], 6.40203e-11, rtol=1E-3)
assert_allclose(src['eflux'], 1.34617749e-06, rtol=1E-3)
assert_allclose(src['eflux_err'], 1.584814e-07, rtol=1E-3)
assert_allclose(src['ts'], 200.604, rtol=1E-3)
assert_allclose(src['npred'], 170.258, rtol=1E-3)
def test_gtanalysis_optimize(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit0')
gta.optimize()
def test_gtanalysis_fit(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit0')
gta.free_sources(distance=3.0, pars='norm')
gta.write_xml('fit_test')
fit_output0 = gta.fit(optimizer='MINUIT')
gta.load_xml('fit_test')
fit_output1 = gta.fit(optimizer='NEWMINUIT')
assert (np.abs(fit_output0['loglike'] - fit_output1['loglike']) < 0.01)
@requires_st_version('11-04-00')
def test_gtanalysis_fit_newton(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit0')
gta.free_sources(distance=3.0, pars='norm')
gta.write_xml('fit_test')
fit_output0 = gta.fit(optimizer='MINUIT')
gta.load_xml('fit_test')
fit_output1 = gta.fit(optimizer='NEWTON')
assert (np.abs(fit_output0['loglike'] - fit_output1['loglike']) < 0.01)
def test_gtanalysis_tsmap(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit1')
gta.tsmap(model={}, make_plots=True)
@requires_st_version('11-04-00')
def test_gtanalysis_tscube(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit1')
gta.tscube(model={}, make_plots=True)
def test_gtanalysis_residmap(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit1')
gta.residmap(model={}, make_plots=True)
def test_gtanalysis_find_sources(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit1')
np.random.seed(1)
src0 = {'SpatialModel': 'PointSource',
'Index': 2.0, 'offset_glon': 0.0, 'offset_glat': 2.0,
'Prefactor': 1E-12}
src1 = {'SpatialModel': 'PointSource',
'Index': 2.0, 'offset_glon': 0.0, 'offset_glat': -2.0,
'Prefactor': 1E-12}
gta.add_source('src0', src0)
gta.add_source('src1', src1)
gta.simulate_roi()
src0 = gta.delete_source('src0')
src1 = gta.delete_source('src1')
gta.find_sources()
diff_sources = [s.name for s in gta.roi.sources if s.diffuse]
newsrcs0 = gta.get_sources(skydir=src0.skydir, distance=0.3,
exclude=diff_sources)
newsrcs1 = gta.get_sources(skydir=src1.skydir, distance=0.3,
exclude=diff_sources)
assert(len(newsrcs0) == 1)
assert(len(newsrcs1) == 1)
newsrc0 = newsrcs0[0]
newsrc1 = newsrcs1[0]
sep0 = src0.skydir.separation(newsrc0.skydir).deg
sep1 = src1.skydir.separation(newsrc1.skydir).deg
assert(sep0 < newsrc0['pos_r99'])
assert(sep1 < newsrc1['pos_r99'])
flux_diff0 = (np.abs(src0['flux'] - newsrc0['flux']) /
newsrc0['flux_err'])
flux_diff1 = (np.abs(src1['flux'] - newsrc1['flux']) /
newsrc1['flux_err'])
assert(flux_diff0 < 3.0)
assert(flux_diff1 < 3.0)
def test_gtanalysis_sed(create_draco_analysis):
gta = create_draco_analysis
gta.load_roi('fit1')
np.random.seed(1)
gta.simulate_roi()
params = gta.roi['draco'].params
prefactor = 3E-12
index = 1.9
scale = params['Scale']['value']
emin = gta.energies[:-1]
emax = gta.energies[1:]
flux_true = spectrum.PowerLaw.eval_flux(emin, emax,
[prefactor, -index], scale)
gta.simulate_source({'SpatialModel': 'PointSource',
'Index': index,
'Scale': scale,
'Prefactor': prefactor})
gta.free_source('draco')
gta.fit()
o = gta.sed('draco', make_plots=True)
flux_resid = (flux_true - o['flux']) / o['flux_err']
assert_allclose(flux_resid, 0, atol=3.0)
params = gta.roi['draco'].params
index_resid = (-params['Index']['value'] - index) / \
params['Index']['error']
assert_allclose(index_resid, 0, atol=3.0)
prefactor_resid = (params['Prefactor']['value'] -
prefactor) / params['Prefactor']['error']
assert_allclose(prefactor_resid, 0, atol=3.0)
gta.simulate_roi(restore=True)
def test_gtanalysis_extension_gaussian(create_draco_analysis):
gta = create_draco_analysis
gta.simulate_roi(restore=True)
gta.load_roi('fit1')
np.random.seed(1)
spatial_width = 0.5
gta.simulate_source({'SpatialModel': 'RadialGaussian',
'SpatialWidth': spatial_width,
'Prefactor': 3E-12})
o = gta.extension('draco',
width=[0.4, 0.45, 0.5, 0.55, 0.6],
spatial_model='RadialGaussian')
assert_allclose(o['ext'], spatial_width, atol=0.1)
gta.simulate_roi(restore=True)
def test_gtanalysis_localization(create_draco_analysis):
gta = create_draco_analysis
gta.simulate_roi(restore=True)
gta.load_roi('fit1')
np.random.seed(1)
src_dict = {'SpatialModel': 'PointSource',
'Prefactor': 4E-12,
'glat': 36.0, 'glon': 86.0}
gta.simulate_source(src_dict)
src_dict['glat'] = 36.05
src_dict['glon'] = 86.05
gta.add_source('testloc', src_dict, free=True)
gta.fit()
result = gta.localize('testloc', nstep=4, dtheta_max=0.5, update=True,
make_plots=True)
assert result['fit_success'] is True
assert_allclose(result['glon'], 86.0, atol=0.02)
assert_allclose(result['glat'], 36.0, atol=0.02)
gta.delete_source('testloc')
gta.simulate_roi(restore=True)
def test_gtanalysis_lightcurve(create_pg1553_analysis):
gta = create_pg1553_analysis
gta.load_roi('fit1')
o = gta.lightcurve('3FGL J1555.7+1111', nbins=2,
free_radius=3.0)
rtol = 0.01
flux = np.array([2.917568e-08,
2.359114e-08])
flux_err = np.array([1.931940e-09,
1.822694e-09])
ts = np.array([1463.066,
1123.160])
assert_allclose(o['flux'], flux, rtol=rtol)
assert_allclose(o['flux_err'], flux_err, rtol=rtol)
assert_allclose(o['ts'], ts, rtol=rtol)
tab = Table.read(os.path.join(gta.workdir, o['file']))
assert_allclose(tab['flux'], flux, rtol=rtol)
assert_allclose(tab['flux_err'], flux_err, rtol=rtol)
assert_allclose(tab['ts'], ts, rtol=rtol)
|
py
|
1a5c8e1f159ac144f69215885aecd82381977894
|
""" OpenCV Backend RTSP Client """
import cv2
from io import BytesIO
from PIL import Image
from threading import Thread
class Client:
""" Maintain live RTSP feed without buffering. """
_stream = None
def __init__(self, rtsp_server_uri, verbose = False):
"""
rtsp_server_uri: the path to an RTSP server. should start with "rtsp://"
verbose: print log or not
"""
self.rtsp_server_uri = rtsp_server_uri
self._verbose = verbose
if isinstance(rtsp_server_uri,str) and 'picam' in rtsp_server_uri:
self.__class__ = PicamVideoFeed
_pc = PicamVideoFeed()
self.__dict__.update(_pc.__dict__)
self._bg_run = False
self.open()
def __enter__(self,*args,**kwargs):
""" Returns the object which later will have __exit__ called.
This relationship creates a context manager. """
return self
def __exit__(self, type=None, value=None, traceback=None):
""" Together with __enter__, allows support for `with-` clauses. """
self.close()
def open(self):
if self.isOpened():
return
self._stream = cv2.VideoCapture(self.rtsp_server_uri)
if self._verbose:
print("Connected to video source {}.".format(self.rtsp_server_uri))
self._bg_run = True
t = Thread(target=self._update, args=())
t.daemon = True
t.start()
self._bgt = t
return self
def close(self):
""" signal background thread to stop. release CV stream """
self._bg_run = False
self._bgt.join()
if self._verbose:
print("Disconnected from {}".format(self.rtsp_server_uri))
def isOpened(self):
""" return true if stream is opened and being read, else ensure closed """
try:
return (self._stream is not None) and self._stream.isOpened() and self._bg_run
except:
self.close()
return False
def _update(self):
while self.isOpened():
(grabbed, frame) = self._stream.read()
if not grabbed:
self._bg_run = False
else:
self._queue = frame
self._stream.release()
def read(self,raw=False):
""" Retrieve most recent frame and convert to PIL. Return unconverted with raw=True. """
try:
if raw:
return self._queue
else:
return Image.fromarray(cv2.cvtColor(self._queue, cv2.COLOR_BGR2RGB))
except:
return None
def preview(self):
""" Blocking function. Opens OpenCV window to display stream. """
win_name = 'RTSP'
cv2.namedWindow(win_name, cv2.WINDOW_AUTOSIZE)
cv2.moveWindow(win_name,20,20)
while(self.isOpened()):
cv2.imshow(win_name,self.read(raw=True))
if cv2.waitKey(30) == ord('q'): # wait 30 ms for 'q' input
break
cv2.waitKey(1)
cv2.destroyAllWindows()
cv2.waitKey(1)
class PicamVideoFeed(Client):
def __init__(self):
import picamera
self.cam = picamera.PiCamera()
def preview(self,*args,**kwargs):
""" Blocking function. Opens OpenCV window to display stream. """
self.cam.start_preview(*args,**kwargs)
def open(self):
pass
def isOpened(self):
return True
def read(self):
"""https://picamera.readthedocs.io/en/release-1.13/recipes1.html#capturing-to-a-pil-image"""
stream = BytesIO()
self.cam.capture(stream, format='png')
# "Rewind" the stream to the beginning so we can read its content
stream.seek(0)
return Image.open(stream)
def close(self):
pass
def stop(self):
pass
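# Illustrative usage sketch (not part of the original module): the RTSP URL is a
# placeholder and must point at a reachable stream for this to do anything useful.
if __name__ == "__main__":
    with Client("rtsp://127.0.0.1:8554/stream", verbose=True) as client:
        frame = client.read()  # PIL.Image, or None if no frame has been grabbed yet
        if frame is not None:
            frame.save("snapshot.png")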
|
py
|
1a5c8eb21ad1b804aca5c476f3c5fcaffb1fa909
|
"""
Command-line usage support.
"""
import argparse
from . import asm2cfg
def main():
""" Command-line entry point to the program. """
parser = argparse.ArgumentParser(
description='Program to draw dot control-flow graph from GDB disassembly for a function.',
epilog='If function CFG rendering takes too long, try to skip function calls with -c flag.'
)
parser.add_argument('assembly_file',
help='File to contain one function assembly dump')
parser.add_argument('-c', '--skip-calls', action='store_true',
help='Skip function calls from dividing code to blocks')
parser.add_argument('--target', choices=['x86', 'arm'], default='x86',
help='Specify target platform for assembly')
parser.add_argument('-v', '--view', action='store_true',
help='View as a dot graph instead of saving to a file')
args = parser.parse_args()
print('If function CFG rendering takes too long, try to skip function calls with -c flag')
lines = asm2cfg.read_lines(args.assembly_file)
function_name, basic_blocks = asm2cfg.parse_lines(lines, args.skip_calls, args.target)
asm2cfg.draw_cfg(function_name, basic_blocks, args.view)
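# Hedged usage sketch: the module/entry-point names below are assumptions (the
# package may expose main() differently); the GDB step only shows one way to
# produce a single-function disassembly dump.
#
#   gdb -batch -ex 'disassemble my_function' ./a.out > my_function.asm
#   python -m asm2cfg.cli my_function.asm --target x86 -c -v
#
# -c skips call instructions when splitting basic blocks; -v opens the rendered
# dot graph instead of saving it to a file.
if __name__ == '__main__':
    main()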
|
py
|
1a5c8eb56bc9cf822dbad7991ae53ff341093c52
|
import os
import shutil
import time
from fairseq import pyrouge
def rouge_results_to_str(results_dict):
return ">> ROUGE-F(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\nROUGE-R(1/2/3/l): {:.2f}/{:.2f}/{:.2f}\n".format(
results_dict["rouge_1_f_score"] * 100,
results_dict["rouge_2_f_score"] * 100,
results_dict["rouge_l_f_score"] * 100,
results_dict["rouge_1_recall"] * 100,
results_dict["rouge_2_recall"] * 100,
results_dict["rouge_l_recall"] * 100
# ,results_dict["rouge_su*_f_score"] * 100
)
def test_rouge(temp_dir, cand, ref):
candidates = [line.strip() for line in open(cand, encoding='utf-8')]
references = [line.strip() for line in open(ref, encoding='utf-8')]
print(len(candidates))
print(len(references))
assert len(candidates) == len(references)
cnt = len(candidates)
current_time = time.strftime('%Y-%m-%d-%H-%M-%S', time.localtime())
tmp_dir = os.path.join(temp_dir, "rouge-tmp-{}".format(current_time))
if not os.path.isdir(tmp_dir):
os.mkdir(tmp_dir)
os.mkdir(tmp_dir + "/candidate")
os.mkdir(tmp_dir + "/reference")
try:
for i in range(cnt):
if len(references[i]) < 1:
continue
with open(tmp_dir + "/candidate/cand.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(candidates[i])
with open(tmp_dir + "/reference/ref.{}.txt".format(i), "w",
encoding="utf-8") as f:
f.write(references[i])
r = pyrouge.Rouge155(temp_dir=temp_dir)
r.model_dir = tmp_dir + "/reference/"
r.system_dir = tmp_dir + "/candidate/"
r.model_filename_pattern = 'ref.#ID#.txt'
r.system_filename_pattern = r'cand.(\d+).txt'
rouge_results = r.convert_and_evaluate()
print(rouge_results)
results_dict = r.output_to_dict(rouge_results)
finally:
pass
if os.path.isdir(tmp_dir):
shutil.rmtree(tmp_dir)
return results_dict
|
py
|
1a5c8f8195195a4a7556518bb3c3495f0fea7122
|
from osgeo import ogr
def poly_to_line_layer(ds, poly_name, line_name):
"""Creates a line layer from a polygon layer."""
# Delete the line layer if it exists.
if ds.GetLayer(line_name):
ds.DeleteLayer(line_name)
# Get the polygon layer and its spatial reference.
poly_lyr = ds.GetLayer(poly_name)
sr = poly_lyr.GetSpatialRef()
# Create a line layer with the same SR as the polygons
# and copy the field definitions from the polygons to
# the line layer.
line_lyr = ds.CreateLayer(line_name, sr, ogr.wkbLineString)
line_lyr.CreateFields(poly_lyr.schema)
# Create a feature to use over and over.
line_feat = ogr.Feature(line_lyr.GetLayerDefn())
# Loop through all of the polygons.
for poly_feat in poly_lyr:
# Copy the attribute values from the polygon to the
# new feature.
atts = poly_feat.items()
for fld_name in atts.keys():
line_feat.SetField(fld_name, atts[fld_name])
# Loop through the rings in the polygon.
poly_geom = poly_feat.geometry()
for i in range(poly_geom.GetGeometryCount()):
ring = poly_geom.GetGeometryRef(i)
# Create a new line using the ring's vertices.
line_geom = ogr.Geometry(ogr.wkbLineString)
for coords in ring.GetPoints():
line_geom.AddPoint(*coords)
# Insert the new line feature.
line_feat.SetGeometry(line_geom)
line_lyr.CreateFeature(line_feat)
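# Minimal usage sketch (the file and layer names are hypothetical):
#
#   from osgeo import ogr
#   ds = ogr.Open('boundaries.gpkg', 1)               # open the datasource in update mode
#   poly_to_line_layer(ds, 'parcels', 'parcel_lines')
#   ds = None                                         # release the datasource to flush changes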
|
py
|
1a5c8fc732aa19f7bfc7e98f0ffa58b90637ad61
|
import ctypes
import os
import pkgutil
from lizard import LOG, PROGRAM_DATA_DIRNAME
from lizard import user_prog, util
def check_cpus():
"""
check CPU information
:returns: dict with CPU info
"""
data = {}
lscpu_out = util.subp(["lscpu"])[0]
    for k, v in [l.split(':', 1) for l in lscpu_out.splitlines() if ':' in l]:
data[k.strip()] = v.strip()
return {'max_threads': int(data['CPU(s)']), 'name': data['Model name']}
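# For reference, check_cpus() turns an lscpu excerpt such as
#
#   CPU(s):              8
#   Model name:          Intel(R) Xeon(R) CPU E5-2620 v4 @ 2.10GHz
#
# into {'max_threads': 8, 'name': 'Intel(R) Xeon(R) CPU E5-2620 v4 @ 2.10GHz'}
# (the numbers above are illustrative, not taken from any particular machine).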
class GPUProps(ctypes.Structure):
"""GPU properties struct"""
_fields_ = [
('gpu_index', ctypes.c_int),
('comp_level_major', ctypes.c_int),
('comp_level_minor', ctypes.c_int),
('sm_count', ctypes.c_int),
('max_sm_threads', ctypes.c_int),
('max_sm_blocks', ctypes.c_int),
('max_block_size', ctypes.c_int),
('max_total_threads', ctypes.c_int),
('max_total_blocks', ctypes.c_int),
('name', ctypes.c_char * 256),
]
def setup_cuda_detect(args, tmpdir):
"""
set up CUDA detect program
:args: parsed cmdline args
:tmpdir: temporary directory
:returns: wrapped program
"""
prog_dir = os.path.join(tmpdir, 'hw_detect')
os.mkdir(prog_dir)
def load_resource_to_prog_dir(fname, resource_dir='hw_discovery'):
resource_path = os.path.join(PROGRAM_DATA_DIRNAME, resource_dir, fname)
data = pkgutil.get_data('lizard', resource_path)
path = os.path.join(prog_dir, fname)
with open(path, 'wb') as fp:
fp.write(data)
return data
for fname in ('kernel.cu', 'program.h'):
load_resource_to_prog_dir(fname)
conf_fname = 'config.json'
data_file = os.path.join(prog_dir, conf_fname)
conf_raw = load_resource_to_prog_dir(conf_fname)
checksum = util.checksum(conf_raw)
program = user_prog.UserProg(
'Hardware Discovery', checksum, data_file, {}, build_dir=prog_dir)
program.build(
cuda_bin=args.bin, include_path=args.include, unpack=False,
set_compute_level=False)
so_path = os.path.join(prog_dir, 'user_program_cuda.so')
wrapper = ctypes.cdll.LoadLibrary(so_path)
wrapper.get_num_gpus.restype = ctypes.c_int
wrapper.get_gpu_data.argtypes = [ctypes.c_int, ctypes.POINTER(GPUProps)]
return wrapper
def get_reasonable_block_size(props, size_mult=32):
"""
get reasonable cuda block size
:props: gpu properties dict
:size_mult: block size multiple
:returns: reasonable block size
"""
max_reasonable_size = props['max_block_size']
min_reasonable_size = props['max_sm_threads'] / props['max_sm_blocks']
avg_reasonable_size = (max_reasonable_size + min_reasonable_size) / 2
reasonable_block_size = int(avg_reasonable_size/size_mult) * size_mult
LOG.debug('Using CUDA block size: %s', reasonable_block_size)
return reasonable_block_size
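# Worked example of the heuristic above (the GPU numbers are illustrative):
# with max_block_size=1024, max_sm_threads=2048 and max_sm_blocks=16 we get
# min_reasonable_size = 2048 / 16 = 128, avg_reasonable_size = (1024 + 128) / 2 = 576,
# and rounding down to a multiple of 32 leaves a block size of 576.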
def check_gpus(args, tmpdir):
"""
check for CUDA capable GPUs
:args: parsed cmdline args
:tmpdir: temporary directory
:returns: dict with GPU info
"""
if args.no_gpu:
LOG.warning("Not scanning available gpus, running programs will fail")
return {'num_gpus': 0, 'gpu_info': []}
LOG.info('Checking CUDA build system')
program = setup_cuda_detect(args, tmpdir)
res = {
'num_gpus': program.get_num_gpus(),
'gpu_info': [],
}
for gpu_index in range(res['num_gpus']):
props = GPUProps()
program.get_gpu_data(gpu_index, ctypes.byref(props))
gpu_info = {
'gpu_index': props.gpu_index,
'comp_level_major': props.comp_level_major,
'comp_level_minor': props.comp_level_minor,
'sm_count': props.sm_count,
'max_sm_threads': props.max_sm_threads,
'max_sm_blocks': props.max_sm_blocks,
'max_block_size': props.max_block_size,
'max_total_threads': props.max_total_threads,
'max_total_blocks': props.max_total_blocks,
'name': props.name.decode(),
}
gpu_info['reasonable_block_size'] = get_reasonable_block_size(gpu_info)
res['gpu_info'].append(gpu_info)
return res
def scan_hardware(args, tmpdir):
"""
scan system hardware
:args: parsed cmdline args
:tmpdir: temporary directory
:returns: dict with hardware info
"""
hardware = {
'CPU': check_cpus(),
'GPU': check_gpus(args, tmpdir),
}
LOG.debug('hardware scan found: %s', hardware)
return hardware
|
py
|
1a5c9060035b5dd193ea98d4a8a9a16f19846427
|
from .WidgetRedirector import WidgetRedirector
from Delegator import Delegator
class Percolator:
def __init__(self, text):
# XXX would be nice to inherit from Delegator
self.text = text
self.redir = WidgetRedirector(text)
self.top = self.bottom = Delegator(text)
self.bottom.insert = self.redir.register("insert", self.insert)
self.bottom.delete = self.redir.register("delete", self.delete)
self.filters = []
def close(self):
while self.top is not self.bottom:
self.removefilter(self.top)
self.top = None
self.bottom.setdelegate(None); self.bottom = None
self.redir.close(); self.redir = None
self.text = None
def insert(self, index, chars, tags=None):
# Could go away if inheriting from Delegator
self.top.insert(index, chars, tags)
def delete(self, index1, index2=None):
# Could go away if inheriting from Delegator
self.top.delete(index1, index2)
def insertfilter(self, filter):
# Perhaps rename to pushfilter()?
assert isinstance(filter, Delegator)
assert filter.delegate is None
filter.setdelegate(self.top)
self.top = filter
def removefilter(self, filter):
# XXX Perhaps should only support popfilter()?
assert isinstance(filter, Delegator)
assert filter.delegate is not None
f = self.top
if f is filter:
self.top = filter.delegate
filter.setdelegate(None)
else:
while f.delegate is not filter:
assert f is not self.bottom
f.resetcache()
f = f.delegate
f.setdelegate(filter.delegate)
filter.setdelegate(None)
def _percolator(parent):
import tkinter as tk
import re
class Tracer(Delegator):
def __init__(self, name):
self.name = name
Delegator.__init__(self, None)
def insert(self, *args):
print(self.name, ": insert", args)
self.delegate.insert(*args)
def delete(self, *args):
print(self.name, ": delete", args)
self.delegate.delete(*args)
root = tk.Tk()
root.title("Test Percolator")
width, height, x, y = list(map(int, re.split('[x+]', parent.geometry())))
root.geometry("+%d+%d"%(x, y + 150))
text = tk.Text(root)
p = Percolator(text)
t1 = Tracer("t1")
t2 = Tracer("t2")
def toggle1():
if var1.get() == 0:
var1.set(1)
p.insertfilter(t1)
elif var1.get() == 1:
var1.set(0)
p.removefilter(t1)
def toggle2():
if var2.get() == 0:
var2.set(1)
p.insertfilter(t2)
elif var2.get() == 1:
var2.set(0)
p.removefilter(t2)
text.pack()
var1 = tk.IntVar()
cb1 = tk.Checkbutton(root, text="Tracer1", command=toggle1, variable=var1)
cb1.pack()
var2 = tk.IntVar()
cb2 = tk.Checkbutton(root, text="Tracer2", command=toggle2, variable=var2)
cb2.pack()
root.mainloop()
if __name__ == "__main__":
from MrPython.idle_test.htest import run
run(_percolator)
|
py
|
1a5c90ebbc20ca33ca140808c195a6d4854e1b1a
|
import jax
import jax.numpy as jnp
import haiku as hk
from e3nn_jax import BatchNorm, Irreps
from e3nn_jax.util.test import assert_equivariant
import pytest
@pytest.mark.parametrize("irreps", [Irreps("3x0e + 3x0o + 4x1e"), Irreps("3x0o + 3x0e + 4x1e")])
def test_equivariant(keys, irreps):
@hk.without_apply_rng
@hk.transform_with_state
def b(x, is_training=True):
m = BatchNorm(irreps=irreps)
return m(x, is_training).contiguous
params, state = b.init(next(keys), irreps.randn(next(keys), (16, -1)))
_, state = b.apply(params, state, irreps.randn(next(keys), (16, -1)))
_, state = b.apply(params, state, irreps.randn(next(keys), (16, -1)))
m_train = lambda x: b.apply(params, state, x)[0]
assert_equivariant(m_train, next(keys), irreps_in=[irreps], irreps_out=[irreps])
m_eval = lambda x: b.apply(params, state, x, is_training=False)[0]
assert_equivariant(m_eval, next(keys), irreps_in=[irreps], irreps_out=[irreps])
@pytest.mark.parametrize("affine", [True, False])
@pytest.mark.parametrize("reduce", ["mean", "max"])
@pytest.mark.parametrize("normalization", ["norm", "component"])
@pytest.mark.parametrize("instance", [True, False])
def test_modes(keys, affine, reduce, normalization, instance):
irreps = Irreps("10x0e + 5x1e")
@hk.without_apply_rng
@hk.transform_with_state
def b(x, is_training=True):
m = BatchNorm(irreps=irreps, affine=affine, reduce=reduce, normalization=normalization, instance=instance)
return m(x, is_training)
params, state = b.init(next(keys), irreps.randn(next(keys), (16, -1)))
m_train = lambda x: b.apply(params, state, x)[0]
m_eval = lambda x: b.apply(params, state, x, is_training=False)[0]
m_train(irreps.randn(next(keys), (20, 20, -1)))
m_eval(irreps.randn(next(keys), (20, 20, -1)))
@pytest.mark.parametrize("instance", [True, False])
def test_normalization(keys, instance):
float_tolerance = 1e-3
sqrt_float_tolerance = jnp.sqrt(float_tolerance)
batch, n = 20, 20
irreps = Irreps("3x0e + 4x1e")
@hk.without_apply_rng
@hk.transform_with_state
def b(x, is_training=True):
m = BatchNorm(irreps=irreps, normalization="norm", instance=instance)
return m(x, is_training)
params, state = b.init(next(keys), irreps.randn(next(keys), (16, -1)))
x = jax.random.normal(next(keys), (batch, n, irreps.dim)) * 5 + 10
x, state = b.apply(params, state, x)
a = x.list[0] # [batch, space, mul, 1]
assert jnp.max(jnp.abs(a.mean([0, 1]))) < float_tolerance
assert jnp.max(jnp.abs(jnp.square(a).mean([0, 1]) - 1)) < sqrt_float_tolerance
a = x.list[1] # [batch, space, mul, repr]
assert jnp.max(jnp.abs(jnp.square(a).sum(3).mean([0, 1]) - 1)) < sqrt_float_tolerance
@hk.without_apply_rng
@hk.transform_with_state
def b(x, is_training=True):
m = BatchNorm(irreps=irreps, normalization="component", instance=instance)
return m(x, is_training)
params, state = b.init(next(keys), irreps.randn(next(keys), (16, -1)))
x = jax.random.normal(next(keys), (batch, n, irreps.dim)) * 5 + 10.0
x, state = b.apply(params, state, x)
a = x.list[0] # [batch, space, mul, 1]
assert jnp.max(jnp.abs(a.mean([0, 1]))) < float_tolerance
assert jnp.max(jnp.abs(jnp.square(a).mean([0, 1]) - 1)) < sqrt_float_tolerance
a = x.list[1] # [batch, space, mul, repr]
assert jnp.max(jnp.abs(jnp.square(a).mean(3).mean([0, 1]) - 1)) < sqrt_float_tolerance
|
py
|
1a5c92549b4cd9a263b243cf8c9afea14571f71d
|
from django.contrib.gis import admin
# from django.contrib.gis.admin import OSMGeoAdmin
# Register your models here.
from .models import LocationUser,LocationStream
admin.site.register(LocationStream)
admin.site.register(LocationUser)
|
py
|
1a5c925b89b9f9691dc98c6dfaaea966ca26d37c
|
#!/usr/bin/python3
"""
Chern machine runner
"""
import daemon
import time
from daemon import pidfile
import os
import sys
import subprocess
from Chern.utils import csys
from Chern.utils import metadata
from ChernMachine.ChernDatabase import ChernDatabase
from ChernMachine.kernel.VImage import VImage
from ChernMachine.kernel.VContainer import VContainer
from ChernMachine.kernel.VJob import VJob
cherndb = ChernDatabase.instance()
def check_status():
pending_jobs = cherndb.jobs("pending")
def execute():
running_jobs = cherndb.jobs("running")
if len(running_jobs) > 3:
return
waitting_jobs = cherndb.jobs("submitted")
# print("List {0}".format(waitting_jobs), file=sys.stderr)
for job in waitting_jobs:
print("Running {0}".format(job), file=sys.stderr)
if job.satisfied():
print("chern_machine execute {}".format(job.path), file=sys.stderr)
# FIXME Make sure the job will not be executed many times
status_file = metadata.ConfigFile(os.path.join(job.path, "status.json"))
subprocess.Popen("chern_machine execute {}".format(job.path), shell=True)
while (job.status() == "submitted"):
pass
def status():
    daemon_path = csys.daemon_path()
    pid_file = os.path.join(os.environ["HOME"], ".ChernMachine", "daemon/runner.pid")
    if os.path.exists(pid_file):
        return "started"
    else:
        return "stopped"
def start():
pid_file = os.path.join(os.environ["HOME"], ".ChernMachine", "daemon/runner.pid")
log_file = os.path.join(os.environ["HOME"], ".ChernMachine", "daemon/runner.log")
with daemon.DaemonContext(
working_directory="/",
pidfile=pidfile.TimeoutPIDLockFile(pid_file),
stderr=open(log_file, "w+"),
):
while True:
time.sleep(1)
try:
execute()
except Exception as e:
print(e, file=sys.stderr)
def stop():
if status() == "stopped":
return
pid_file = os.path.join(os.environ["HOME"], ".ChernMachine", "daemon/runner.pid")
subprocess.call("kill {}".format(open(pid_file).read()), shell=True)
|
py
|
1a5c92b3ce53a8a416134807722d7072f6d084d9
|
from rpython.rtyper.lltypesystem import lltype, rffi
from rpython.rlib.rarithmetic import widen
from rpython.rlib.unroll import unrolling_iterable
from rpython.rlib.objectmodel import (specialize, import_from_mixin)
from pypy.interpreter.error import oefmt
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.function import descr_function_get
from pypy.interpreter.typedef import TypeDef, interp2app
from pypy.objspace.std.typeobject import W_TypeObject
from pypy.module._hpy_universal import llapi
from .state import State
HPySlot_Slot = llapi.cts.gettype('HPySlot_Slot')
HPy_RichCmpOp = llapi.cts.gettype('HPy_RichCmpOp')
_WRAPPER_CACHE = {}
class W_SlotWrapper(W_Root):
_immutable_fields_ = ["slot"]
def __init__(self, slot, method_name, cfuncptr, w_objclass):
self.slot = slot
self.name = method_name
self.cfuncptr = cfuncptr
self.w_objclass = w_objclass
def check_args(self, space, __args__, arity):
length = len(__args__.arguments_w)
if length != arity:
raise oefmt(space.w_TypeError, "expected %d arguments, got %d",
arity, length)
if __args__.keywords:
raise oefmt(space.w_TypeError,
"wrapper %s doesn't take any keyword arguments",
self.name)
def check_argsv(self, space, __args__, min, max):
length = len(__args__.arguments_w)
if not min <= length <= max:
raise oefmt(space.w_TypeError, "expected %d-%d arguments, got %d",
min, max, length)
if __args__.keywords:
raise oefmt(space.w_TypeError,
"wrapper %s doesn't take any keyword arguments",
self.name)
def descr_call(self, space, __args__):
# XXX: basically a copy of cpyext's W_PyCMethodObject.descr_call()
if len(__args__.arguments_w) == 0:
w_objclass = self.w_objclass
assert isinstance(w_objclass, W_TypeObject)
raise oefmt(space.w_TypeError,
"descriptor '%8' of '%s' object needs an argument",
self.name, self.w_objclass.getname(space))
w_instance = __args__.arguments_w[0]
# XXX: needs a stricter test
if not space.isinstance_w(w_instance, self.w_objclass):
w_objclass = self.w_objclass
assert isinstance(w_objclass, W_TypeObject)
raise oefmt(space.w_TypeError,
"descriptor '%8' requires a '%s' object but received a '%T'",
self.name, w_objclass.name, w_instance)
#
return self.call(space, __args__)
def call(self, space, __args__):
raise oefmt(space.w_RuntimeError, "bad slot wrapper")
W_SlotWrapper.typedef = TypeDef(
'slot_wrapper',
__get__ = interp2app(descr_function_get),
__call__ = interp2app(W_SlotWrapper.descr_call),
)
W_SlotWrapper.typedef.acceptable_as_base_class = False
# ~~~~~~~~~~ concrete W_SlotWrapper subclasses ~~~~~~~~~~~~~
# these are the equivalent of the various functions wrap_* inside CPython's typeobject.c
class W_wrap_binaryfunc(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_binaryfunc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_other = __args__.arguments_w[1]
with self.handles.using(w_self, w_other) as (h_self, h_other):
h_result = func(self.ctx, h_self, h_other)
if not h_result:
space.fromcache(State).raise_current_exception()
return self.handles.consume(h_result)
@specialize.memo()
def get_cmp_wrapper_cls(handles, methname, OP):
try:
return _WRAPPER_CACHE[handles, methname]
except KeyError:
pass
class wrapper(W_SlotWrapper):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_richcmpfunc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_other = __args__.arguments_w[1]
with handles.using(w_self, w_other) as (h_self, h_other):
# rffi doesn't allow casting to an enum, we need to use int
# instead
h_result = func(
handles.ctx, h_self, h_other, rffi.cast(rffi.INT_real, OP))
if not h_result:
space.fromcache(State).raise_current_exception()
return handles.consume(h_result)
suffix = '_d' if handles.is_debug else '_u'
wrapper.__name__ = 'W_wrap_richcmp%s%s' % (methname, suffix)
_WRAPPER_CACHE[handles, methname] = wrapper
return wrapper
CMP_OPNAMES = ['eq', 'ne', 'lt', 'le', 'gt', 'ge']
CMP_ENUM_VALUES = [
getattr(HPy_RichCmpOp, 'HPy_%s' % opname.upper()) for opname in CMP_OPNAMES]
CMP_SLOTS = unrolling_iterable([
('__%s__' % opname, opval)
for opname, opval in zip(CMP_OPNAMES, CMP_ENUM_VALUES)])
class W_wrap_unaryfunc(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_unaryfunc", self.cfuncptr)
self.check_args(space, __args__, 1)
w_self = __args__.arguments_w[0]
with self.handles.using(w_self) as h_self:
h_result = func(self.ctx, h_self)
if not h_result:
space.fromcache(State).raise_current_exception()
return self.handles.consume(h_result)
class W_wrap_ternaryfunc(object):
def call(self, space, __args__):
# Literaly quote of the corresponding CPython comment:
# Note: This wrapper only works for __pow__()
#
func = llapi.cts.cast("HPyFunc_ternaryfunc", self.cfuncptr)
self.check_argsv(space, __args__, 2, 3)
n = len(__args__.arguments_w)
w_self = __args__.arguments_w[0]
w1 = __args__.arguments_w[1]
if n == 2:
w2 = space.w_None
else:
w2 = __args__.arguments_w[2]
with self.handles.using(w_self, w1, w2) as (h_self, h1, h2):
h_result = func(self.ctx, h_self, h1, h2)
if not h_result:
space.fromcache(State).raise_current_exception()
return self.handles.consume(h_result)
class W_wrap_indexargfunc(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_ssizeargfunc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_idx = __args__.arguments_w[1]
idx = space.int_w(space.index(w_idx))
with self.handles.using(w_self) as h_self:
h_result = func(self.ctx, h_self, idx)
if not h_result:
space.fromcache(State).raise_current_exception()
return self.handles.consume(h_result)
class W_wrap_inquirypred(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_inquiry", self.cfuncptr)
self.check_args(space, __args__, 1)
w_self = __args__.arguments_w[0]
with self.handles.using(w_self) as h_self:
res = func(self.ctx, h_self)
res = rffi.cast(lltype.Signed, res)
if res == -1:
space.fromcache(State).raise_current_exception()
return space.newbool(bool(res))
class W_wrap_lenfunc(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_lenfunc", self.cfuncptr)
self.check_args(space, __args__, 1)
w_self = __args__.arguments_w[0]
with self.handles.using(w_self) as h_self:
result = func(self.ctx, h_self)
if widen(result) == -1:
space.fromcache(State).raise_current_exception()
return space.newint(result)
def sq_getindex(space, w_sequence, w_idx):
"""
This is equivalent to CPython's typeobject.c:getindex().
We call it sq_getindex because it's used only by sq_* slots.
"""
idx = space.int_w(space.index(w_idx))
if idx < 0 and space.lookup(w_sequence, '__len__'):
# It is worth noting that we are doing the lookup of __len__ twice,
# one above and one inside space.len_w. The JIT should optimize it
# away, but it might be a minor slowdown for interpreted code.
n = space.len_w(w_sequence)
idx += n
return idx
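# Example of the adjustment above: for a sequence of length 5, an index of -1
# becomes -1 + 5 == 4; non-negative indices, and objects without __len__, are
# passed through unchanged, mirroring CPython's getindex() helper.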
class W_wrap_sq_item(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_ssizeargfunc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_idx = __args__.arguments_w[1]
idx = sq_getindex(space, w_self, w_idx)
with self.handles.using(w_self) as h_self:
h_result = func(self.ctx, h_self, idx)
if not h_result:
space.fromcache(State).raise_current_exception()
return self.handles.consume(h_result)
class W_wrap_sq_setitem(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_ssizeobjargproc", self.cfuncptr)
self.check_args(space, __args__, 3)
w_self = __args__.arguments_w[0]
w_idx = __args__.arguments_w[1]
idx = sq_getindex(space, w_self, w_idx)
w_value = __args__.arguments_w[2]
with self.handles.using(w_self, w_value) as (h_self, h_value):
result = func(self.ctx, h_self, idx, h_value)
if widen(result) == -1:
space.fromcache(State).raise_current_exception()
return space.w_None
class W_wrap_sq_delitem(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_ssizeobjargproc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_idx = __args__.arguments_w[1]
idx = sq_getindex(space, w_self, w_idx)
with self.handles.using(w_self) as h_self:
result = func(self.ctx, h_self, idx, llapi.HPy_NULL)
if widen(result) == -1:
space.fromcache(State).raise_current_exception()
return space.w_None
class W_wrap_objobjproc(object):
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_objobjproc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_key = __args__.arguments_w[1]
with self.handles.using(w_self, w_key) as (h_self, h_key):
res = func(self.ctx, h_self, h_key)
res = widen(res)
if res == -1:
space.fromcache(State).raise_current_exception()
return space.newbool(bool(res))
class W_wrap_getbuffer(object):
rbp = llapi.cts.cast('HPyFunc_releasebufferproc', 0)
def call(self, space, __args__):
func = llapi.cts.cast("HPyFunc_getbufferproc", self.cfuncptr)
self.check_args(space, __args__, 2)
w_self = __args__.arguments_w[0]
w_flags = __args__.arguments_w[1]
flags = rffi.cast(rffi.INT_real, space.int_w(w_flags))
with lltype.scoped_alloc(llapi.cts.gettype('HPy_buffer')) as hpybuf:
with self.handles.using(w_self) as h_self:
res = func(self.ctx, h_self, hpybuf, flags)
if widen(res) < 0:
space.fromcache(State).raise_current_exception()
buf_ptr = hpybuf.c_buf
w_obj = self.handles.consume(hpybuf.c_obj.c__i)
size = hpybuf.c_len
ndim = widen(hpybuf.c_ndim)
shape = None
if hpybuf.c_shape:
shape = [hpybuf.c_shape[i] for i in range(ndim)]
strides = None
if hpybuf.c_strides:
strides = [hpybuf.c_strides[i] for i in range(ndim)]
if hpybuf.c_format:
format = rffi.charp2str(hpybuf.c_format)
else:
format = 'B'
view = self.handles.HPyBuffer(
buf_ptr, size, w_obj,
itemsize=hpybuf.c_itemsize,
readonly=widen(hpybuf.c_readonly),
ndim=widen(hpybuf.c_ndim), format=format, shape=shape,
strides=strides)
if self.rbp:
# XXX: we're assuming w_self and w_obj have the same type!
view.releasebufferproc = self.rbp
self.handles.BUFFER_FQ.register_finalizer(view)
return view.wrap(space)
# remaining wrappers to write
## wrap_binaryfunc_l(PyObject *self, PyObject *args, void *wrapped)
## wrap_binaryfunc_r(PyObject *self, PyObject *args, void *wrapped)
## wrap_ternaryfunc_r(PyObject *self, PyObject *args, void *wrapped)
## wrap_objobjargproc(PyObject *self, PyObject *args, void *wrapped)
## wrap_delitem(PyObject *self, PyObject *args, void *wrapped)
## wrap_setattr(PyObject *self, PyObject *args, void *wrapped)
## wrap_delattr(PyObject *self, PyObject *args, void *wrapped)
## wrap_hashfunc(PyObject *self, PyObject *args, void *wrapped)
## wrap_call(PyObject *self, PyObject *args, void *wrapped, PyObject *kwds)
## wrap_del(PyObject *self, PyObject *args, void *wrapped)
## wrap_next(PyObject *self, PyObject *args, void *wrapped)
## wrap_descr_get(PyObject *self, PyObject *args, void *wrapped)
## wrap_descr_set(PyObject *self, PyObject *args, void *wrapped)
## wrap_descr_delete(PyObject *self, PyObject *args, void *wrapped)
class W_wrap_init(object):
def call(self, space, __args__):
with self.handles.using(__args__.arguments_w[0]) as h_self:
n = len(__args__.arguments_w) - 1
with lltype.scoped_alloc(rffi.CArray(llapi.HPy), n) as args_h:
i = 0
while i < n:
args_h[i] = self.handles.new(__args__.arguments_w[i + 1])
i += 1
h_kw = 0
if __args__.keywords:
w_kw = space.newdict()
for i in range(len(__args__.keywords)):
key = __args__.keywords[i]
w_value = __args__.keywords_w[i]
space.setitem_str(w_kw, key, w_value)
h_kw = self.handles.new(w_kw)
fptr = llapi.cts.cast('HPyFunc_initproc', self.cfuncptr)
try:
result = fptr(self.ctx, h_self, args_h, n, h_kw)
finally:
if h_kw:
self.handles.close(h_kw)
for i in range(n):
self.handles.close(args_h[i])
if rffi.cast(lltype.Signed, result) < 0:
space.fromcache(State).raise_current_exception()
return space.w_None
@specialize.memo()
def get_slot_cls(handles, mixin):
try:
return _WRAPPER_CACHE[handles, mixin]
except KeyError:
pass
_handles = handles
class wrapper(W_SlotWrapper):
import_from_mixin(mixin)
handles = _handles
ctx = _handles.ctx
wrapper.__name__ = mixin.__name__ + handles.cls_suffix
_WRAPPER_CACHE[handles, mixin] = wrapper
return wrapper
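# Descriptive note (added for clarity): get_slot_cls is memoized on the
# (handles, mixin) pair, both via @specialize.memo() and via _WRAPPER_CACHE,
# so RPython sees exactly one constant wrapper class per combination instead
# of a fresh class on every call.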
@specialize.memo()
def get_tp_new_wrapper_cls(handles):
try:
return _WRAPPER_CACHE[handles, 'new']
except KeyError:
pass
class W_tp_new_wrapper(handles.w_ExtensionFunction):
"""
        Special case for HPy_tp_new. Note that this is NOT a SlotWrapper.
This is the equivalent of CPython's tp_new_wrapper: the difference is that
CPython's tp_new_wrapper is a regular PyMethodDef which is wrapped inside
a PyCFunction, while here we have our own type.
"""
def __init__(self, cfuncptr, w_type):
handles.w_ExtensionFunction.__init__(
self, handles.space, handles, '__new__',
llapi.HPyFunc_KEYWORDS, None, cfuncptr, w_self=w_type)
def call(self, space, h_self, __args__, skip_args=0):
assert space is handles.space
assert skip_args == 0
# NOTE: h_self contains the type for which we are calling __new__, but
# here is ignored. In CPython's tp_new_wrapper it is only used to fish
# the ->tp_new to call, but here we already have the cfuncptr
#
# XXX: tp_new_wrapper does additional checks, we should write tests
# and implement the same checks
w_self = __args__.arguments_w[0]
with handles.using(w_self) as h_self:
return self.call_varargs_kw(space, h_self, __args__,
skip_args=1, has_keywords=True)
W_tp_new_wrapper.__name__ += handles.cls_suffix
_WRAPPER_CACHE[handles, 'new'] = W_tp_new_wrapper
return W_tp_new_wrapper
# the following table shows how to map C-level slots into Python-level
# __methods__. Note that if a C-level slot corresponds to multiple
# __methods__, it appears multiple times (e.g. sq_ass_item corresponds to both
# __setitem__ and __delitem__).
SLOTS = unrolling_iterable([
# CPython slots
('bf_getbuffer', '__buffer__', W_wrap_getbuffer),
# ('mp_ass_subscript', '__xxx__', AGS.W_SlotWrapper_...),
# ('mp_length', '__xxx__', AGS.W_SlotWrapper_...),
# ('mp_subscript', '__getitem__', AGS.W_SlotWrapper_binaryfunc),
('nb_absolute', '__abs__', W_wrap_unaryfunc),
('nb_add', '__add__', W_wrap_binaryfunc),
('nb_and', '__and__', W_wrap_binaryfunc),
('nb_bool', '__bool__', W_wrap_inquirypred),
('nb_divmod', '__divmod__', W_wrap_binaryfunc),
('nb_float', '__float__', W_wrap_unaryfunc),
('nb_floor_divide', '__floordiv__', W_wrap_binaryfunc),
('nb_index', '__index__', W_wrap_unaryfunc),
('nb_inplace_add', '__iadd__', W_wrap_binaryfunc),
('nb_inplace_and', '__iand__', W_wrap_binaryfunc),
('nb_inplace_floor_divide', '__ifloordiv__', W_wrap_binaryfunc),
('nb_inplace_lshift', '__ilshift__', W_wrap_binaryfunc),
('nb_inplace_multiply', '__imul__', W_wrap_binaryfunc),
('nb_inplace_or', '__ior__', W_wrap_binaryfunc),
# CPython is buggy here: it uses wrap_binaryfunc for nb_inplace_power, but
# it means you end up calling the cfunc with the wrong signature! We
    # correctly use W_wrap_ternaryfunc instead.
('nb_inplace_power', '__ipow__', W_wrap_ternaryfunc),
('nb_inplace_remainder', '__imod__', W_wrap_binaryfunc),
('nb_inplace_rshift', '__irshift__', W_wrap_binaryfunc),
('nb_inplace_subtract', '__isub__', W_wrap_binaryfunc),
('nb_inplace_true_divide', '__itruediv__', W_wrap_binaryfunc),
('nb_inplace_xor', '__ixor__', W_wrap_binaryfunc),
('nb_int', '__int__', W_wrap_unaryfunc),
('nb_invert', '__invert__', W_wrap_unaryfunc),
('nb_lshift', '__lshift__', W_wrap_binaryfunc),
('nb_multiply', '__mul__', W_wrap_binaryfunc),
('nb_negative', '__neg__', W_wrap_unaryfunc),
('nb_or', '__or__', W_wrap_binaryfunc),
('nb_positive', '__pos__', W_wrap_unaryfunc),
('nb_power', '__pow__', W_wrap_ternaryfunc),
('nb_remainder', '__mod__', W_wrap_binaryfunc),
('nb_rshift', '__rshift__', W_wrap_binaryfunc),
('nb_subtract', '__sub__', W_wrap_binaryfunc),
('nb_true_divide', '__truediv__', W_wrap_binaryfunc),
('nb_xor', '__xor__', W_wrap_binaryfunc),
('sq_ass_item', '__setitem__', W_wrap_sq_setitem),
('sq_ass_item', '__delitem__', W_wrap_sq_delitem),
('sq_concat', '__add__', W_wrap_binaryfunc),
('sq_contains', '__contains__', W_wrap_objobjproc),
('sq_inplace_concat', '__iadd__', W_wrap_binaryfunc),
('sq_inplace_repeat', '__imul__', W_wrap_indexargfunc),
('sq_item', '__getitem__', W_wrap_sq_item),
('sq_length', '__len__', W_wrap_lenfunc),
('sq_repeat', '__mul__', W_wrap_indexargfunc),
# ('tp_base', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_bases', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_call', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_clear', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_del', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_descr_get', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_descr_set', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_doc', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_getattr', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_getattro', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_hash', '__xxx__', AGS.W_SlotWrapper_...),
('tp_init', '__init__', W_wrap_init),
# ('tp_is_gc', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_iter', '__iter__', W_wrap_unaryfunc),
# ('tp_iternext', '__xxx__', AGS.W_SlotWrapper_...),
# tp_new SPECIAL-CASED
('tp_repr', '__repr__', W_wrap_unaryfunc),
# tp_richcompare SPECIAL-CASED
# ('tp_setattr', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_setattro', '__xxx__', AGS.W_SlotWrapper_...),
# ('tp_str', '__str__', W_wrap_unaryfunc),
# ('tp_traverse', '__xxx__', AGS.W_SlotWrapper_...),
('nb_matrix_multiply', '__matmul__', W_wrap_binaryfunc),
('nb_inplace_matrix_multiply', '__imatmul__', W_wrap_binaryfunc),
# ('am_await', '__await__', W_wrap_unaryfunc),
# ('am_aiter', '__aiter__', W_wrap_unaryfunc),
# ('am_anext', '__anext__', W_wrap_unaryfunc),
# ('tp_finalize', '__xxx__', AGS.W_SlotWrapper_...),
# extra HPy-specific slots
# ('tp_destroy', '__xxx__', AGS.W_SlotWrapper_...),
])
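# Illustrative note (added for clarity): fill_slot() below walks this table,
# so e.g. an HPy_nb_add slot ends up in the type's dict under '__add__',
# wrapped in the W_wrap_binaryfunc-based slot wrapper class.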
@specialize.arg(0)
def fill_slot(handles, w_type, hpyslot):
space = handles.space
slot_num = rffi.cast(lltype.Signed, hpyslot.c_slot)
# special cases
if slot_num == HPySlot_Slot.HPy_tp_new:
# this is the moral equivalent of CPython's add_tp_new_wrapper
cls = get_tp_new_wrapper_cls(handles)
w_func = cls(hpyslot.c_impl, w_type)
w_type.setdictvalue(space, '__new__', w_func)
return
elif slot_num == HPySlot_Slot.HPy_tp_destroy:
w_type.tp_destroy = llapi.cts.cast('HPyFunc_destroyfunc', hpyslot.c_impl)
return
elif slot_num == HPySlot_Slot.HPy_tp_richcompare:
for methname, opval in CMP_SLOTS:
cls = get_cmp_wrapper_cls(handles, methname, opval)
w_slot = cls(slot_num, methname, hpyslot.c_impl, w_type)
w_type.setdictvalue(space, methname, w_slot)
return
elif slot_num == HPySlot_Slot.HPy_bf_releasebuffer:
return
# generic cases
found = False
for slotname, methname, mixin in SLOTS:
assert methname != '__xxx__' # sanity check
n = getattr(HPySlot_Slot, 'HPy_' + slotname)
if slot_num == n:
found = True
cls = get_slot_cls(handles, mixin)
w_slot = cls(slot_num, methname, hpyslot.c_impl, w_type)
w_type.setdictvalue(space, methname, w_slot)
if not found:
raise oefmt(space.w_NotImplementedError, "Unimplemented slot: %s", str(slot_num))
|
py
|
1a5c9449a1b6e851aee677e56227aed62298b82a
|
"""
This class is inspired by the Black Knight scene in the movie
"Monty Python and the Holy Grail", where King Arthur fights the
Black Knight, slicing off his arms and legs, but the knight
refuses to concede defeat.
# BEGIN BLACK_KNIGHT_DEMO
>>> knight = BlackKnight()
>>> knight.member
next member is:
'an arm'
>>> del knight.member
BLACK KNIGHT (loses an arm)
-- 'Tis but a scratch.
>>> del knight.member
BLACK KNIGHT (loses another arm)
-- It's just a flesh wound.
>>> del knight.member
BLACK KNIGHT (loses a leg)
-- I'm invincible!
>>> del knight.member
BLACK KNIGHT (loses another leg)
-- All right, we'll call it a draw.
# END BLACK_KNIGHT_DEMO
"""
# BEGIN BLACK_KNIGHT
class BlackKnight:
def __init__(self):
self.members = ['an arm', 'another arm',
'a leg', 'another leg']
self.phrases = ["'Tis but a scratch.",
"It's just a flesh wound.",
"I'm invincible!",
"All right, we'll call it a draw."]
@property
def member(self):
print('next member is:')
return self.members[0]
@member.deleter
def member(self):
text = 'BLACK KNIGHT (loses {})\n-- {}'
print(text.format(self.members.pop(0), self.phrases.pop(0)))
# END BLACK_KNIGHT
|
py
|
1a5c9483af170ef4dbe03dd1781d457a9c9ad260
|
import _plotly_utils.basevalidators
class CustomdatasrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(self, plotly_name="customdatasrc", parent_name="icicle", **kwargs):
super(CustomdatasrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
py
|
1a5c954f32b6353601a5ecd952bd082b8b473e0b
|
import logging
import pytest
from toy_robot_challenge.command_processor import CommandProcessor
from toy_robot_challenge.positioning import Direction, Turn
from toy_robot_challenge.robot import Robot
@pytest.fixture()
def mock_robot(mocker):
return mocker.create_autospec(Robot, instance=True)
def test_create_command_processor(mock_robot):
command_processor = CommandProcessor(mock_robot)
assert command_processor._robot == mock_robot
def test_process_ignores_invalid_command(caplog, mock_robot):
caplog.set_level(logging.DEBUG)
command_processor = CommandProcessor(mock_robot)
command_processor.process("invalid-command")
assert mock_robot.place.called is False
assert mock_robot.report.called is False
assert mock_robot.rotate.called is False
assert mock_robot.move.called is False
assert "Detected command as invalid. Terminating command execution." in str(
caplog.records
)
@pytest.mark.parametrize(
"command", ["PLACE", "PLACE 1,2", "PLACE 1,NORTH,1", "PLACE 1,2,3", "PLACE NORTH"]
)
def test_process_does_not_place_robot_for_place_command_with_missing_place_arguments(
caplog, mock_robot, command
):
caplog.set_level(logging.DEBUG)
command_processor = CommandProcessor(mock_robot)
command_processor.process(command)
assert mock_robot.place.called is False
assert mock_robot.report.called is False
assert mock_robot.rotate.called is False
assert mock_robot.move.called is False
assert (
"PLACE command invoked but the arguments provided were invalid or missing (x,y,f)."
in str(caplog.records)
)
def test_process_does_not_place_robot_for_place_command_with_invalid_direction(
caplog, mock_robot
):
caplog.set_level(logging.DEBUG)
command_processor = CommandProcessor(mock_robot)
command_processor.process("PLACE 1,2,invalid")
assert mock_robot.place.called is False
assert mock_robot.report.called is False
assert mock_robot.rotate.called is False
assert mock_robot.move.called is False
assert "Invalid direction provided in PLACE command." in str(caplog.records)
def test_process_places_robot_for_valid_place_command(mock_robot):
command_processor = CommandProcessor(mock_robot)
command_processor.process("PLACE 1,2,NORTH")
mock_robot.place.assert_called_once_with(1, 2, Direction.NORTH)
assert mock_robot.report.called is False
assert mock_robot.rotate.called is False
assert mock_robot.move.called is False
def test_process_tells_robot_to_report_for_report_command(mock_robot):
command_processor = CommandProcessor(mock_robot)
command_processor.process("REPORT")
assert mock_robot.report.called is True
assert mock_robot.place.called is False
assert mock_robot.rotate.called is False
assert mock_robot.move.called is False
@pytest.mark.parametrize("command, turn", [("LEFT", Turn.LEFT), ("RIGHT", Turn.RIGHT)])
def test_process_tells_robot_to_rotate_for_left_or_right_command(
mock_robot, command, turn
):
command_processor = CommandProcessor(mock_robot)
command_processor.process(command)
mock_robot.rotate.assert_called_once_with(turn)
assert mock_robot.report.called is False
assert mock_robot.place.called is False
assert mock_robot.move.called is False
def test_process_tells_robot_to_move_for_move_command(mock_robot):
command_processor = CommandProcessor(mock_robot)
command_processor.process("MOVE")
assert mock_robot.move.called is True
assert mock_robot.report.called is False
assert mock_robot.place.called is False
assert mock_robot.rotate.called is False
|
py
|
1a5c95d5cfff4c0776ac5c9c401d01aee7d67621
|
#!/usr/bin/env python -O
# -*- coding: utf-8 -*-
#
# tests.unit._dao.TestRTKRPN.py is part of The RTK Project
#
# All rights reserved.
"""
This is the test class for testing the RTKRPN module algorithms and
models.
"""
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(dirname(dirname(__file__)))) + "/rtk", )
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
import unittest
from nose.plugins.attrib import attr
from dao.RTKRPN import RTKRPN
__author__ = 'Andrew Rowland'
__email__ = '[email protected]'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2017 Andrew "weibullguy" Rowland'
class TestRTKRPN(unittest.TestCase):
"""
Class for testing the RTKRPN class.
"""
attributes = (1, 'None', 'No effect.', 'severity', 1)
def setUp(self):
"""
Sets up the test fixture for the RTKRPN class.
"""
engine = create_engine('sqlite:////tmp/TestCommonDB.rtk', echo=False)
session = scoped_session(sessionmaker())
session.remove()
session.configure(bind=engine, autoflush=False, expire_on_commit=False)
self.DUT = session.query(RTKRPN).first()
self.DUT.name = self.attributes[1]
self.DUT.description = self.attributes[2]
self.DUT.rpn_type = self.attributes[3]
self.DUT.value = self.attributes[4]
session.commit()
@attr(all=True, unit=True)
def test00_RTKRPN_create(self):
"""
(TestRTKRPN) __init__ should create an RTKRPN model
"""
self.assertTrue(isinstance(self.DUT, RTKRPN))
# Verify class attributes are properly initialized.
self.assertEqual(self.DUT.__tablename__, 'rtk_rpn')
self.assertEqual(self.DUT.rpn_id, 1)
self.assertEqual(self.DUT.description, 'No effect.')
self.assertEqual(self.DUT.name, 'None')
self.assertEqual(self.DUT.rpn_type, 'severity')
self.assertEqual(self.DUT.value, 1)
@attr(all=True, unit=True)
def test01_RTKRPN_get_attributes(self):
"""
(TestRTKRPN) get_attributes should return a tuple of attributes values on success
"""
self.assertEqual(self.DUT.get_attributes(), self.attributes)
@attr(all=True, unit=True)
def test02a_RTKRPN_set_attributes(self):
"""
(TestRTKRPN) set_attributes should return a zero error code on success
"""
_attributes = ('Very High',
'System inoperable with destructive failure without ' \
'compromising safety.', 'severity', 8)
_error_code, _msg = self.DUT.set_attributes(_attributes)
self.assertEqual(_error_code, 0)
self.assertEqual(_msg, "RTK SUCCESS: Updating RTKRPN {0:d} " \
"attributes.".format(self.DUT.rpn_id))
@attr(all=True, unit=True)
def test02b_RTKRPN_set_attributes_to_few(self):
"""
(TestRTKRPN) set_attributes should return a 40 error code when passed too few attributes
"""
_attributes = ('Very High',
'System inoperable with destructive failure without ' \
'compromising safety.', 'severity')
_error_code, _msg = self.DUT.set_attributes(_attributes)
self.assertEqual(_error_code, 40)
self.assertEqual(_msg, "RTK ERROR: Insufficient number of input " \
"values to RTKRPN.set_attributes().")
@attr(all=True, unit=True)
def test02c_RTKRPN_set_attributes_wrong_type(self):
"""
(TestRTKRPN) set_attributes should return a 10 error code when passed the wrong type
"""
_attributes = ('Very High',
'System inoperable with destructive failure without ' \
'compromising safety.', 'severity', 'eight')
_error_code, _msg = self.DUT.set_attributes(_attributes)
self.assertEqual(_error_code, 10)
self.assertEqual(_msg, "RTK ERROR: Incorrect data type when " \
"converting one or more RTKRPN " \
"attributes.")
|
py
|
1a5c95ea3be4d6247a69e67800f45af0f7c4ac2f
|
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Standard library imports
import math
from typing import List
# Third-party imports
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
# First-party imports
from gluonts.block.feature import FeatureEmbedder
from gluonts.model.common import Tensor
from gluonts.core.component import validated
class LookupValues(gluon.HybridBlock):
def __init__(self, values: mx.nd.NDArray, **kwargs):
super().__init__(**kwargs)
with self.name_scope():
self.bin_values = self.params.get_constant("bin_values", values)
def hybrid_forward(self, F, indices, bin_values):
return F.take(bin_values, indices)
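# Illustrative note (added for clarity): LookupValues maps integer bin indices
# back to the real-valued bin centres, e.g. with bin_values = [-1.0, 0.0, 1.0]
# an index tensor [2, 0] is looked up as [1.0, -1.0].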
def conv1d(channels, kernel_size, in_channels, use_bias=True, **kwargs):
"""
Conv1D with better default initialization.
"""
n = in_channels
kernel_size = (
kernel_size if isinstance(kernel_size, list) else [kernel_size]
)
for k in kernel_size:
n *= k
stdv = 1.0 / math.sqrt(n)
winit = mx.initializer.Uniform(stdv)
if use_bias:
binit = mx.initializer.Uniform(stdv)
else:
binit = "zeros"
return nn.Conv1D(
channels=channels,
kernel_size=kernel_size,
in_channels=in_channels,
use_bias=use_bias,
weight_initializer=winit,
bias_initializer=binit,
**kwargs,
)
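# Worked example (illustrative, not part of the original module): with
# in_channels=4 and kernel_size=2 the fan-in is n = 4 * 2 = 8, so weights
# (and the bias, when used) are initialized from
# Uniform(-1/sqrt(8), 1/sqrt(8)) ~= Uniform(-0.354, 0.354).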
class CausalDilatedResidue(nn.HybridBlock):
def __init__(
self,
n_residue,
n_skip,
dilation,
return_dense_out,
kernel_size,
**kwargs,
):
super().__init__(**kwargs)
self.n_residue = n_residue
self.n_skip = n_skip
self.dilation = dilation
self.kernel_size = kernel_size
self.return_dense_out = return_dense_out
with self.name_scope():
self.conv_sigmoid = conv1d(
in_channels=n_residue,
channels=n_residue,
kernel_size=kernel_size,
dilation=dilation,
activation="sigmoid",
)
self.conv_tanh = conv1d(
in_channels=n_residue,
channels=n_residue,
kernel_size=kernel_size,
dilation=dilation,
activation="tanh",
)
self.skip = conv1d(
in_channels=n_residue, channels=n_skip, kernel_size=1
)
self.residue = (
conv1d(
in_channels=n_residue, channels=n_residue, kernel_size=1
)
if self.return_dense_out
else None
)
def hybrid_forward(self, F, x):
u = self.conv_sigmoid(x) * self.conv_tanh(x)
s = self.skip(u)
if not self.return_dense_out:
return s, F.zeros(shape=(1,))
output = self.residue(u)
output = output + F.slice_axis(
x, begin=(self.kernel_size - 1) * self.dilation, end=None, axis=-1
)
return s, output
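# Descriptive note (added for clarity): hybrid_forward above is WaveNet's gated
# activation unit -- an elementwise product of a sigmoid "gate" and a tanh
# "filter". `s` feeds the summed skip connections, and in all but the last
# block `output` adds the 1x1-projected result back onto the (trimmed) input
# as the residual passed to the next dilated block.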
class WaveNet(nn.HybridBlock):
def __init__(
self,
bin_values: List[float],
n_residue: int,
n_skip: int,
dilation_depth: int,
n_stacks: int,
act_type: str,
cardinality: List[int],
embedding_dimension: int,
pred_length: int,
**kwargs,
):
super().__init__(**kwargs)
self.dilation_depth = dilation_depth
self.pred_length = pred_length
self.mu = len(bin_values)
self.dilations = WaveNet._get_dilations(
dilation_depth=dilation_depth, n_stacks=n_stacks
)
self.receptive_field = WaveNet.get_receptive_field(
dilation_depth=dilation_depth, n_stacks=n_stacks
)
self.trim_lengths = [
sum(self.dilations) - sum(self.dilations[: i + 1])
for i, _ in enumerate(self.dilations)
]
with self.name_scope():
self.feature_embedder = FeatureEmbedder(
cardinalities=cardinality,
embedding_dims=[embedding_dimension for _ in cardinality],
)
# self.post_transform = LookupValues(mx.nd.array(bin_values))
self.target_embed = nn.Embedding(
input_dim=self.mu, output_dim=n_residue
)
self.residuals = nn.HybridSequential()
for i, d in enumerate(self.dilations):
is_not_last = i + 1 < len(self.dilations)
self.residuals.add(
CausalDilatedResidue(
n_residue=n_residue,
n_skip=n_skip,
dilation=d,
return_dense_out=is_not_last,
kernel_size=2,
)
)
std = 1.0 / math.sqrt(n_residue)
self.conv_project = nn.Conv1D(
channels=n_residue,
kernel_size=1,
use_bias=True,
weight_initializer=mx.init.Uniform(std),
bias_initializer="zero",
)
self.conv1 = conv1d(
in_channels=n_skip, channels=n_skip, kernel_size=1
)
self.conv2 = conv1d(
in_channels=n_skip, channels=self.mu, kernel_size=1
)
self.output_act = (
nn.ELU()
if act_type == "elu"
else nn.Activation(activation=act_type)
)
self.cross_entropy_loss = gluon.loss.SoftmaxCrossEntropyLoss()
@staticmethod
def _get_dilations(dilation_depth, n_stacks):
return [2 ** i for i in range(dilation_depth)] * n_stacks
@staticmethod
def get_receptive_field(dilation_depth, n_stacks):
"""
Return the length of the receptive field
"""
dilations = WaveNet._get_dilations(
dilation_depth=dilation_depth, n_stacks=n_stacks
)
return sum(dilations) + 1
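    # Worked example (illustrative): dilation_depth=3 and n_stacks=2 give
    # dilations [1, 2, 4, 1, 2, 4], so the receptive field is
    # sum([1, 2, 4, 1, 2, 4]) + 1 = 15 time steps.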
def hybrid_forward(
self,
F,
feat_static_cat: Tensor,
past_target: Tensor,
past_observed_values: Tensor,
past_time_feat: Tensor,
future_time_feat: Tensor,
future_target: Tensor,
future_observed_values: Tensor,
scale: Tensor,
) -> Tensor:
embedded_cat = self.feature_embedder(feat_static_cat)
static_feat = F.concat(embedded_cat, F.log(scale + 1.0), dim=1)
full_target = F.concat(past_target, future_target, dim=-1).astype(
"int32"
)
full_observed = F.expand_dims(
F.concat(past_observed_values, future_observed_values, dim=-1),
axis=1,
)
full_time_features = F.concat(past_time_feat, future_time_feat, dim=-1)
repeated_static_feat = F.repeat(
F.expand_dims(static_feat, axis=-1),
repeats=self.pred_length + self.receptive_field,
axis=-1,
)
full_features = F.concat(
full_time_features, full_observed, repeated_static_feat, dim=1
)
# (batch_size, embed_dim, sequence_length)
o = self.target_embed(
F.slice_axis(full_target, begin=0, end=-1, axis=-1)
).swapaxes(1, 2)
o = F.concat(
o, F.slice_axis(full_features, begin=1, end=None, axis=-1), dim=1
)
o = self.conv_project(o)
skip_outs = []
for i, d in enumerate(self.dilations):
skip, o = self.residuals[i](o)
skip_trim = F.slice_axis(
skip, begin=self.trim_lengths[i], end=None, axis=-1
)
skip_outs.append(skip_trim)
y = sum(skip_outs)
y = self.output_act(y)
y = self.conv1(y)
y = self.output_act(y)
y = self.conv2(y)
unnormalized_output = y.swapaxes(1, 2)
label = F.slice_axis(
full_target, begin=self.receptive_field, end=None, axis=-1
)
loss_weight = F.slice_axis(
full_observed, begin=self.receptive_field, end=None, axis=-1
)
loss_weight = F.expand_dims(loss_weight, axis=2)
loss = self.cross_entropy_loss(unnormalized_output, label, loss_weight)
return loss
class WaveNetSampler(WaveNet):
"""
Runs Wavenet generation in an auto-regressive manner using caching for
speedup [PKC+16]_.
Same arguments as WaveNet. In addition
Parameters
----------
pred_length
Length of the prediction horizon
num_samples
Number of sample paths to generate in parallel in the graph
temperature
If set to 1.0 (default), sample according to estimated probabilities, if set to 0.0
most likely sample at each step is chosen.
post_transform
An optional post transform that will be applied to the samples
"""
@validated()
def __init__(
self,
bin_values: List[float],
num_samples: int,
temperature: float = 1.0,
**kwargs,
):
"""
Same arguments as WaveNet. In addition
:param pred_length: prediction length
:param num_samples: number of sample paths to generate in parallel in the graph
:param temperature: if set to 1.0 (default), sample according to estimated probabilities
- if set to 0.0 most likely sample at each step is chosen.
:param post_transform: An optional post transform that will be applied to the samples.
"""
super().__init__(bin_values=bin_values, **kwargs)
self.num_samples = num_samples
self.temperature = temperature
with self.name_scope():
self.post_transform = LookupValues(mx.nd.array(bin_values))
def hybrid_forward(
self,
F,
feat_static_cat: Tensor,
past_target: Tensor,
past_observed_values: Tensor,
past_time_feat: Tensor,
future_time_feat: Tensor,
scale: Tensor,
) -> Tensor:
embedded_cat = self.feature_embedder(feat_static_cat)
static_feat = F.concat(embedded_cat, F.log(scale + 1.0), dim=1)
past_target = past_target.astype("int32")
def blow_up(u):
"""
Expand to (batch_size x num_samples)
"""
return F.repeat(u, repeats=self.num_samples, axis=0)
def is_last_layer(i):
return i + 1 == len(self.dilations)
queues = []
full_time_features = F.concat(past_time_feat, future_time_feat, dim=-1)
future_observed_values = F.slice_axis(
future_time_feat, begin=0, end=1, axis=1
).ones_like()
full_observed = F.concat(
F.expand_dims(past_observed_values, axis=1),
future_observed_values,
dim=-1,
)
repeated_static_feat = F.repeat(
F.expand_dims(static_feat, axis=-1),
repeats=self.pred_length + self.receptive_field,
axis=-1,
)
full_features = F.concat(
full_time_features, full_observed, repeated_static_feat, dim=1
)
feature_slice = F.slice_axis(
full_features,
begin=-self.pred_length - self.receptive_field + 1,
end=None,
axis=-1,
)
tmp = F.slice_axis(
past_target, begin=-self.receptive_field, end=None, axis=-1
)
o = self.target_embed(tmp).swapaxes(1, 2)
o = F.concat(
o,
F.slice_axis(
feature_slice, begin=-self.receptive_field, end=None, axis=-1
),
dim=1,
)
o = self.conv_project(o)
for i, d in enumerate(self.dilations):
sz = 1 if d == 2 ** (self.dilation_depth - 1) else d * 2
_, o = self.residuals[i](o)
if not is_last_layer(i):
o_chunk = F.slice_axis(o, begin=-sz - 1, end=-1, axis=-1)
else:
o_chunk = o
queues.append(blow_up(o_chunk))
res = F.slice_axis(past_target, begin=-2, end=None, axis=-1)
res = blow_up(res)
for n in range(self.pred_length):
queues_next = []
o = self.target_embed(
F.slice_axis(res, begin=-2, end=None, axis=-1)
).swapaxes(1, 2)
b = F.slice_axis(
full_features,
begin=self.receptive_field + n - 1,
end=self.receptive_field + n + 1,
axis=-1,
)
b = blow_up(b)
o = F.concat(o, b, dim=1)
o = self.conv_project(o)
skip_outs = []
for i, d in enumerate(self.dilations):
skip, o = self.residuals[i](o)
skip_outs.append(skip)
if not is_last_layer(i):
q = queues[i]
o = F.concat(q, o, num_args=2, dim=-1)
queues_next.append(
F.slice_axis(o, begin=1, end=None, axis=-1)
)
queues = queues_next
y = sum(skip_outs)
y = self.output_act(y)
y = self.conv1(y)
y = self.output_act(y)
unnormalized_outputs = self.conv2(y)
if self.temperature > 0:
probs = F.softmax(
unnormalized_outputs / self.temperature, axis=1
)
y = F.sample_multinomial(probs.swapaxes(1, 2))
else:
y = F.argmax(unnormalized_outputs, axis=1)
y = y.astype("int32")
res = F.concat(res, y, num_args=2, dim=-1)
samples = F.slice_axis(res, begin=-self.pred_length, end=None, axis=-1)
samples = samples.reshape(
shape=(-1, self.num_samples, self.pred_length)
)
samples = self.post_transform(samples)
samples = F.broadcast_mul(scale.expand_dims(axis=1), samples)
return samples
|
py
|
1a5c97f2e4e7f70007a1cb3388ce2a205339a2a5
|
from typing_extensions import Literal
from pydantic import BaseModel, Field, conint
from enum import Enum
class EdgeType(str, Enum):
"""
An enumeration for the types of edges.
"""
Surface = "Surface"
Space = "Space"
Flight = "Flight"
class Edge(BaseModel):
"""
Base class for all edges.
"""
name: str = Field(
...,
title="Name",
description="name of the edge",
)
description: str = Field(
...,
title="Description",
description="short description of the edge",
)
origin_id: conint(strict=True) = Field(
...,
title="Origin ID",
description="ID of the origin node"
)
destination_id: conint(strict=True) = Field(
...,
title="Destination ID",
description="ID of the destination node",
)
class SurfaceEdge(Edge):
"""
An edge between two surface nodes.
"""
type: Literal[EdgeType.Surface] = Field(
title="Type",
description="Type of edge",
)
distance: float = Field(
...,
title="Distance",
description="Distance of surface edge",
ge=0
)
class SpaceEdge(Edge):
"""
An edge between two nodes using a specified list of propulsive burns.
"""
type: Literal[EdgeType.Space] = Field(
title="Type",
description="Type of edge",
)
duration: float = Field(
...,
title="Duration",
description="Duration of space edge",
ge=0
)
class FlightEdge(Edge):
"""
An edge between two nodes using flight architectures that are known to close
with a given cargo and crew capacity.
"""
type: Literal[EdgeType.Flight] = Field(
...,
title="Type",
description="Type of edge",
)
duration: float = Field(
...,
        title="Duration",
description="Duration of flight edge",
ge=0
)
max_crew: conint(strict=True, ge=0) = Field(
...,
title="Max Crew",
description="Crew capacity for flight",
)
max_cargo: float = Field(
...,
title="Max Cargo",
description="Cargo capacity for flight",
ge=0
)
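# Illustrative usage sketch (hypothetical values, not part of the original
# module); pydantic enforces the declared constraints such as duration >= 0
# and max_crew being a strict non-negative int:
#
#     edge = FlightEdge(
#         name="LEO to Gateway",
#         description="Crewed flight leg",
#         origin_id=1,
#         destination_id=2,
#         type=EdgeType.Flight,
#         duration=3.0,
#         max_crew=4,
#         max_cargo=500.0,
#     )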
|
py
|
1a5c97fedf7cd8e8b1b3bc5bf33ca23b90d0b8ed
|
from VMWConfigFile import *
from pyVim import connect
from pyVim.connect import SmartConnect, Disconnect
from pyVmomi import vim, vmodl
import atexit
import os
import ssl
import requests
import argparse
import time
# Disabling urllib3 ssl warnings
requests.packages.urllib3.disable_warnings()
# Disabling SSL certificate verification
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_NONE
def get_vim_objects(content, vim_type):
'''Get vim objects of a given type.'''
return [item for item in content.viewManager.CreateContainerView(
content.rootFolder, [vim_type], recursive=True
).view]
def getDatastoreId(name):
ds_list = getAllDatastores()
print(ds_list)
for ds in ds_list:
if ds['name'] == name:
return ds['moId']
return ""
def getAllDatastores():
try:
si = None
try:
si = connect.SmartConnect(host=vc_settings["vcenter"],
user=vc_settings["user"],
pwd=vc_settings["password"],
port=443,
sslContext=context)
except IOError as e:
pass
atexit.register(Disconnect, si)
content = si.RetrieveContent()
obj_view = content.viewManager.CreateContainerView(content.rootFolder,[vim.Datastore],True)
ds_list = obj_view.view
obj_view.Destroy()
datastores = []
for ds in ds_list:
datastores.append({'name' : ds.name, 'moId' : ds._moId})
except vmodl.MethodFault as e:
print("Caught vmodl fault: %s" % e.msg)
return 1
except Exception as e:
print("Caught exception: %s" % str(e))
return 1
return datastores
|
py
|
1a5c9a47758c5d31c56717edb5d7a1eefe26eeba
|
# -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.tenant
class ClusterAuditLog(object):
"""Implementation of the 'ClusterAuditLog' model.
Specifies information about a single Cluster audit log.
When an action (such as pausing a Protection Job) occurs, an audit log is
generated that provides details about the action.
Attributes:
action (string): Specifies the action that caused the log to be
generated.
details (string): Specifies more information about the action.
domain (string): Specifies the domain of the user who caused the
action that generated the log.
entity_id (string): Specifies the id of the entity (object) that the
action is invoked on.
entity_name (string): Specifies the entity (object) name that the
action is invoked on. For example, if a Job called BackupEng is
paused, this field returns BackupEng.
entity_type (string): Specifies the type of the entity (object) that
the action is invoked on. For example, if a Job called BackupEng
is paused, this field returns 'Protection Job'.
human_timestamp (string): Specifies the time when the log was
generated. The time is specified using a human readable
timestamp.
impersonation (bool): Specifies if the log was generated during
impersonation.
new_record (string): Specifies the record after the action is
invoked.
original_tenant (Tenant): Specifies details about a tenant.
previous_record (string): Specifies the record before the action is
invoked.
tenant (Tenant): Specifies details about a tenant.
timestamp_usecs (long|int): Specifies the time when the log was
generated. The time is specified using a Unix epoch Timestamp (in
microseconds).
user_name (string): Specifies the user who caused the action that
generated the log.
"""
# Create a mapping from Model property names to API property names
_names = {
"action":'action',
"details":'details',
"domain":'domain',
"entity_id":'entityId',
"entity_name":'entityName',
"entity_type":'entityType',
"human_timestamp":'humanTimestamp',
"impersonation":'impersonation',
"new_record":'newRecord',
"original_tenant":'originalTenant',
"previous_record":'previousRecord',
"tenant":'tenant',
"timestamp_usecs":'timestampUsecs',
"user_name":'userName'
}
def __init__(self,
action=None,
details=None,
domain=None,
entity_id=None,
entity_name=None,
entity_type=None,
human_timestamp=None,
impersonation=None,
new_record=None,
original_tenant=None,
previous_record=None,
tenant=None,
timestamp_usecs=None,
user_name=None):
"""Constructor for the ClusterAuditLog class"""
# Initialize members of the class
self.action = action
self.details = details
self.domain = domain
self.entity_id = entity_id
self.entity_name = entity_name
self.entity_type = entity_type
self.human_timestamp = human_timestamp
self.impersonation = impersonation
self.new_record = new_record
self.original_tenant = original_tenant
self.previous_record = previous_record
self.tenant = tenant
self.timestamp_usecs = timestamp_usecs
self.user_name = user_name
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
action = dictionary.get('action')
details = dictionary.get('details')
domain = dictionary.get('domain')
entity_id = dictionary.get('entityId')
entity_name = dictionary.get('entityName')
entity_type = dictionary.get('entityType')
human_timestamp = dictionary.get('humanTimestamp')
impersonation = dictionary.get('impersonation')
new_record = dictionary.get('newRecord')
original_tenant = cohesity_management_sdk.models.tenant.Tenant.from_dictionary(dictionary.get('originalTenant')) if dictionary.get('originalTenant') else None
previous_record = dictionary.get('previousRecord')
tenant = cohesity_management_sdk.models.tenant.Tenant.from_dictionary(dictionary.get('tenant')) if dictionary.get('tenant') else None
timestamp_usecs = dictionary.get('timestampUsecs')
user_name = dictionary.get('userName')
# Return an object of this model
return cls(action,
details,
domain,
entity_id,
entity_name,
entity_type,
human_timestamp,
impersonation,
new_record,
original_tenant,
previous_record,
tenant,
timestamp_usecs,
user_name)
|
py
|
1a5c9a770e77fb3e43a34b82ee41055366768d07
|
# coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class K8sIoApiCoreV1HTTPGetAction(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'host': 'str',
'http_headers': 'list[K8sIoApiCoreV1HTTPHeader]',
'path': 'str',
'scheme': 'str'
}
attribute_map = {
'host': 'host',
'http_headers': 'httpHeaders',
'path': 'path',
'scheme': 'scheme'
}
def __init__(self, host=None, http_headers=None, path=None, scheme=None):
"""
K8sIoApiCoreV1HTTPGetAction - a model defined in Swagger
"""
self._host = None
self._http_headers = None
self._path = None
self._scheme = None
if host is not None:
self.host = host
if http_headers is not None:
self.http_headers = http_headers
if path is not None:
self.path = path
if scheme is not None:
self.scheme = scheme
@property
def host(self):
"""
Gets the host of this K8sIoApiCoreV1HTTPGetAction.
Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.
:return: The host of this K8sIoApiCoreV1HTTPGetAction.
:rtype: str
"""
return self._host
@host.setter
def host(self, host):
"""
Sets the host of this K8sIoApiCoreV1HTTPGetAction.
Host name to connect to, defaults to the pod IP. You probably want to set \"Host\" in httpHeaders instead.
:param host: The host of this K8sIoApiCoreV1HTTPGetAction.
:type: str
"""
self._host = host
@property
def http_headers(self):
"""
Gets the http_headers of this K8sIoApiCoreV1HTTPGetAction.
Custom headers to set in the request. HTTP allows repeated headers.
:return: The http_headers of this K8sIoApiCoreV1HTTPGetAction.
:rtype: list[K8sIoApiCoreV1HTTPHeader]
"""
return self._http_headers
@http_headers.setter
def http_headers(self, http_headers):
"""
Sets the http_headers of this K8sIoApiCoreV1HTTPGetAction.
Custom headers to set in the request. HTTP allows repeated headers.
:param http_headers: The http_headers of this K8sIoApiCoreV1HTTPGetAction.
:type: list[K8sIoApiCoreV1HTTPHeader]
"""
self._http_headers = http_headers
@property
def path(self):
"""
Gets the path of this K8sIoApiCoreV1HTTPGetAction.
Path to access on the HTTP server.
:return: The path of this K8sIoApiCoreV1HTTPGetAction.
:rtype: str
"""
return self._path
@path.setter
def path(self, path):
"""
Sets the path of this K8sIoApiCoreV1HTTPGetAction.
Path to access on the HTTP server.
:param path: The path of this K8sIoApiCoreV1HTTPGetAction.
:type: str
"""
self._path = path
@property
def scheme(self):
"""
Gets the scheme of this K8sIoApiCoreV1HTTPGetAction.
Scheme to use for connecting to the host. Defaults to HTTP.
:return: The scheme of this K8sIoApiCoreV1HTTPGetAction.
:rtype: str
"""
return self._scheme
@scheme.setter
def scheme(self, scheme):
"""
Sets the scheme of this K8sIoApiCoreV1HTTPGetAction.
Scheme to use for connecting to the host. Defaults to HTTP.
:param scheme: The scheme of this K8sIoApiCoreV1HTTPGetAction.
:type: str
"""
self._scheme = scheme
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, K8sIoApiCoreV1HTTPGetAction):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
py
|
1a5c9cf531084fa4b9fc871c1ae2330b17636d96
|
# coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 10
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from isi_sdk_9_0_0.models.certificate_settings_settings import CertificateSettingsSettings # noqa: F401,E501
class CertificateSettings(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'settings': 'CertificateSettingsSettings'
}
attribute_map = {
'settings': 'settings'
}
def __init__(self, settings=None): # noqa: E501
"""CertificateSettings - a model defined in Swagger""" # noqa: E501
self._settings = None
self.discriminator = None
if settings is not None:
self.settings = settings
@property
def settings(self):
"""Gets the settings of this CertificateSettings. # noqa: E501
# noqa: E501
:return: The settings of this CertificateSettings. # noqa: E501
:rtype: CertificateSettingsSettings
"""
return self._settings
@settings.setter
def settings(self, settings):
"""Sets the settings of this CertificateSettings.
# noqa: E501
:param settings: The settings of this CertificateSettings. # noqa: E501
:type: CertificateSettingsSettings
"""
self._settings = settings
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, CertificateSettings):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
py
|
1a5c9d7db5681a71903c37d5422071f9902c0fe4
|
IG_USERNAME = "memersgansta"
IG_PASSWORD = "Itiscomplsxtocrack12@"
|
py
|
1a5c9da7fa7a0da648b175fafbed9ab51c2dc213
|
# Task 1
# Given two whole numbers - the lengths of the legs of a right-angled triangle - output its area.
a = 1
b = 2
s = a * b / 2
print("The area is ", s)
# Task 2
# Input a natural number n and output its last digit.
n = input("Input a natural number ")
print(str(n)[-1])
# Task 3
# Input a two-digit natural number and output the sum of its digits.
n = int(input("Input a two-digit natural number "))
print(n // 10 + n % 10)
# Task 4
# You are given the first and second number in an arithmetic progression and natural number n. Find n-th element of arithmetic progression.
n1 = 2
n2 = 4
n = 7
n_th = n1 + (n - 1) * (n2 - n1)
print(n, "-th number is ", n_th)
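# Sanity check (illustrative): with n1 = 2, n2 = 4 (common difference 2) and
# n = 7, the formula gives n_th = 2 + (7 - 1) * (4 - 2) = 14.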
|
py
|
1a5c9e243ab63d3e752d0b8e010cca61ee299ca4
|
# -*- coding: utf-8 -*-
# Based on:
# http://docs.opencv.org/trunk/d7/d8b/tutorial_py_lucas_kanade.html
# https://github.com/opencv/opencv/blob/master/samples/python/opt_flow.py
#
# Outputs image where direction responds to hue, length by brightness
# 0° Blue, 60° Magenta, 120° Red, 180° Yellow, 240° Green, 300° Cyan
import argparse
import cv2
import locale
import os
from glob import glob
import numpy as np
from pprint import pprint
import sys
try:
locale.setlocale(locale.LC_ALL, 'en_US')
except locale.Error:
locale.setlocale(locale.LC_ALL, 'english-us')
# input
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_DIR", default="frames/*.png", help="Path to frames directory")
parser.add_argument('-out', dest="OUTPUT_DIR", default="frames_flow/", help="Path to output directory")
# init input
args = parser.parse_args()
# if not os.path.exists(args.OUTPUT_DIR):
# os.makedirs(args.OUTPUT_DIR)
def drawHsv(flow):
h, w = flow.shape[:2]
mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
hsv = np.zeros((h, w, 3), np.uint8)
hsv[...,0] = ang*180/np.pi/2
hsv[...,1] = 255
hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
return bgr
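# Descriptive note (added for clarity): the angle is halved (ang*180/np.pi/2)
# so the full 0-360 degree range fits OpenCV's 8-bit hue range of 0-179;
# the magnitude is normalized to 0-255 and drives the value (brightness) channel.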
frames = glob(args.INPUT_DIR)
frameCount = len(frames)
print("Found %s frames" % locale.format("%d", frameCount, grouping=True))
frames.sort()
prvs = None
for i, f in enumerate(frames):
im = cv2.imread(f)
if prvs is None:
prvs = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
continue
nxt = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)
flow = cv2.calcOpticalFlowFarneback(prvs,nxt, None, 0.5, 3, 15, 3, 5, 1.2, 0)
bgr = drawHsv(flow)
cv2.imshow('frame',bgr)
cv2.waitKey(30)
prvs = nxt
cv2.destroyAllWindows()
|
py
|
1a5c9eb42819eae80b8f9056a441ce93e624ad8d
|
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Test grammar induction performance of StructFormer."""
import argparse
import collections
import matplotlib.pyplot as plt
from nltk.parse import DependencyGraph
import numpy
import torch
from structformer import data_ptb
from structformer import tree_utils
from structformer.hinton import plot
def mean(x):
return sum(x) / len(x)
@torch.no_grad()
def test(parser, corpus, device, prt=False, gap=0):
"""Compute UF1 and UAS scores.
Args:
parser: pretrained model
corpus: labeled corpus
device: cpu or gpu
prt: bool, whether print examples
gap: distance gap for building non-binary tree
Returns:
UF1: unlabeled F1 score for constituency parsing
"""
parser.eval()
prec_list = []
reca_list = []
f1_list = []
dtree_list = []
corpus_sys = {}
corpus_ref = {}
nsens = 0
word2idx = corpus.dictionary.word2idx
dataset = zip(corpus.test_sens, corpus.test_trees, corpus.test_nltktrees)
for sen, sen_tree, sen_nltktree in dataset:
x = [word2idx[w] if w in word2idx else word2idx['<unk>'] for w in sen]
data = torch.LongTensor([x]).to(device)
pos = torch.LongTensor([list(range(len(sen)))]).to(device)
_, p_dict = parser(data, pos)
block = p_dict['block']
cibling = p_dict['cibling']
head = p_dict['head']
distance = p_dict['distance']
height = p_dict['height']
distance = distance.clone().squeeze(0).cpu().numpy().tolist()
height = height.clone().squeeze(0).cpu().numpy().tolist()
head = head.clone().squeeze(0).cpu().numpy()
max_height = numpy.max(height)
parse_tree = tree_utils.build_tree(distance, sen, gap=gap)
model_out, _ = tree_utils.get_brackets(parse_tree)
std_out, _ = tree_utils.get_brackets(sen_tree)
overlap = model_out.intersection(std_out)
corpus_sys[nsens] = tree_utils.mrg(parse_tree)
corpus_ref[nsens] = tree_utils.mrg_labeled(sen_nltktree)
prec = float(len(overlap)) / (len(model_out) + 1e-8)
reca = float(len(overlap)) / (len(std_out) + 1e-8)
if not std_out:
reca = 1.
if not model_out:
prec = 1.
f1 = 2 * prec * reca / (prec + reca + 1e-8)
prec_list.append(prec)
reca_list.append(reca)
f1_list.append(f1)
new_words = []
true_words = sen_nltktree.pos()
for d, c, w, ph in zip(distance, height, sen, head):
next_word = true_words.pop(0)
while next_word[1] not in data_ptb.WORD_TAGS:
next_word = true_words.pop(0)
new_words.append({
'address': len(new_words) + 1,
'word': next_word[0],
'lemma': None,
'ctag': None,
'tag': next_word[1],
'feats': None,
'head': numpy.argmax(ph) + 1 if c < max_height else 0,
'deps': collections.defaultdict(list),
'rel': None,
'distance': d,
'height': c
})
while true_words:
next_word = true_words.pop(0)
assert next_word[1] not in data_ptb.WORD_TAGS
dtree = DependencyGraph()
for w in new_words:
dtree.add_node(w)
dtree_list.append(dtree)
if prt and len(dtree_list) % 100 == 0:
cibling = cibling.clone().squeeze(0).cpu().numpy()
block = block.clone().squeeze(0).cpu().numpy()
for word_i, d_i, imp_i, block_i, cibling_i, head_i in zip(
sen, distance, height, block, cibling, head):
print('%20s\t%10.2f\t%5.2f\t%s\t%s\t%s' %
(word_i, d_i, imp_i, plot(block_i, max_val=1.),
plot(head_i, max_val=1), plot(cibling_i, max_val=1.)))
print('Standard output:', sen_tree)
print('Model output:', parse_tree)
print(dtree.to_conll(10))
print()
fig_i, ax_i = plt.subplots()
ax_i.set_xticks(numpy.arange(len(sen)))
ax_i.set_yticks(numpy.arange(len(sen)))
ax_i.set_xticklabels(sen)
ax_i.set_yticklabels(sen)
plt.setp(
ax_i.get_xticklabels(),
rotation=45,
ha='right',
rotation_mode='anchor')
for row in range(len(sen)):
for col in range(len(sen)):
_ = ax_i.text(
col,
row,
'%.2f' % (head[row, col]),
ha='center',
va='center',
color='w')
fig_i.tight_layout()
plt.savefig(
'./figures/sentence-%d.png' % (len(dtree_list)),
dpi=300,
format='png')
nsens += 1
print('Constituency parsing performance:')
print('Mean Prec: %.4f, Mean Reca: %.4f, Mean F1: %.4f' %
(mean(prec_list), mean(reca_list), mean(f1_list)))
correct, total = tree_utils.corpus_stats_labeled(corpus_sys, corpus_ref)
print(correct)
print(total)
print('SBAR: %.4f' % (correct['SBAR'] / total['SBAR']))
print('NP: %.4f' % (correct['NP'] / total['NP']))
print('VP: %.4f' % (correct['VP'] / total['VP']))
print('PP: %.4f' % (correct['PP'] / total['PP']))
print('ADJP: %.4f' % (correct['ADJP'] / total['ADJP']))
print('ADVP: %.4f' % (correct['ADVP'] / total['ADVP']))
print(tree_utils.corpus_average_depth(corpus_sys))
print('-' * 89)
print('Dependency parsing performance:')
print('Stanford Style:')
tree_utils.evald(dtree_list, '../data/ptb/test.stanford', directed=True)
tree_utils.evald(dtree_list, '../data/ptb/test.stanford', directed=False)
print('Conll Style:')
tree_utils.evald(dtree_list, '../data/ptb/test.conll', directed=True)
tree_utils.evald(dtree_list, '../data/ptb/test.conll', directed=False)
return mean(f1_list)
if __name__ == '__main__':
marks = [' ', '-', '=']
numpy.set_printoptions(precision=2, suppress=True, linewidth=5000)
argpr = argparse.ArgumentParser(description='PyTorch PTB Language Model')
# Model parameters.
argpr.add_argument(
'--data',
type=str,
default='data/penn/',
help='location of the data corpus')
argpr.add_argument(
'--checkpoint',
type=str,
default='PTB.pt',
help='model checkpoint to use')
argpr.add_argument('--seed', type=int, default=1111, help='random seed')
  argpr.add_argument(
      '--gap', type=float, default=0,
      help='distance gap for building non-binary tree')
  argpr.add_argument('--print', action='store_true', help='print examples')
argpr.add_argument('--cuda', action='store_true', help='use CUDA')
argpr.add_argument('--wsj10', action='store_true', help='use WSJ10')
args = argpr.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
# Load model
print('Loading model...')
with open(args.checkpoint, 'rb') as f:
model, _, _, _ = torch.load(f)
torch.cuda.manual_seed(args.seed)
model.cpu()
if args.cuda:
model.cuda()
# Load data
print('Loading PTB dataset...')
ptb_corpus = data_ptb.Corpus(args.data)
print('Evaluating...')
if args.cuda:
eval_device = torch.device('cuda:0')
else:
eval_device = torch.device('cpu')
print('=' * 89)
test(model, ptb_corpus, eval_device, prt=args.print, gap=args.gap)
print('=' * 89)
rel_weight = model.rel_weight.detach().cpu().numpy()
fig, axs = plt.subplots(8, 8, sharex=True, sharey=True)
names = ['p', 'd']
for i in range(rel_weight.shape[0]):
for j in range(rel_weight.shape[1]):
print(plot(rel_weight[i, j], max_val=1.), end=' ')
values = rel_weight[i, j]
if i == 0:
axs[i, j].set_title('%d' % (j,))
if j == 0:
axs[i, j].set_ylabel('%d' % (i,))
axs[i, j].bar(names, values)
print()
plt.savefig('./figures/mask_weights.png', dpi=300, format='png')
|
py
|
1a5c9fe7e9d4dbb59343d9a22d5ebc30ce893f7e
|
# This code allows the user to input their answers.
from django.db import models
# Create your models here.
class Dog(models.Model):
    name = models.CharField(max_length=50)
    breed = models.CharField(max_length=100)
    color = models.CharField(max_length=50)
    gender = models.CharField(max_length=20)
|
py
|
1a5ca0928b178d9cc86a0f751e599a21c6ed2203
|
# vim: set fileencoding=utf-8 :
"""
~~~~~~~~~
Utilities
~~~~~~~~~
"""
from __future__ import absolute_import, division
import copy
from sqlalchemy import inspect
from sqlalchemy.ext.associationproxy import _AssociationList
from sqlalchemy.orm.dynamic import AppenderMixin
from sqlalchemy.orm.query import Query
from dictalchemy import constants
from dictalchemy import errors
def arg_to_dict(arg):
"""Convert an argument that can be None, list/tuple or dict to dict
Example::
>>> arg_to_dict(None)
        {}
>>> arg_to_dict(['a', 'b'])
{'a':{},'b':{}}
>>> arg_to_dict({'a':{'only': 'id'}, 'b':{'only': 'id'}})
{'a':{'only':'id'},'b':{'only':'id'}}
:return: dict with keys and dict arguments as value
"""
if arg is None:
arg = []
try:
arg = dict(arg)
except ValueError:
arg = dict.fromkeys(list(arg), {})
return arg
def asdict(model, exclude=None, exclude_underscore=None, exclude_pk=None,
follow=None, include=None, only=None, method='asdict', **kwargs):
"""Get a dict from a model
Using the `method` parameter makes it possible to have multiple methods
that formats the result.
Additional keyword arguments will be passed to all relationships that are
followed. This can be used to pass on things like request or context.
:param follow: List or dict of relationships that should be followed.
If the parameter is a dict the value should be a dict of \
keyword arguments. Currently it follows InstrumentedList, \
MappedCollection and regular 1:1, 1:m, m:m relationships. Follow \
takes an extra argument, 'method', which is the method that \
should be used on the relation. It also takes the extra argument \
'parent' which determines where the relationships data should be \
added in the response dict. If 'parent' is set the relationship \
will be added with it's own key as a child to `parent`.
:param exclude: List of properties that should be excluded, will be \
merged with `model.dictalchemy_exclude`
:param exclude_pk: If True any column that refers to the primary key will \
be excluded.
    :param exclude_underscore: Overrides `model.dictalchemy_exclude_underscore`\
if set
:param include: List of properties that should be included. Use this to \
allow python properties to be called. This list will be merged \
with `model.dictalchemy_asdict_include` or \
`model.dictalchemy_include`.
:param only: List of properties that should be included. This will \
override everything else except `follow`.
:param method: Name of the method that is currently called. This will be \
the default method used in 'follow' unless another method is\
set.
:raises: :class:`dictalchemy.errors.MissingRelationError` \
if `follow` contains a non-existent relationship.
:raises: :class:`dictalchemy.errors.UnsupportedRelationError` If `follow` \
contains an existing relationship that currently isn't supported.
:returns: dict
"""
follow = arg_to_dict(follow)
info = inspect(model)
columns = [c.key for c in info.mapper.column_attrs]
synonyms = [c.key for c in info.mapper.synonyms]
if only:
attrs = only
else:
exclude = exclude or []
exclude += getattr(model, 'dictalchemy_exclude',
constants.default_exclude) or []
if exclude_underscore is None:
exclude_underscore = getattr(model,
'dictalchemy_exclude_underscore',
constants.default_exclude_underscore)
if exclude_underscore:
# Exclude all properties starting with underscore
exclude += [k.key for k in info.mapper.attrs if k.key[0] == '_']
if exclude_pk is True:
exclude += [c.key for c in info.mapper.primary_key]
include = (include or []) + (getattr(model,
'dictalchemy_asdict_include',
getattr(model,
'dictalchemy_include',
None)) or [])
attrs = [k for k in columns + synonyms + include if k not in exclude]
data = dict([(k, getattr(model, k)) for k in attrs])
    for (rel_key, orig_args) in follow.items():
try:
rel = getattr(model, rel_key)
except AttributeError:
raise errors.MissingRelationError(rel_key)
args = copy.deepcopy(orig_args)
method = args.pop('method', method)
args['method'] = method
args.update(copy.copy(kwargs))
if hasattr(rel, method):
rel_data = getattr(rel, method)(**args)
elif isinstance(rel, (list, _AssociationList)):
rel_data = []
for child in rel:
if hasattr(child, method):
rel_data.append(getattr(child, method)(**args))
else:
try:
rel_data.append(dict(child))
# TypeError is for non-dictable children
except TypeError:
rel_data.append(copy.copy(child))
elif isinstance(rel, dict):
rel_data = {}
            for (child_key, child) in rel.items():
if hasattr(child, method):
rel_data[child_key] = getattr(child, method)(**args)
else:
try:
rel_data[child_key] = dict(child)
except ValueError:
rel_data[child_key] = copy.copy(child)
elif isinstance(rel, (AppenderMixin, Query)):
rel_data = []
for child in rel.all():
if hasattr(child, method):
rel_data.append(getattr(child, method)(**args))
else:
rel_data.append(dict(child))
elif rel is None:
rel_data = None
else:
raise errors.UnsupportedRelationError(rel_key)
ins_key = args.pop('parent', None)
if ins_key is None:
data[rel_key] = rel_data
else:
if ins_key not in data:
data[ins_key] = {}
data[ins_key][rel_key] = rel_data
return data
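# Illustrative usage sketch (not part of the original module). Assuming a
# hypothetical mapped class `User` with a `groups` relationship that has been
# made dictable, `asdict` could be called roughly like this:
#
#     user.asdict(exclude_pk=True, follow={'groups': {'only': ['name']}})
#     # -> {'name': 'alice', 'groups': [{'name': 'admins'}]}
#
# Keys of `follow` name relationships; their dict values (including the
# optional 'method' and 'parent' keys) are forwarded to the related objects.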
def fromdict(model, data, exclude=None, exclude_underscore=None,
allow_pk=None, follow=None, include=None, only=None):
"""Update a model from a dict
Works almost identically as :meth:`dictalchemy.utils.asdict`. However, it
will not create missing instances or update collections.
This method updates the following properties on a model:
* Simple columns
* Synonyms
* Simple 1-m relationships
:param data: dict of data
:param exclude: list of properties that should be excluded
:param exclude_underscore: If True underscore properties will be excluded,\
if set to None model.dictalchemy_exclude_underscore will be used.
:param allow_pk: If True any column that refers to the primary key will \
be excluded. Defaults model.dictalchemy_fromdict_allow_pk or \
dictable.constants.fromdict_allow_pk. If set to True a primary \
key can still be excluded with the `exclude` parameter.
:param follow: Dict of relations that should be followed, the key is the \
arguments passed to the relation. Relations only works on simple \
relations, not on lists.
:param include: List of properties that should be included. This list \
will override anything in the exclude list. It will not override \
allow_pk.
:param only: List of the only properties that should be set. This \
will not override `allow_pk` or `follow`.
:raises: :class:`dictalchemy.errors.DictalchemyError` If a primary key is \
in data and allow_pk is False
:returns: The model
"""
follow = arg_to_dict(follow)
info = inspect(model)
columns = [c.key for c in info.mapper.column_attrs]
synonyms = [c.key for c in info.mapper.synonyms]
relations = [c.key for c in info.mapper.relationships]
primary_keys = [c.key for c in info.mapper.primary_key]
if allow_pk is None:
allow_pk = getattr(model, 'dictalchemy_fromdict_allow_pk',
constants.default_fromdict_allow_pk)
if only:
valid_keys = only
else:
exclude = exclude or []
exclude += getattr(model, 'dictalchemy_exclude',
constants.default_exclude) or []
if exclude_underscore is None:
exclude_underscore = getattr(model,
'dictalchemy_exclude_underscore',
constants.default_exclude_underscore)
if exclude_underscore:
# Exclude all properties starting with underscore
exclude += [k.key for k in info.mapper.attrs if k.key[0] == '_']
include = (include or []) + (getattr(model,
'dictalchemy_fromdict_include',
getattr(model,
'dictalchemy_include',
None)) or [])
valid_keys = [k for k in columns + synonyms
if k not in exclude] + include
# Keys that will be updated
update_keys = set(valid_keys) & set(data.keys())
# Check for primary keys
    data_primary_key = update_keys & set(primary_keys)
if len(data_primary_key) and not allow_pk:
        msg = ("Primary keys({0}) cannot be updated by fromdict. "
"Set 'dictalchemy_fromdict_allow_pk' to True in your Model"
" or pass 'allow_pk=True'.").format(','.join(data_primary_key))
raise errors.DictalchemyError(msg)
# Update columns and synonyms
for k in update_keys:
setattr(model, k, data[k])
# Update simple relations
    for (k, args) in follow.items():
if k not in data:
continue
if k not in relations:
raise errors.MissingRelationError(k)
rel = getattr(model, k)
if hasattr(rel, 'fromdict'):
rel.fromdict(data[k], **args)
return model
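# Illustrative usage sketch (not part of the original module). Continuing the
# hypothetical `User` example, `fromdict` only writes simple columns, synonyms
# and followed relations; it never creates new rows:
#
#     user.fromdict({'name': 'bob', 'id': 99})
#     # raises DictalchemyError unless allow_pk=True, because 'id' is a
#     # primary key column
#
#     user.fromdict({'name': 'bob'}, only=['name'])
#     # -> only the 'name' column is updated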
def iter(model):
"""iter method for models
Yields everything returned by `asdict`.
"""
    for i in model.asdict().items():
yield i
def make_class_dictable(
cls,
exclude=constants.default_exclude,
exclude_underscore=constants.default_exclude_underscore,
fromdict_allow_pk=constants.default_fromdict_allow_pk,
include=None,
asdict_include=None,
fromdict_include=None):
"""Make a class dictable
Useful for when the Base class is already defined, for example when using
Flask-SQLAlchemy.
Warning: This method will overwrite existing attributes if they exists.
:param exclude: Will be set as dictalchemy_exclude on the class
:param exclude_underscore: Will be set as dictalchemy_exclude_underscore \
on the class
:param fromdict_allow_pk: Will be set as dictalchemy_fromdict_allow_pk\
on the class
:param include: Will be set as dictalchemy_include on the class.
:param asdict_include: Will be set as `dictalchemy_asdict_include` on the \
class. If not None it will override `dictalchemy_include`.
:param fromdict_include: Will be set as `dictalchemy_fromdict_include` on \
the class. If not None it will override `dictalchemy_include`.
:returns: The class
"""
setattr(cls, 'dictalchemy_exclude', exclude)
setattr(cls, 'dictalchemy_exclude_underscore', exclude_underscore)
setattr(cls, 'dictalchemy_fromdict_allow_pk', fromdict_allow_pk)
setattr(cls, 'asdict', asdict)
setattr(cls, 'fromdict', fromdict)
setattr(cls, '__iter__', iter)
setattr(cls, 'dictalchemy_include', include)
setattr(cls, 'dictalchemy_asdict_include', asdict_include)
setattr(cls, 'dictalchemy_fromdict_include', fromdict_include)
return cls
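# Runnable sketch (not part of the original module) showing how
# `make_class_dictable` might be wired onto a declarative base. The `User`
# model, the in-memory SQLite URL and the SQLAlchemy import paths are
# assumptions and may need adjusting for your SQLAlchemy version.
if __name__ == '__main__':  # pragma: no cover
    from sqlalchemy import Column, Integer, String, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()
    # Every mapped subclass of Base now gains asdict/fromdict/__iter__.
    make_class_dictable(Base)

    class User(Base):
        __tablename__ = 'user'
        id = Column(Integer, primary_key=True)
        name = Column(String)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    user = User(name='alice')
    session.add(user)
    session.flush()  # assigns the primary key

    print(user.asdict())                 # {'id': 1, 'name': 'alice'}
    user.fromdict({'name': 'bob'})       # updates the simple column in place
    print(user.asdict(exclude_pk=True))  # {'name': 'bob'}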
|
py
|
1a5ca102b9c050b034dbf33bdd7287143b2fb1ff
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
import sys
import time
import unittest
import apache_beam as beam
from apache_beam import coders
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.portability.api.beam_interactive_api_pb2 import TestStreamFileRecord
from apache_beam.runners.interactive import background_caching_job as bcj
from apache_beam.runners.interactive import interactive_beam as ib
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import pipeline_instrument as pi
from apache_beam.runners.interactive.interactive_runner import InteractiveRunner
from apache_beam.runners.interactive.options.capture_limiters import Limiter
from apache_beam.runners.interactive.recording_manager import ElementStream
from apache_beam.runners.interactive.recording_manager import Recording
from apache_beam.runners.interactive.recording_manager import RecordingManager
from apache_beam.runners.interactive.testing.test_cache_manager import FileRecordsBuilder
from apache_beam.runners.interactive.testing.test_cache_manager import InMemoryCache
from apache_beam.runners.runner import PipelineState
from apache_beam.testing.test_stream import TestStream
from apache_beam.testing.test_stream import WindowedValueHolder
from apache_beam.transforms.window import GlobalWindow
from apache_beam.utils.timestamp import MIN_TIMESTAMP
from apache_beam.utils.windowed_value import WindowedValue
# TODO(BEAM-8288): clean up the work-around of nose tests using Python2 without
# unittest.mock module.
try:
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock # type: ignore[misc]
class MockPipelineResult(beam.runners.runner.PipelineResult):
"""Mock class for controlling a PipelineResult."""
def __init__(self):
self._state = PipelineState.RUNNING
def wait_until_finish(self):
pass
def set_state(self, state):
self._state = state
@property
def state(self):
return self._state
def cancel(self):
self._state = PipelineState.CANCELLED
class ElementStreamTest(unittest.TestCase):
def setUp(self):
ie.new_env()
self.cache = InMemoryCache()
self.p = beam.Pipeline()
self.pcoll = self.p | beam.Create([])
self.cache_key = str(pi.CacheKey('pcoll', '', '', ''))
# Create a MockPipelineResult to control the state of a fake run of the
# pipeline.
self.mock_result = MockPipelineResult()
ie.current_env().track_user_pipelines()
ie.current_env().set_pipeline_result(self.p, self.mock_result)
ie.current_env().set_cache_manager(self.cache, self.p)
def test_read(self):
"""Test reading and if a stream is done no more elements are returned."""
self.mock_result.set_state(PipelineState.DONE)
self.cache.write(['expected'], 'full', self.cache_key)
self.cache.save_pcoder(None, 'full', self.cache_key)
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=1, max_duration_secs=1)
self.assertFalse(stream.is_done())
self.assertEqual(list(stream.read())[0], 'expected')
self.assertTrue(stream.is_done())
def test_done_if_terminated(self):
"""Test that terminating the job sets the stream as done."""
self.cache.write(['expected'], 'full', self.cache_key)
self.cache.save_pcoder(None, 'full', self.cache_key)
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=10)
self.assertFalse(stream.is_done())
self.assertEqual(list(stream.read(tail=False))[0], 'expected')
# The limiters were not reached, so the stream is not done yet.
self.assertFalse(stream.is_done())
self.mock_result.set_state(PipelineState.DONE)
self.assertEqual(list(stream.read(tail=False))[0], 'expected')
# The underlying pipeline is terminated, so the stream won't yield new
# elements.
self.assertTrue(stream.is_done())
def test_read_n(self):
"""Test that the stream only reads 'n' elements."""
self.mock_result.set_state(PipelineState.DONE)
self.cache.write(list(range(5)), 'full', self.cache_key)
self.cache.save_pcoder(None, 'full', self.cache_key)
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=1, max_duration_secs=1)
self.assertEqual(list(stream.read()), [0])
self.assertTrue(stream.is_done())
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=2, max_duration_secs=1)
self.assertEqual(list(stream.read()), [0, 1])
self.assertTrue(stream.is_done())
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=5, max_duration_secs=1)
self.assertEqual(list(stream.read()), list(range(5)))
self.assertTrue(stream.is_done())
# Test that if the user asks for more than in the cache it still returns.
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=10, max_duration_secs=1)
self.assertEqual(list(stream.read()), list(range(5)))
self.assertTrue(stream.is_done())
def test_read_duration(self):
"""Test that the stream only reads a 'duration' of elements."""
def as_windowed_value(element):
return WindowedValueHolder(WindowedValue(element, 0, []))
values = (FileRecordsBuilder(tag=self.cache_key)
.advance_processing_time(1)
.add_element(element=as_windowed_value(0), event_time_secs=0)
.advance_processing_time(1)
.add_element(element=as_windowed_value(1), event_time_secs=1)
.advance_processing_time(1)
.add_element(element=as_windowed_value(2), event_time_secs=3)
.advance_processing_time(1)
.add_element(element=as_windowed_value(3), event_time_secs=4)
.advance_processing_time(1)
.add_element(element=as_windowed_value(4), event_time_secs=5)
.build()) # yapf: disable
values = [
v.recorded_event for v in values if isinstance(v, TestStreamFileRecord)
]
self.mock_result.set_state(PipelineState.DONE)
self.cache.write(values, 'full', self.cache_key)
self.cache.save_pcoder(coders.FastPrimitivesCoder(), 'full', self.cache_key)
# The following tests a progression of reading different durations from the
# cache.
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=1)
self.assertSequenceEqual([e.value for e in stream.read()], [0])
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=2)
self.assertSequenceEqual([e.value for e in stream.read()], [0, 1])
stream = ElementStream(
self.pcoll, '', self.cache_key, max_n=100, max_duration_secs=10)
self.assertSequenceEqual([e.value for e in stream.read()], [0, 1, 2, 3, 4])
class RecordingTest(unittest.TestCase):
def setUp(self):
ie.new_env()
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_computed(self):
"""Tests that a PCollection is marked as computed only in a complete state.
Because the background caching job is now long-lived, repeated runs of a
PipelineFragment may yield different results for the same PCollection.
"""
p = beam.Pipeline(InteractiveRunner())
elems = p | beam.Create([0, 1, 2])
ib.watch(locals())
# Create a MockPipelineResult to control the state of a fake run of the
# pipeline.
mock_result = MockPipelineResult()
ie.current_env().track_user_pipelines()
ie.current_env().set_pipeline_result(p, mock_result)
# Create a mock BackgroundCachingJob that will control whether to set the
# PCollections as computed or not.
bcj_mock_result = MockPipelineResult()
background_caching_job = bcj.BackgroundCachingJob(bcj_mock_result, [])
# Create a recording.
recording = Recording(
p, [elems],
mock_result,
pi.PipelineInstrument(p),
max_n=10,
max_duration_secs=60)
    # The background caching job and the recording aren't done yet, so there may
# be more elements to be recorded.
self.assertFalse(recording.is_computed())
self.assertFalse(recording.computed())
self.assertTrue(recording.uncomputed())
# The recording is finished but the background caching job is not. There
# may still be more elements to record, or the intermediate PCollection may
# have stopped caching in an incomplete state, e.g. before a window could
# fire.
mock_result.set_state(PipelineState.DONE)
recording.wait_until_finish()
self.assertFalse(recording.is_computed())
self.assertFalse(recording.computed())
self.assertTrue(recording.uncomputed())
# The background caching job finished before we started a recording which
# is a sure signal that there will be no more elements.
bcj_mock_result.set_state(PipelineState.DONE)
ie.current_env().set_background_caching_job(p, background_caching_job)
recording = Recording(
p, [elems],
mock_result,
pi.PipelineInstrument(p),
max_n=10,
max_duration_secs=60)
recording.wait_until_finish()
# There are no more elements and the recording finished, meaning that the
# intermediate PCollections are in a complete state. They can now be marked
# as computed.
self.assertTrue(recording.is_computed())
self.assertTrue(recording.computed())
self.assertFalse(recording.uncomputed())
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_describe(self):
p = beam.Pipeline(InteractiveRunner())
numbers = p | 'numbers' >> beam.Create([0, 1, 2])
letters = p | 'letters' >> beam.Create(['a', 'b', 'c'])
ib.watch(locals())
# Create a MockPipelineResult to control the state of a fake run of the
# pipeline.
mock_result = MockPipelineResult()
ie.current_env().track_user_pipelines()
ie.current_env().set_pipeline_result(p, mock_result)
cache_manager = InMemoryCache()
ie.current_env().set_cache_manager(cache_manager, p)
# Create a recording with an arbitrary start time.
recording = Recording(
p, [numbers, letters],
mock_result,
pi.PipelineInstrument(p),
max_n=10,
max_duration_secs=60)
# Get the cache key of the stream and write something to cache. This is
# so that a pipeline doesn't have to run in the test.
numbers_stream = recording.stream(numbers)
cache_manager.write([0, 1, 2], 'full', numbers_stream.cache_key)
cache_manager.save_pcoder(None, 'full', numbers_stream.cache_key)
letters_stream = recording.stream(letters)
cache_manager.write(['a', 'b', 'c'], 'full', letters_stream.cache_key)
cache_manager.save_pcoder(None, 'full', letters_stream.cache_key)
# Get the description.
description = recording.describe()
size = description['size']
self.assertEqual(
size,
cache_manager.size('full', numbers_stream.cache_key) +
cache_manager.size('full', letters_stream.cache_key))
class RecordingManagerTest(unittest.TestCase):
def setUp(self):
ie.new_env()
def tearDown(self):
ib.options.capture_control.set_limiters_for_test([])
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_basic_execution(self):
"""A basic pipeline to be used as a smoke test."""
# Create the pipeline that will emit 0, 1, 2.
p = beam.Pipeline(InteractiveRunner())
numbers = p | 'numbers' >> beam.Create([0, 1, 2])
letters = p | 'letters' >> beam.Create(['a', 'b', 'c'])
# Watch the pipeline and PCollections. This is normally done in a notebook
# environment automatically, but we have to do it manually here.
ib.watch(locals())
ie.current_env().track_user_pipelines()
# Create the recording objects. By calling `record` a new PipelineFragment
# is started to compute the given PCollections and cache to disk.
rm = RecordingManager(p)
numbers_recording = rm.record([numbers], max_n=3, max_duration=500)
numbers_stream = numbers_recording.stream(numbers)
numbers_recording.wait_until_finish()
# Once the pipeline fragment completes, we can read from the stream and know
# that all elements were written to cache.
elems = list(numbers_stream.read())
expected_elems = [
WindowedValue(i, MIN_TIMESTAMP, [GlobalWindow()]) for i in range(3)
]
self.assertListEqual(elems, expected_elems)
# Make an extra recording and test the description.
letters_recording = rm.record([letters], max_n=3, max_duration=500)
letters_recording.wait_until_finish()
self.assertEqual(
rm.describe()['size'],
numbers_recording.describe()['size'] +
letters_recording.describe()['size'])
rm.cancel()
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_duration_parsing(self):
p = beam.Pipeline(InteractiveRunner())
elems = p | beam.Create([0, 1, 2])
# Watch the pipeline and PCollections. This is normally done in a notebook
# environment automatically, but we have to do it manually here.
ib.watch(locals())
ie.current_env().track_user_pipelines()
# Create the recording objects.
rm = RecordingManager(p)
recording = rm.record([elems], max_n=3, max_duration='500s')
recording.wait_until_finish()
# Assert that the duration was parsed correctly to integer seconds.
self.assertEqual(recording.describe()['duration'], 500)
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_cancel_stops_recording(self):
# Add the TestStream so that it can be cached.
ib.options.capturable_sources.add(TestStream)
p = beam.Pipeline(
InteractiveRunner(), options=PipelineOptions(streaming=True))
elems = (
p
| TestStream().advance_watermark_to(0).advance_processing_time(
1).add_elements(list(range(10))).advance_processing_time(1))
squares = elems | beam.Map(lambda x: x**2)
# Watch the local scope for Interactive Beam so that referenced PCollections
# will be cached.
ib.watch(locals())
# This is normally done in the interactive_utils when a transform is
# applied but needs an IPython environment. So we manually run this here.
ie.current_env().track_user_pipelines()
# Get the recording then the BackgroundCachingJob.
rm = RecordingManager(p)
recording = rm.record([squares], max_n=10, max_duration=30)
# The BackgroundCachingJob is still waiting for more elements, so it isn't
# done yet.
bcj = ie.current_env().get_background_caching_job(p)
self.assertFalse(bcj.is_done())
# Assert that something was read and that the BackgroundCachingJob was
    # successfully stopped.
self.assertTrue(list(recording.stream(squares).read()))
rm.cancel()
self.assertTrue(bcj.is_done())
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_recording_manager_clears_cache(self):
"""Tests that the RecordingManager clears the cache before recording.
A job may have incomplete PCollections when the job terminates. Clearing the
cache ensures that correct results are computed every run.
"""
# Add the TestStream so that it can be cached.
ib.options.capturable_sources.add(TestStream)
p = beam.Pipeline(
InteractiveRunner(), options=PipelineOptions(streaming=True))
elems = (
p
| TestStream().advance_watermark_to(0).advance_processing_time(
1).add_elements(list(range(10))).advance_processing_time(1))
squares = elems | beam.Map(lambda x: x**2)
# Watch the local scope for Interactive Beam so that referenced PCollections
# will be cached.
ib.watch(locals())
# This is normally done in the interactive_utils when a transform is
# applied but needs an IPython environment. So we manually run this here.
ie.current_env().track_user_pipelines()
# Do the first recording to get the timestamp of the first time the fragment
# was run.
rm = RecordingManager(p)
rm.record([squares], max_n=10, max_duration=2)
first_recording_start = rm.describe()['start']
rm.cancel()
# Get the cache, key, and coder to read the PCollection from the cache.
pipeline_instrument = pi.PipelineInstrument(p)
cache = ie.current_env().get_cache_manager(p)
cache_key = pipeline_instrument.cache_key(squares)
# Set up a mock for the Cache's clear function which will be used to clear
# uncomputed PCollections.
cache.clear = MagicMock()
# Rerun the fragment. If the cache was cleared correctly then the starting
# time of the second recording will be later than the first. This is because
    # the PCollection wasn't considered to be computed and was cleared from
# cache. Thus the pipeline fragment was rerun for that PCollection at a
# later time.
rm.record([squares], max_n=10, max_duration=1)
second_recording_start = rm.describe()['start']
rm.cancel()
self.assertGreater(second_recording_start, first_recording_start)
# Assert that the cache cleared the PCollection.
cache.clear.assert_called_with('full', cache_key)
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_clear(self):
"""Tests that clear can empty the cache for a specific pipeline."""
# Create two pipelines so we can check that clearing the cache won't clear
# all defined pipelines.
p1 = beam.Pipeline(InteractiveRunner())
elems_1 = p1 | 'elems 1' >> beam.Create([0, 1, 2])
p2 = beam.Pipeline(InteractiveRunner())
elems_2 = p2 | 'elems 2' >> beam.Create([0, 1, 2])
# Watch the pipeline and PCollections. This is normally done in a notebook
# environment automatically, but we have to do it manually here.
ib.watch(locals())
ie.current_env().track_user_pipelines()
# Create the recording objects. By calling `record` a new PipelineFragment
# is started to compute the given PCollections and cache to disk.
rm_1 = RecordingManager(p1)
recording = rm_1.record([elems_1], max_n=3, max_duration=500)
recording.wait_until_finish()
rm_2 = RecordingManager(p2)
recording = rm_2.record([elems_2], max_n=3, max_duration=500)
recording.wait_until_finish()
# Assert that clearing only one recording clears that recording.
self.assertGreater(rm_1.describe()['size'], 0)
self.assertGreater(rm_2.describe()['size'], 0)
rm_1.clear()
self.assertEqual(rm_1.describe()['size'], 0)
self.assertGreater(rm_2.describe()['size'], 0)
rm_2.clear()
self.assertEqual(rm_2.describe()['size'], 0)
@unittest.skipIf(
sys.version_info < (3, 6, 0),
'This test requires at least Python 3.6 to work.')
def test_record_pipeline(self):
# Add the TestStream so that it can be cached.
ib.options.capturable_sources.add(TestStream)
p = beam.Pipeline(
InteractiveRunner(), options=PipelineOptions(streaming=True))
# pylint: disable=unused-variable
_ = (p
| TestStream()
.advance_watermark_to(0)
.advance_processing_time(1)
.add_elements(list(range(10)))
.advance_processing_time(1)) # yapf: disable
# Watch the local scope for Interactive Beam so that referenced PCollections
# will be cached.
ib.watch(locals())
# This is normally done in the interactive_utils when a transform is
# applied but needs an IPython environment. So we manually run this here.
ie.current_env().track_user_pipelines()
    # Create a limiter that stops the background caching job when something is
    # written to cache. This is used to ensure that the pipeline is
# functioning properly and that there are no data races with the test.
class SizeLimiter(Limiter):
def __init__(self, recording_manager):
self.recording_manager = recording_manager
def is_triggered(self):
return self.recording_manager.describe()['size'] > 0
# Do the first recording to get the timestamp of the first time the fragment
# was run.
rm = RecordingManager(p)
ib.options.capture_control.set_limiters_for_test([SizeLimiter(rm)])
self.assertEqual(rm.describe()['state'], PipelineState.STOPPED)
self.assertTrue(rm.record_pipeline())
self.assertFalse(rm.record_pipeline())
for _ in range(60):
if rm.describe()['state'] == PipelineState.CANCELLED:
break
time.sleep(1)
self.assertTrue(
rm.describe()['state'] == PipelineState.CANCELLED,
'Test timed out waiting for pipeline to be cancelled. This indicates '
'that the BackgroundCachingJob did not cache anything.')
if __name__ == '__main__':
unittest.main()
|
py
|
1a5ca1802e7d796ce100fbce833a7994e502ae66
|
# coding: utf-8
try:
from rest_framework_jwt.settings import api_settings
def jwt_encode_handler(payload):
"""
Encode handler override for JWT
"""
import jwt
return jwt.encode(
payload,
str(api_settings.JWT_SECRET_KEY),
str(api_settings.JWT_ALGORITHM)
).decode('utf-8')
def jwt_decode_handler(token):
"""
Decode handler override for JWT
"""
options = {
'verify_exp': api_settings.JWT_VERIFY_EXPIRATION,
}
import jwt
return jwt.decode(
token,
str(api_settings.JWT_SECRET_KEY),
            api_settings.JWT_VERIFY,
options=options,
leeway=api_settings.JWT_LEEWAY,
audience=api_settings.JWT_AUDIENCE,
issuer=api_settings.JWT_ISSUER,
algorithms=[api_settings.JWT_ALGORITHM]
)
def jwt_payload_handler(user):
"""
Payload handler for JWT
"""
from rest_framework_jwt.utils import jwt_payload_handler as payload_handler
payload = payload_handler(user)
payload.update(
user_id=user.pk,
email=getattr(user, 'email', None),
is_staff=getattr(user, 'is_staff', None),
is_superuser=getattr(user, 'is_superuser', None))
return payload
def jwt_response_payload_handler(token, user, request):
"""
Token payload handler for JWT
"""
from django.utils.timezone import now
if user and hasattr(user, 'last_login'):
user.last_login = now()
user.save(update_fields=['last_login'])
return {'token': token}
except ImportError:
pass
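# Illustrative wiring sketch (not part of the original handlers). These
# overrides only take effect when referenced from the djangorestframework-jwt
# settings block; the dotted module path 'myproject.jwt_handlers' is an
# assumption and should point at this file:
#
#     JWT_AUTH = {
#         'JWT_ENCODE_HANDLER': 'myproject.jwt_handlers.jwt_encode_handler',
#         'JWT_DECODE_HANDLER': 'myproject.jwt_handlers.jwt_decode_handler',
#         'JWT_PAYLOAD_HANDLER': 'myproject.jwt_handlers.jwt_payload_handler',
#         'JWT_RESPONSE_PAYLOAD_HANDLER':
#             'myproject.jwt_handlers.jwt_response_payload_handler',
#     }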
|
py
|
1a5ca4094513214ecb79e534d370c97f3042087d
|
"""Helper for evaluation on the Labeled Faces in the Wild dataset
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
# Copyright (c) 2021 Kaen Chan
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
from evaluation.pyeer.eer_info import get_eer_stats
from mc_dropout.network_mcdropout import NetworkMCDropout
from utils import utils
from dataset.dataset_np_ipc import Dataset
import os
import sys
import numpy as np
from scipy import misc
from scipy import interpolate
import sklearn.metrics
import cv2
import math
import datetime
import pickle
from sklearn.decomposition import PCA
import mxnet as mx
from mxnet import ndarray as nd
import _pickle as cPickle
from utils.utils import KFold
def calculate_eer(embeddings1, embeddings2, actual_issame, compare_func, nrof_folds=10):
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
dist = compare_func(embeddings1, embeddings2)
gscores_a = dist[actual_issame == 1]
iscores_a = dist[actual_issame == 0]
stats_a = get_eer_stats(gscores_a, iscores_a)
return stats_a
def evaluate(embeddings, actual_issame, compare_func, nrof_folds=10, keep_idxes=None):
# Calculate evaluation metrics
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
actual_issame = np.asarray(actual_issame)
if keep_idxes is not None:
embeddings1 = embeddings1[keep_idxes]
embeddings2 = embeddings2[keep_idxes]
actual_issame = actual_issame[keep_idxes]
return calculate_eer(embeddings1, embeddings2,
actual_issame, compare_func, nrof_folds=nrof_folds)
def load_bin(path, image_size):
print(path, image_size)
with open(path, 'rb') as f:
if 'lfw_all' in path:
bins, issame_list = pickle.load(f)
else:
bins, issame_list = pickle.load(f, encoding='latin1')
data_list = []
for flip in [0]:
data = nd.empty((len(issame_list)*2, image_size[0], image_size[1], 3))
data_list.append(data)
print(len(bins))
for i in range(len(issame_list)*2):
_bin = bins[i]
# print(type(_bin))
img = mx.image.imdecode(_bin)
# img = nd.transpose(img, axes=(2, 0, 1))
for flip in [0]:
if flip==1:
img = mx.ndarray.flip(data=img, axis=2)
data_list[flip][i][:] = img
# if i%5000==0:
# print('loading bin', i)
print(data_list[0].shape)
return (data_list, issame_list)
def extract_features(images_preprocessed, issame_list, extract_feature_func, batch_size, name='', result_dir='',
re_extract_feature=True):
print('testing verification..')
if name:
save_name_pkl_feature = result_dir + '/%s_feature.pkl' % name
if re_extract_feature or not os.path.exists(save_name_pkl_feature):
images = images_preprocessed
print(images.shape)
mu, sigma_sq = extract_feature_func(images)
save_data = (mu, sigma_sq, issame_list)
if name:
with open(save_name_pkl_feature, 'wb') as f:
cPickle.dump(save_data, f)
print('save', save_name_pkl_feature)
else:
with open(save_name_pkl_feature, 'rb') as f:
data = cPickle.load(f)
if len(data) == 3:
mu, sigma_sq, issame_list = data
else:
mu, sigma_sq = data
print('load', save_name_pkl_feature)
return mu, sigma_sq, issame_list
def eval_images_with_sigma(mu, sigma_sq, issame_list, nfolds=10, name='', filter_out_type='max', sigma_sizes=1):
print('sigma_sq', sigma_sq.shape)
feat_pfe = np.concatenate([mu, sigma_sq], axis=1)
if name != '':
np.save('o_sigma_%s.npy' % name, sigma_sq)
# quality_score = -np.mean(np.log(sigma_sq), axis=1)
# print('quality_score quality_score=-np.mean(np.log(sigma_sq),axis=1) percentile [0, 10, 30, 50, 70, 90, 100]')
# print('quality_score ', np.percentile(quality_score.ravel(), [0, 10, 30, 50, 70, 90, 100]))
s = 'sigma_sq ' + str(np.percentile(sigma_sq.ravel(), [0, 10, 30, 50, 70, 90, 100])) + \
' percentile [0, 10, 30, 50, 70, 90, 100]\n'
# print(mu.shape)
# print('sigma_sq', sigma_sq.shape)
if sigma_sq.shape[1] == 2:
sigma_sq_c = np.copy(sigma_sq)
sigma_sq_list = [sigma_sq_c[:,:1], sigma_sq_c[:,1:]]
elif type(sigma_sizes) == list:
sigma_sq_list = []
idx = 0
for si in sigma_sizes:
sigma = sigma_sq[:, idx:idx + si]
if si > 1:
sigma = 1/np.mean(1/(sigma+1e-6), axis=-1)
sigma_sq_list += [sigma]
idx += si
elif sigma_sq.shape[1] > 2:
sigma_sq_list = [1/np.mean(1/(sigma_sq+1e-6), axis=-1)]
else:
sigma_sq_list = [sigma_sq]
for sigma_sq in sigma_sq_list:
sigma_sq1 = sigma_sq[0::2]
sigma_sq2 = sigma_sq[1::2]
# filter_out_type = 'max'
if filter_out_type == 'max':
sigma_fuse = np.maximum(sigma_sq1, sigma_sq2)
else:
sigma_fuse = sigma_sq1 + sigma_sq2
# reject_factor = 0.1
error_list = []
# reject_factors = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
# reject_factors = np.arange(50) / 100.
# reject_factors = np.arange(30) / 100.
# reject_factors = [0.0, 0.1, 0.2, 0.3]
reject_factors_points = [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3]
reject_factors = np.arange(0, 1.0, 0.01)
for reject_factor in reject_factors:
risk_threshold = np.percentile(sigma_fuse.ravel(), (1-reject_factor)*100)
keep_idxes = np.where(sigma_fuse <= risk_threshold)[0]
if len(keep_idxes) == 0:
keep_idxes = None
stats = evaluate(mu, issame_list, utils.pair_cosin_score, nrof_folds=nfolds, keep_idxes=keep_idxes)
if reject_factor in reject_factors_points:
s += 'reject_factor {:.4f} '.format(reject_factor)
s += 'risk_threshold {:.6f} '.format(risk_threshold)
s += 'keep_idxes {} / {} '.format(len(keep_idxes), len(sigma_fuse))
s += 'Cosine score eer %f fmr100 %f fmr1000 %f\n' % (stats.eer, stats.fmr100, stats.fmr1000)
error_list += [stats.fmr1000]
# print('cosin', 'acc', accuracy, 'threshold', threshold)
# print(s)
# compare_func = lambda x,y: utils.pair_MLS_score(x, y, use_attention_only=False)
# accuracy, threshold = evaluate(feat_pfe, issame_list, compare_func, nrof_folds=nfolds, keep_idxes=keep_idxes)
# s += 'MLS score acc %f threshold %f' % (accuracy, threshold)
# print('MLS', 'acc', accuracy, 'threshold', threshold)
if keep_idxes is None:
break
# s_avg = 'reject_factor 0.5 risk_threshold 0.585041 keep_idxes 3500 / 7000 '
s_avg = 'reject_factor mean --------------------------------------------- '
s_avg += 'Cosine score fmr1000 %f\n' % (np.mean(error_list))
s += s_avg
tpr = error_list
fpr = reject_factors
auc = sklearn.metrics.auc(fpr, tpr)
l = int(len(tpr)*0.3)
auc30 = sklearn.metrics.auc(fpr[:l], tpr[:l])
s += 'AUERC: %1.4f\n' % auc
s += 'AUERC30: %1.4f\n' % auc30
best = error_list[0]**2/2
s += 'AUC: %1.4f\n' % (auc-best)
best30 = (error_list[0] * min(error_list[0], 0.3))/2
s += 'AUC30: %1.4f\n' % (auc30-best30)
s += '\n'
# print(s)
return s[:-1]
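# Illustrative sketch (not part of the original script) of the rejection step
# used above, shown in isolation with toy names:
#
#     sigma_fuse = np.maximum(sigma_sq1, sigma_sq2)       # per-pair risk score
#     thr = np.percentile(sigma_fuse, (1 - 0.1) * 100)    # keep the 90% least risky
#     keep = np.where(sigma_fuse <= thr)[0]               # pair indices evaluated
#
# The FMR1000 errors collected over the grid of reject factors are then
# integrated with sklearn.metrics.auc to produce the AUERC/AUC30 numbers.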
def eval_images_mls(mu, sigma_sq_i, issame_list, nfolds=10, sigma_sizes=1):
print('sigma_sq', sigma_sq_i.shape)
if sigma_sq_i.shape[1] == 2:
sigma_sq_c = np.copy(sigma_sq_i)
sigma_sq_list = [sigma_sq_c[:,:1], sigma_sq_c[:,1:]]
elif type(sigma_sizes) == list:
sigma_sq_list = []
idx = 0
for si in sigma_sizes:
sigma_sq_list += [sigma_sq_i[:, idx:idx+si]]
idx += si
else:
sigma_sq_list = [sigma_sq_i]
s = ''
ret = {}
accuracy, threshold = evaluate(mu, issame_list, utils.pair_cosin_score, nrof_folds=nfolds)
ret['Cosine'] = accuracy
s += 'Cosine score acc %.4f threshold %.4f\n' % (accuracy, threshold)
for sigma_sq in sigma_sq_list:
print('testing verification..')
feat_pfe = np.concatenate([mu, sigma_sq], axis=1)
# quality_score = -np.mean(np.log(sigma_sq), axis=1)
compare_func = lambda x, y: utils.pair_MLS_score(x, y, use_attention_only=False)
accuracy, threshold = evaluate(feat_pfe, issame_list, compare_func, nrof_folds=nfolds)
s += 'MLS score acc %.4f threshold %.4f\n' % (accuracy, threshold)
ret['MLS'] = accuracy
# print(s)
return s, ret
def eval_images(images_preprocessed, issame_list, extract_feature_func, batch_size, nfolds=10, name='', result_dir='',
re_extract_feature=True, filter_out_type='max', sigma_sizes=1, tt_flip=False, only_mls=True):
mu, sigma_sq, issame_list = extract_features(images_preprocessed, issame_list, extract_feature_func, batch_size,
name=name, result_dir=result_dir,
re_extract_feature=re_extract_feature)
# s_mls, ret = eval_images_mls(mu, sigma_sq, issame_list, nfolds=nfolds, sigma_sizes=sigma_sizes)
s_mls, ret = '', [0]
info = s_mls
if not only_mls:
s_reject = eval_images_with_sigma(mu, sigma_sq, issame_list, nfolds=nfolds, name='',
filter_out_type=filter_out_type, sigma_sizes=sigma_sizes)
info = s_mls + s_reject
if tt_flip:
info = info.replace('Cosine score acc', 'tt-flip Cosine score acc')
info = info.replace('MLS score acc', 'tt-flip MLS score acc')
return info, ret
def save_dataset_as_jpg(data_set, name):
data_list = data_set[0]
issame_list = data_set[1]
data_list = data_list[0].asnumpy()
root = r'F:\data\face-recognition\test\1v1'
for i in range(len(data_list)):
path = os.path.join(root, name)
if not os.path.exists(path):
os.makedirs(path)
path = os.path.join(path, '%04d_%d.jpg' % (i, issame_list[i//2]))
print(path)
cv2.imwrite(path, data_list[i].astype(np.uint8)[...,::-1])
def eval(data_set, network, batch_size, nfolds=10, name='', result_dir='', re_extract_feature=True,
filter_out_type='max', sigma_sizes=1, tt_flip=False):
print('testing verification..')
data_list = data_set[0]
issame_list = data_set[1]
data_list = data_list[0].asnumpy()
# images = preprocess(data_list, network.config, False)
images = data_list
del data_set
for i in range(1):
# name1 = name + '_keep0.9_%03d' % i
name1 = name
extract_feature_func = lambda x: network.extract_feature(x, batch_size=32, need_preprecess=True,
tt_flip=tt_flip, verbose=True)
ret, _ = eval_images(images, issame_list, extract_feature_func, batch_size, nfolds=nfolds, name=name1,
result_dir=result_dir, re_extract_feature=re_extract_feature,
filter_out_type=filter_out_type, sigma_sizes=sigma_sizes, tt_flip=tt_flip, only_mls=False)
# ret = eval_images_cmp(images, issame_list, network, batch_size, nfolds=10, name=name, result_dir=result_dir,
# re_extract_feature=re_extract_feature, filter_out_type=filter_out_type)
return ret
def main_save_data(args):
data_dir = args.dataset_path
data_dir = r'F:\data\face-recognition\MS-Celeb-1M\faces_emore'
data_dir = r'F:\data\face-recognition\trillion-pairs\challenge\ms1m-retinaface-t1'
for name in args.target.split(','):
path = os.path.join(data_dir,name+".bin")
if os.path.exists(path):
image_size = [112, 112]
data_set = load_bin(path, image_size)
save_dataset_as_jpg(data_set, name)
def eval_images_from_pkl_all_data(model_dir, targets=None):
filter_out_type = 'max'
densefilter = False
seperate = False
nfolds = 10
# featmodels = ['r64', 'r100', 'r50']
# featmodels = ['r100', 'r50']
featmodels = ['same']
for featmodel in featmodels:
mu_list = []
idq_list = []
iss_list = []
if targets is None:
targets = 'lfw,calfw,cplfw,cfp_ff,cfp_fp,agedb_30,vgg2_fp'
for target in targets.split(','):
path_pkl = model_dir + r'\%s_feature.pkl' % target
with open(path_pkl, 'rb') as f:
data = cPickle.load(f)
mu2, id_quality, issame_list = data
save_name_pkl_feature = None
if featmodel == 'same':
save_name_pkl_feature = r'{}\{}_feature.pkl'.format(model_dir, target)
elif featmodel == 'r100':
save_name_pkl_feature = r'F:\data\face-recognition\test\IJB_release\pretrained_models\MS1MV2-ResNet100-Arcface\{}_feature.pkl'.format(target)
elif featmodel == 'r50':
save_name_pkl_feature = r'F:\data\face-recognition\test\IJB_release\pretrained_models\VGG2-ResNet50-Arcface\{}_feature.pkl'.format(target)
elif featmodel == 'r64':
save_name_pkl_feature = r'G:\chenkai\Probabilistic-Face-Embeddings-master\log\resface64_relu_msarcface_am_PFE_mbv3small_reject\20210116-040122-s16-m0.4-tau1\{}_feature.pkl'.format(
target)
else:
                raise ValueError(('error', save_name_pkl_feature))
with open(save_name_pkl_feature, 'rb') as f:
data = cPickle.load(f)
mu2, _, _ = data
print('load verification model', save_name_pkl_feature)
if seperate:
                s = eval_images_with_sigma(mu2, id_quality, issame_list, nfolds=nfolds, name='',
                                           filter_out_type=filter_out_type)
print(s)
logname = 'testing-log-fnmr-{}-{}.txt'.format(target, featmodel)
if densefilter:
logname = 'testing-log-fnmr-{}-{}-densefilter.txt'.format(target, featmodel)
with open(os.path.join(model_dir, logname), 'a') as f:
if save_name_pkl_feature is not None:
f.write(save_name_pkl_feature + '\n')
f.write(targets + '\n')
f.write(s + '\n')
continue
mu_list += list(mu2)
idq_list += list(id_quality)
iss_list += list(issame_list)
print('load', path_pkl)
# s = eval_images_with_sigma(mu, id_quality, issame_list, nfolds=10, name='', filter_out_type=filter_out_type)
# print(s)
if seperate:
continue
mu_list = np.array(mu_list)
idq_list = np.array(idq_list)
iss_list = np.array(iss_list)
print('pos', np.sum(iss_list), 'neg', len(iss_list)-np.sum(iss_list), 'total', len(iss_list))
if id_quality.shape[1] == 513:
sigma_sizes = [512, 1]
else:
sigma_sizes = 1
s, ret = eval_images_mls(mu_list, idq_list, iss_list, nfolds=10, sigma_sizes=sigma_sizes)
s += eval_images_with_sigma(mu_list, idq_list, iss_list, nfolds=nfolds, name='',
filter_out_type=filter_out_type, sigma_sizes=sigma_sizes)
print(s)
logname = 'testing-log-fnmr-{}-{}.txt'.format('alldata', featmodel)
if densefilter:
logname = 'testing-log-fnmr-{}-{}-densefilter.txt'.format('alldata', featmodel)
with open(os.path.join(model_dir, logname), 'a') as f:
if save_name_pkl_feature is not None:
f.write(save_name_pkl_feature + '\n')
f.write(targets + '\n')
f.write(s + '\n')
with open(os.path.join(model_dir, 'testing-log.txt'), 'a') as f:
if save_name_pkl_feature is not None:
f.write(save_name_pkl_feature + '\n')
f.write(targets + '\n')
f.write(s + '\n')
def main(args):
data_dir = args.dataset_path
# data_dir = r'F:\data\face-recognition\MS-Celeb-1M\faces_emore'
# data_dir = r'F:\data\face-recognition\trillion-pairs\challenge\ms1m-retinaface-t1'
# data_dir = r'F:\data\metric-learning\face\ms1m-retinaface-t1'
re_extract_feature = True
# filter_out_type = 'add'
filter_out_type = 'max'
tt_flip = False
# Load model files and config file
network = NetworkMCDropout(args.backbone_name, args.resume_backbone)
# # images = np.random.random([1, 128, 128, 3])
# images = np.random.random([1, 3, 112, 112])
# img = cv2.imread(r'E:\chenkai\probface-pytorch\im_96x96.jpg')
# images = np.array([img])
# for _ in range(1):
# mu, sigma_sq = network.extract_feature(images, 1, need_preprecess=True, tt_flip=True, verbose=True)
# print(mu[0, :5])
# exit(0)
# log_dir = r'E:\chenkai\face-uncertainty-pytorch\mc_dropout'
log_dir = args.log_dir
if not os.path.exists(log_dir):
os.makedirs(log_dir)
print(args.target)
for namec in args.target.split(','):
path = os.path.join(data_dir,namec+".bin")
if os.path.exists(path):
print(path)
image_size = [112, 112]
data_set = load_bin(path, image_size)
name = namec
print('ver', name)
# save_pkl_name = '' # donot save feature.pkl
save_pkl_name = namec
print(log_dir)
sigma_sizes = network.uncertainty_size
info = eval(data_set, network, args.batch_size, 10, name=save_pkl_name, result_dir=log_dir,
re_extract_feature=re_extract_feature, filter_out_type=filter_out_type,
sigma_sizes=sigma_sizes, tt_flip=tt_flip)
# print(info)
info_result = '--- ' + name + ' ---\n'
info_result += data_dir + '\n'
info_result += info + "\n"
print("")
print(info_result)
with open(os.path.join(log_dir, 'testing-log-fnmr-{}-{}.txt'.format(name, filter_out_type)), 'a') as f:
f.write(info_result + '\n')
with open(os.path.join(log_dir, 'testing-log-fnmr.txt'), 'a') as f:
f.write(info_result + '\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--backbone_name", help="", type=str, default='')
parser.add_argument("--resume_backbone", help="", type=str, default='')
parser.add_argument("--log_dir", help="", type=str, default='')
parser.add_argument("--dataset_path", help="The path to the LFW dataset directory",
type=str, default=r'F:\data\metric-learning\face\ms1m-retinaface-t1')
parser.add_argument("--batch_size", help="Number of images per mini batch",
type=int, default=16)
parser.add_argument('--target', type=str, default='lfw,cfp_fp,agedb_30', help='verification targets')
args = parser.parse_args()
# args.dataset_path = r''
dataset_path_list = [
r'F:\data\metric-learning\face\ms1m-retinaface-t1',
# r'F:\data\metric-learning\face\ms1m-retinaface-t1\blur-r5-p0.05',
]
# log_dir = r'G:\chenkai\probface-pytorch\log\ir50_pfe\20210327-132611'
# log_dir = r'G:\chenkai\probface-pytorch\log\ir50_pfe\20210327-181146-mls-cl1-0.15'
# args.target = 'lfw,cfp_fp,agedb_30'
args.target = 'calfw,cplfw,cfp_ff,vgg2_fp'
args.target = 'lfw,calfw,cplfw,cfp_ff,cfp_fp,agedb_30,vgg2_fp'
# args.target = 'calfw'
args.dataset_path = dataset_path_list[0]
# args.log_dir = r'E:\chenkai\face-uncertainty-pytorch\log\glint-ir50\mcdropout'
# args.backbone_name = 'backbones.iresnet.iresnet50'
# args.resume_backbone = r'E:\chenkai\arcface_torch\glint360k_cosface_r50_fp16_0.1\backbone.pth'
# main(args)
#
# args.log_dir = r'E:\chenkai\face-uncertainty-pytorch\log\glint-ir100\mcdropout'
# args.backbone_name = 'backbones.iresnet.iresnet100'
# args.resume_backbone = r'E:\chenkai\arcface_torch\glint360k_cosface_r100_fp16_0.1\backbone.pth'
# main(args)
#
# args.log_dir = r'E:\chenkai\face-uncertainty-pytorch\log\ms1mv3-ir50\mcdropout'
# args.backbone_name = 'backbones.iresnet.iresnet50'
# args.resume_backbone = r'E:\chenkai\arcface_torch\ms1mv3_arcface_r50_fp16\backbone.pth'
# main(args)
args.log_dir = r'E:\chenkai\face-uncertainty-pytorch\log\ms1mv3-ir50\mcdropout'
args.backbone_name = 'backbones.iresnet.iresnet50'
args.resume_backbone = r'E:\chenkai\arcface_torch\ms1mv3_arcface_r50_fp16\backbone.pth'
main(args)
|
py
|
1a5ca4219f854fbbde9615ffdb4c23934dec6c96
|
with open("input.txt") as f:
direction = list(f.read().replace("\n",""))
floor = 0
for i in direction:
if i == "(":
floor += 1
elif i == ")":
floor -= 1
print(floor)
|
py
|
1a5ca4b002942a495e1829c970b6441fdf8139ed
|
# -*- coding: utf-8 -*-
"""This file contains the definition of all languages supported by the program."""
LANGUAGES = [
{
'name': 'Spanish',
'common_words': [
'el', 'la', 'de', 'que', 'y', 'a', 'en', 'un', 'ser', 'se',
'no', 'haber', 'por', 'con', 'su', 'para', 'como', 'estar',
'tener', 'le', 'lo', 'lo', 'todo', 'pero', 'más', 'hacer',
'o', 'poder', 'decir', 'este', 'ir', 'otro', 'ese', 'la',
'si', 'me', 'ya', 'ver', 'porque', 'dar', 'cuando', 'él',
'muy', 'sin', 'vez', 'mucho', 'saber', 'qué', 'sobre',
'mi', 'alguno', 'mismo', 'yo', 'también', 'hasta'
]
},
{
'name': 'German',
'common_words': [
'das', 'ist', 'du', 'ich', 'nicht', 'die', 'es', 'und',
'der', 'was', 'wir', 'zu', 'ein', 'er', 'in', 'sie', 'mir',
'mit', 'ja', 'wie', 'den', 'auf', 'mich', 'dass', 'so',
'hier', 'eine', 'wenn', 'hat', 'all', 'sind', 'von',
'dich', 'war', 'haben', 'für', 'an', 'habe', 'da', 'nein',
'bin', 'noch', 'dir', 'uns', 'sich', 'nur',
'einen', 'kann', 'dem'
]
},
# include the English language here
# HINT: https://en.wikipedia.org/wiki/Most_common_words_in_English
]
|
py
|
1a5ca53dbdcb14d102be350a6f44d9500aea3fe5
|
import numpy as np
from mchap.io.vcf.util import vcfstr
def format_info_field(**kwargs):
"""Format key-value pairs into a VCF info field.
Parameters
----------
kwargs
Key value pairs of info field codes to values.
Returns
-------
string : str
VCF info field.
"""
parts = ["{}={}".format(k, vcfstr(v)) for k, v in kwargs.items()]
return ";".join(parts)
def format_sample_field(**kwargs):
"""Format key-value pairs into a VCF format field.
Parameters
----------
kwargs
Key value pairs of info field codes to arrays of values per sample.
Returns
-------
string : str
VCF format and sample columns.
"""
fields, arrays = zip(*kwargs.items())
fields = ":".join(fields)
lengths = np.array([len(a) for a in arrays])
length = lengths[0]
assert np.all(lengths == length)
sample_data = np.empty(length, dtype="O")
for i in range(length):
sample_data[i] = ":".join((vcfstr(a[i]) for a in arrays))
sample_data = "\t".join(sample_data)
return "{}\t{}".format(fields, sample_data)
def format_record(
*,
chrom=None,
pos=None,
id=None,
ref=None,
alt=None,
qual=None,
filter=None,
info=None,
format=None,
):
"""Format a VCF record line.
Parameters
----------
chrom : str
Variant chromosome or contig.
pos : int
Variant position.
id : str
Variant ID.
ref : str
Reference allele.
alt : list, str
Alternate alleles.
qual : int
Variant quality.
filter : str
Variant filter codes.
info : str
Variant INFO string.
format : str
Variant format codes and sample values.
Returns
-------
line : str
VCF record line.
"""
fields = [chrom, pos, id, ref, alt, qual, filter, info, format]
return "\t".join(vcfstr(f) for f in fields)
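# Runnable sketch (not part of the original module) showing how the three
# helpers compose into a single VCF record line. The genotype values are made
# up, and it is assumed that `vcfstr` renders lists as comma-separated values.
if __name__ == "__main__":
    info = format_info_field(AN=4, AC=[1, 1], NS=2)
    samples = format_sample_field(
        GT=np.array(["0/1", "0/0"]),
        DP=np.array([13, 21]),
    )
    print(
        format_record(
            chrom="chr1",
            pos=12345,
            id=".",
            ref="A",
            alt=["T"],
            qual=60,
            filter="PASS",
            info=info,
            format=samples,
        )
    )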
|
py
|
1a5ca61aa081cfdd9d6ea95ef8feca6e0ad3a8c9
|
#from django.shortcuts import render
#from .models import Opcion,Pregunta
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from .models import Opcion, Pregunta
# Create your views here.
def index(request):
latest_question_list = Pregunta.objects.order_by('-pub_date')[:5]
    context = {
'latest_question_list' : latest_question_list
}
return render(request, 'encuesta/index.html', context)
def detalle(request, pregunta_id):
pregunta = get_object_or_404(Pregunta, pk=pregunta_id)
return render(request, 'encuesta/detalle.html', {'pregunta':pregunta})
def votar(request, pregunta_id):
pregunta = get_object_or_404(Pregunta, pk=pregunta_id)
try:
selected_opcion = pregunta.opcion_set.get(pk=request.POST['opcion'])
except (KeyError, Opcion.DoesNotExist):
return render(request, 'encuesta/detalle.html', {
'pregunta' : pregunta,
'error_message' : "No has seleccionado una opcion"
})
else:
selected_opcion.votos += 1
selected_opcion.save()
return HttpResponseRedirect(reverse('encuesta:resultados', args=(pregunta.id,)))
def resultados(request, pregunta_id):
pregunta = get_object_or_404(Pregunta, pk=pregunta_id)
return render(request, 'encuesta/resultados.html', {'pregunta' : pregunta})
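# Illustrative URLconf sketch (not part of the original views). The calls to
# reverse('encuesta:resultados', ...) and the templates above imply an app
# urls.py roughly like the following; the exact module layout is an assumption.
#
#     # encuesta/urls.py
#     from django.urls import path
#     from . import views
#
#     app_name = 'encuesta'
#     urlpatterns = [
#         path('', views.index, name='index'),
#         path('<int:pregunta_id>/', views.detalle, name='detalle'),
#         path('<int:pregunta_id>/votar/', views.votar, name='votar'),
#         path('<int:pregunta_id>/resultados/', views.resultados, name='resultados'),
#     ]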
|
py
|
1a5ca73a41955d832e90843f9ddb35f4f10e2b9f
|
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import qrl.generated.qrlwallet_pb2 as qrlwallet__pb2
class WalletAPIStub(object):
"""//////////////////////////
//////////////////////////
//////////////////////////
//// API ///////
//////////////////////////
//////////////////////////
//////////////////////////
This service describes the Wallet API
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.AddNewAddress = channel.unary_unary(
'/qrl.WalletAPI/AddNewAddress',
request_serializer=qrlwallet__pb2.AddNewAddressReq.SerializeToString,
response_deserializer=qrlwallet__pb2.AddNewAddressResp.FromString,
)
self.AddNewAddressWithSlaves = channel.unary_unary(
'/qrl.WalletAPI/AddNewAddressWithSlaves',
request_serializer=qrlwallet__pb2.AddNewAddressWithSlavesReq.SerializeToString,
response_deserializer=qrlwallet__pb2.AddNewAddressResp.FromString,
)
self.ListAddresses = channel.unary_unary(
'/qrl.WalletAPI/ListAddresses',
request_serializer=qrlwallet__pb2.ListAddressesReq.SerializeToString,
response_deserializer=qrlwallet__pb2.ListAddressesResp.FromString,
)
self.RemoveAddress = channel.unary_unary(
'/qrl.WalletAPI/RemoveAddress',
request_serializer=qrlwallet__pb2.RemoveAddressReq.SerializeToString,
response_deserializer=qrlwallet__pb2.RemoveAddressResp.FromString,
)
self.IsValidAddress = channel.unary_unary(
'/qrl.WalletAPI/IsValidAddress',
request_serializer=qrlwallet__pb2.ValidAddressReq.SerializeToString,
response_deserializer=qrlwallet__pb2.ValidAddressResp.FromString,
)
self.EncryptWallet = channel.unary_unary(
'/qrl.WalletAPI/EncryptWallet',
request_serializer=qrlwallet__pb2.EncryptWalletReq.SerializeToString,
response_deserializer=qrlwallet__pb2.EncryptWalletResp.FromString,
)
self.LockWallet = channel.unary_unary(
'/qrl.WalletAPI/LockWallet',
request_serializer=qrlwallet__pb2.LockWalletReq.SerializeToString,
response_deserializer=qrlwallet__pb2.LockWalletResp.FromString,
)
self.UnlockWallet = channel.unary_unary(
'/qrl.WalletAPI/UnlockWallet',
request_serializer=qrlwallet__pb2.UnlockWalletReq.SerializeToString,
response_deserializer=qrlwallet__pb2.UnlockWalletResp.FromString,
)
self.GetRecoverySeeds = channel.unary_unary(
'/qrl.WalletAPI/GetRecoverySeeds',
request_serializer=qrlwallet__pb2.GetRecoverySeedsReq.SerializeToString,
response_deserializer=qrlwallet__pb2.GetRecoverySeedsResp.FromString,
)
self.GetWalletInfo = channel.unary_unary(
'/qrl.WalletAPI/GetWalletInfo',
request_serializer=qrlwallet__pb2.GetWalletInfoReq.SerializeToString,
response_deserializer=qrlwallet__pb2.GetWalletInfoResp.FromString,
)
self.RelayTransferTxn = channel.unary_unary(
'/qrl.WalletAPI/RelayTransferTxn',
request_serializer=qrlwallet__pb2.RelayTransferTxnReq.SerializeToString,
response_deserializer=qrlwallet__pb2.RelayTxnResp.FromString,
)
self.RelayTransferTxnBySlave = channel.unary_unary(
'/qrl.WalletAPI/RelayTransferTxnBySlave',
request_serializer=qrlwallet__pb2.RelayTransferTxnBySlaveReq.SerializeToString,
response_deserializer=qrlwallet__pb2.RelayTxnResp.FromString,
)
self.RelayMessageTxn = channel.unary_unary(
'/qrl.WalletAPI/RelayMessageTxn',
request_serializer=qrlwallet__pb2.RelayMessageTxnReq.SerializeToString,
response_deserializer=qrlwallet__pb2.RelayTxnResp.FromString,
)
self.RelayMessageTxnBySlave = channel.unary_unary(
'/qrl.WalletAPI/RelayMessageTxnBySlave',
request_serializer=qrlwallet__pb2.RelayMessageTxnBySlaveReq.SerializeToString,
response_deserializer=qrlwallet__pb2.RelayTxnResp.FromString,
)
self.RelayTokenTxn = channel.unary_unary(
'/qrl.WalletAPI/RelayTokenTxn',
request_serializer=qrlwallet__pb2.RelayTokenTxnReq.SerializeToString,
response_deserializer=qrlwallet__pb2.RelayTxnResp.FromString,
)
self.RelayTokenTxnBySlave = channel.unary_unary(
'/qrl.WalletAPI/RelayTokenTxnBySlave',
request_serializer=qrlwallet__pb2.RelayTokenTxnBySlaveReq.SerializeToString,
response_deserializer=qrlwallet__pb2.RelayTxnResp.FromString,
)
self.RelayTransferTokenTxn = channel.unary_unary(
'/qrl.WalletAPI/RelayTransferTokenTxn',
request_serializer=qrlwallet__pb2.RelayTransferTokenTxnReq.SerializeToString,
response_deserializer=qrlwallet__pb2.RelayTxnResp.FromString,
)
self.RelayTransferTokenTxnBySlave = channel.unary_unary(
'/qrl.WalletAPI/RelayTransferTokenTxnBySlave',
request_serializer=qrlwallet__pb2.RelayTransferTokenTxnBySlaveReq.SerializeToString,
response_deserializer=qrlwallet__pb2.RelayTxnResp.FromString,
)
self.RelaySlaveTxn = channel.unary_unary(
'/qrl.WalletAPI/RelaySlaveTxn',
request_serializer=qrlwallet__pb2.RelaySlaveTxnReq.SerializeToString,
response_deserializer=qrlwallet__pb2.RelayTxnResp.FromString,
)
self.RelaySlaveTxnBySlave = channel.unary_unary(
'/qrl.WalletAPI/RelaySlaveTxnBySlave',
request_serializer=qrlwallet__pb2.RelaySlaveTxnBySlaveReq.SerializeToString,
response_deserializer=qrlwallet__pb2.RelayTxnResp.FromString,
)
self.ChangePassphrase = channel.unary_unary(
'/qrl.WalletAPI/ChangePassphrase',
request_serializer=qrlwallet__pb2.ChangePassphraseReq.SerializeToString,
response_deserializer=qrlwallet__pb2.ChangePassphraseResp.FromString,
)
self.GetMiniTransactionsByAddress = channel.unary_unary(
'/qrl.WalletAPI/GetMiniTransactionsByAddress',
request_serializer=qrlwallet__pb2.MiniTransactionsByAddressReq.SerializeToString,
response_deserializer=qrlwallet__pb2.MiniTransactionsByAddressResp.FromString,
)
self.GetTransaction = channel.unary_unary(
'/qrl.WalletAPI/GetTransaction',
request_serializer=qrlwallet__pb2.TransactionReq.SerializeToString,
response_deserializer=qrlwallet__pb2.TransactionResp.FromString,
)
self.GetBalance = channel.unary_unary(
'/qrl.WalletAPI/GetBalance',
request_serializer=qrlwallet__pb2.BalanceReq.SerializeToString,
response_deserializer=qrlwallet__pb2.BalanceResp.FromString,
)
self.GetTotalBalance = channel.unary_unary(
'/qrl.WalletAPI/GetTotalBalance',
request_serializer=qrlwallet__pb2.TotalBalanceReq.SerializeToString,
response_deserializer=qrlwallet__pb2.TotalBalanceResp.FromString,
)
self.GetOTS = channel.unary_unary(
'/qrl.WalletAPI/GetOTS',
request_serializer=qrlwallet__pb2.OTSReq.SerializeToString,
response_deserializer=qrlwallet__pb2.OTSResp.FromString,
)
self.GetHeight = channel.unary_unary(
'/qrl.WalletAPI/GetHeight',
request_serializer=qrlwallet__pb2.HeightReq.SerializeToString,
response_deserializer=qrlwallet__pb2.HeightResp.FromString,
)
self.GetBlock = channel.unary_unary(
'/qrl.WalletAPI/GetBlock',
request_serializer=qrlwallet__pb2.BlockReq.SerializeToString,
response_deserializer=qrlwallet__pb2.BlockResp.FromString,
)
self.GetBlockByNumber = channel.unary_unary(
'/qrl.WalletAPI/GetBlockByNumber',
request_serializer=qrlwallet__pb2.BlockByNumberReq.SerializeToString,
response_deserializer=qrlwallet__pb2.BlockResp.FromString,
)
self.GetAddressFromPK = channel.unary_unary(
'/qrl.WalletAPI/GetAddressFromPK',
request_serializer=qrlwallet__pb2.AddressFromPKReq.SerializeToString,
response_deserializer=qrlwallet__pb2.AddressFromPKResp.FromString,
)
self.GetNodeInfo = channel.unary_unary(
'/qrl.WalletAPI/GetNodeInfo',
request_serializer=qrlwallet__pb2.NodeInfoReq.SerializeToString,
response_deserializer=qrlwallet__pb2.NodeInfoResp.FromString,
)


class WalletAPIServicer(object):
"""//////////////////////////
//////////////////////////
//////////////////////////
//// API ///////
//////////////////////////
//////////////////////////
//////////////////////////
This service describes the Wallet API
"""
def AddNewAddress(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def AddNewAddressWithSlaves(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ListAddresses(self, request, context):
"""rpc AddAddressFromSeed(AddAddressFromSeedReq) returns (AddAddressFromSeedResp);
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RemoveAddress(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def IsValidAddress(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def EncryptWallet(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def LockWallet(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def UnlockWallet(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetRecoverySeeds(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetWalletInfo(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RelayTransferTxn(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RelayTransferTxnBySlave(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RelayMessageTxn(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RelayMessageTxnBySlave(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RelayTokenTxn(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RelayTokenTxnBySlave(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RelayTransferTokenTxn(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RelayTransferTokenTxnBySlave(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RelaySlaveTxn(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def RelaySlaveTxnBySlave(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ChangePassphrase(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetMiniTransactionsByAddress(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTransaction(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetBalance(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetTotalBalance(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetOTS(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetHeight(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetBlock(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetBlockByNumber(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetAddressFromPK(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetNodeInfo(self, request, context):
# missing associated documentation comment in .proto file
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')


def add_WalletAPIServicer_to_server(servicer, server):
rpc_method_handlers = {
'AddNewAddress': grpc.unary_unary_rpc_method_handler(
servicer.AddNewAddress,
request_deserializer=qrlwallet__pb2.AddNewAddressReq.FromString,
response_serializer=qrlwallet__pb2.AddNewAddressResp.SerializeToString,
),
'AddNewAddressWithSlaves': grpc.unary_unary_rpc_method_handler(
servicer.AddNewAddressWithSlaves,
request_deserializer=qrlwallet__pb2.AddNewAddressWithSlavesReq.FromString,
response_serializer=qrlwallet__pb2.AddNewAddressResp.SerializeToString,
),
'ListAddresses': grpc.unary_unary_rpc_method_handler(
servicer.ListAddresses,
request_deserializer=qrlwallet__pb2.ListAddressesReq.FromString,
response_serializer=qrlwallet__pb2.ListAddressesResp.SerializeToString,
),
'RemoveAddress': grpc.unary_unary_rpc_method_handler(
servicer.RemoveAddress,
request_deserializer=qrlwallet__pb2.RemoveAddressReq.FromString,
response_serializer=qrlwallet__pb2.RemoveAddressResp.SerializeToString,
),
'IsValidAddress': grpc.unary_unary_rpc_method_handler(
servicer.IsValidAddress,
request_deserializer=qrlwallet__pb2.ValidAddressReq.FromString,
response_serializer=qrlwallet__pb2.ValidAddressResp.SerializeToString,
),
'EncryptWallet': grpc.unary_unary_rpc_method_handler(
servicer.EncryptWallet,
request_deserializer=qrlwallet__pb2.EncryptWalletReq.FromString,
response_serializer=qrlwallet__pb2.EncryptWalletResp.SerializeToString,
),
'LockWallet': grpc.unary_unary_rpc_method_handler(
servicer.LockWallet,
request_deserializer=qrlwallet__pb2.LockWalletReq.FromString,
response_serializer=qrlwallet__pb2.LockWalletResp.SerializeToString,
),
'UnlockWallet': grpc.unary_unary_rpc_method_handler(
servicer.UnlockWallet,
request_deserializer=qrlwallet__pb2.UnlockWalletReq.FromString,
response_serializer=qrlwallet__pb2.UnlockWalletResp.SerializeToString,
),
'GetRecoverySeeds': grpc.unary_unary_rpc_method_handler(
servicer.GetRecoverySeeds,
request_deserializer=qrlwallet__pb2.GetRecoverySeedsReq.FromString,
response_serializer=qrlwallet__pb2.GetRecoverySeedsResp.SerializeToString,
),
'GetWalletInfo': grpc.unary_unary_rpc_method_handler(
servicer.GetWalletInfo,
request_deserializer=qrlwallet__pb2.GetWalletInfoReq.FromString,
response_serializer=qrlwallet__pb2.GetWalletInfoResp.SerializeToString,
),
'RelayTransferTxn': grpc.unary_unary_rpc_method_handler(
servicer.RelayTransferTxn,
request_deserializer=qrlwallet__pb2.RelayTransferTxnReq.FromString,
response_serializer=qrlwallet__pb2.RelayTxnResp.SerializeToString,
),
'RelayTransferTxnBySlave': grpc.unary_unary_rpc_method_handler(
servicer.RelayTransferTxnBySlave,
request_deserializer=qrlwallet__pb2.RelayTransferTxnBySlaveReq.FromString,
response_serializer=qrlwallet__pb2.RelayTxnResp.SerializeToString,
),
'RelayMessageTxn': grpc.unary_unary_rpc_method_handler(
servicer.RelayMessageTxn,
request_deserializer=qrlwallet__pb2.RelayMessageTxnReq.FromString,
response_serializer=qrlwallet__pb2.RelayTxnResp.SerializeToString,
),
'RelayMessageTxnBySlave': grpc.unary_unary_rpc_method_handler(
servicer.RelayMessageTxnBySlave,
request_deserializer=qrlwallet__pb2.RelayMessageTxnBySlaveReq.FromString,
response_serializer=qrlwallet__pb2.RelayTxnResp.SerializeToString,
),
'RelayTokenTxn': grpc.unary_unary_rpc_method_handler(
servicer.RelayTokenTxn,
request_deserializer=qrlwallet__pb2.RelayTokenTxnReq.FromString,
response_serializer=qrlwallet__pb2.RelayTxnResp.SerializeToString,
),
'RelayTokenTxnBySlave': grpc.unary_unary_rpc_method_handler(
servicer.RelayTokenTxnBySlave,
request_deserializer=qrlwallet__pb2.RelayTokenTxnBySlaveReq.FromString,
response_serializer=qrlwallet__pb2.RelayTxnResp.SerializeToString,
),
'RelayTransferTokenTxn': grpc.unary_unary_rpc_method_handler(
servicer.RelayTransferTokenTxn,
request_deserializer=qrlwallet__pb2.RelayTransferTokenTxnReq.FromString,
response_serializer=qrlwallet__pb2.RelayTxnResp.SerializeToString,
),
'RelayTransferTokenTxnBySlave': grpc.unary_unary_rpc_method_handler(
servicer.RelayTransferTokenTxnBySlave,
request_deserializer=qrlwallet__pb2.RelayTransferTokenTxnBySlaveReq.FromString,
response_serializer=qrlwallet__pb2.RelayTxnResp.SerializeToString,
),
'RelaySlaveTxn': grpc.unary_unary_rpc_method_handler(
servicer.RelaySlaveTxn,
request_deserializer=qrlwallet__pb2.RelaySlaveTxnReq.FromString,
response_serializer=qrlwallet__pb2.RelayTxnResp.SerializeToString,
),
'RelaySlaveTxnBySlave': grpc.unary_unary_rpc_method_handler(
servicer.RelaySlaveTxnBySlave,
request_deserializer=qrlwallet__pb2.RelaySlaveTxnBySlaveReq.FromString,
response_serializer=qrlwallet__pb2.RelayTxnResp.SerializeToString,
),
'ChangePassphrase': grpc.unary_unary_rpc_method_handler(
servicer.ChangePassphrase,
request_deserializer=qrlwallet__pb2.ChangePassphraseReq.FromString,
response_serializer=qrlwallet__pb2.ChangePassphraseResp.SerializeToString,
),
'GetMiniTransactionsByAddress': grpc.unary_unary_rpc_method_handler(
servicer.GetMiniTransactionsByAddress,
request_deserializer=qrlwallet__pb2.MiniTransactionsByAddressReq.FromString,
response_serializer=qrlwallet__pb2.MiniTransactionsByAddressResp.SerializeToString,
),
'GetTransaction': grpc.unary_unary_rpc_method_handler(
servicer.GetTransaction,
request_deserializer=qrlwallet__pb2.TransactionReq.FromString,
response_serializer=qrlwallet__pb2.TransactionResp.SerializeToString,
),
'GetBalance': grpc.unary_unary_rpc_method_handler(
servicer.GetBalance,
request_deserializer=qrlwallet__pb2.BalanceReq.FromString,
response_serializer=qrlwallet__pb2.BalanceResp.SerializeToString,
),
'GetTotalBalance': grpc.unary_unary_rpc_method_handler(
servicer.GetTotalBalance,
request_deserializer=qrlwallet__pb2.TotalBalanceReq.FromString,
response_serializer=qrlwallet__pb2.TotalBalanceResp.SerializeToString,
),
'GetOTS': grpc.unary_unary_rpc_method_handler(
servicer.GetOTS,
request_deserializer=qrlwallet__pb2.OTSReq.FromString,
response_serializer=qrlwallet__pb2.OTSResp.SerializeToString,
),
'GetHeight': grpc.unary_unary_rpc_method_handler(
servicer.GetHeight,
request_deserializer=qrlwallet__pb2.HeightReq.FromString,
response_serializer=qrlwallet__pb2.HeightResp.SerializeToString,
),
'GetBlock': grpc.unary_unary_rpc_method_handler(
servicer.GetBlock,
request_deserializer=qrlwallet__pb2.BlockReq.FromString,
response_serializer=qrlwallet__pb2.BlockResp.SerializeToString,
),
'GetBlockByNumber': grpc.unary_unary_rpc_method_handler(
servicer.GetBlockByNumber,
request_deserializer=qrlwallet__pb2.BlockByNumberReq.FromString,
response_serializer=qrlwallet__pb2.BlockResp.SerializeToString,
),
'GetAddressFromPK': grpc.unary_unary_rpc_method_handler(
servicer.GetAddressFromPK,
request_deserializer=qrlwallet__pb2.AddressFromPKReq.FromString,
response_serializer=qrlwallet__pb2.AddressFromPKResp.SerializeToString,
),
'GetNodeInfo': grpc.unary_unary_rpc_method_handler(
servicer.GetNodeInfo,
request_deserializer=qrlwallet__pb2.NodeInfoReq.FromString,
response_serializer=qrlwallet__pb2.NodeInfoResp.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'qrl.WalletAPI', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
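
# --- Editorial usage sketch (not part of the generated stub) -----------------
# A minimal, hedged illustration of how the generated pieces above are usually
# wired together. The stub class name WalletAPIStub, the port 19010 and the
# thread-pool size are assumptions for illustration only, not values taken from
# this file; normal imports of this module are unaffected by the __main__ guard.
if __name__ == '__main__':
  from concurrent import futures

  # Server side: register the (unimplemented) base servicer on a local port.
  example_server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
  add_WalletAPIServicer_to_server(WalletAPIServicer(), example_server)
  example_server.add_insecure_port('[::]:19010')
  example_server.start()

  # Client side: open a channel and build the generated stub against it.
  example_channel = grpc.insecure_channel('localhost:19010')
  example_stub = WalletAPIStub(example_channel)

  # Every RPC on the base servicer answers with StatusCode.UNIMPLEMENTED.
  try:
    example_stub.GetNodeInfo(qrlwallet__pb2.NodeInfoReq())
  except grpc.RpcError as rpc_error:
    print(rpc_error.code())

  example_server.stop(0)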
|
py
|
1a5ca89b250fde1145ca94a341603fb03ec22d81
|
#MenuTitle: New Tab with Dangerous Glyphs for Interpolation
# -*- coding: utf-8 -*-
__doc__="""
Opens a new tab with glyphs, like the equals sign or a symmetrical period, whose paths could interpolate wrongly within themselves.
"""
import GlyphsApp
Font = Glyphs.font
outputString = ""
def nodeCounts( thisLayer ):
    # node count of every path on the layer, in path order
    countList = [ len(p.nodes) for p in thisLayer.paths ]
    return countList

def shiftString( myString ):
    # rotate the string by one character (the first character moves to the end)
    return myString[1:] + myString[0]

def nodeString( path ):
    # structure signature of a path: "h" for an off-curve handle, "n" for an on-curve node
    nodestring = ""
    for thisNode in path.nodes:
        if thisNode.type == GSOFFCURVE:
            nodestring += "h"
        else:
            nodestring += "n"
    return nodestring

def compatibleWhenReversed( path1, path2 ):
    # True if the node structure of path1 matches the node structure of path2 read backwards
    pathstring1 = nodeString(path1)
    pathstring2 = "".join(reversed(nodeString(path2)))
    return pathstring1 == pathstring2

def compatibleWithDifferentStartPoints( path1, path2 ):
    # True if the node structures match after shifting the start point.
    # When a path is compared with itself, the trivial full rotation is skipped,
    # otherwise every closed path would be reported as compatible with itself.
    pathstring1 = nodeString(path1)
    pathstring2 = nodeString(path2)
    numberOfShifts = len(pathstring1)
    if path1 is path2:
        numberOfShifts -= 1
    for x in range(numberOfShifts):
        pathstring2 = shiftString(pathstring2)
        if pathstring1 == pathstring2:
            return True
    return False
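
# --- Editorial illustration (not part of the original script) ---
# On plain node strings the idea looks like this: a square is "nnnn" and
# already matches itself after shifting the start point by a single node,
# so a lone square-like path is reported as dangerous; an asymmetrical
# structure such as "nnhhnhhn" only matches itself after a full rotation
# and is therefore left alone by the check above.
#
#   shiftString("nnnn")     == "nnnn"      (non-trivial match: dangerous)
#   shiftString("nnhhnhhn") == "nhhnhhnn"  (no non-trivial match)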
def check( thisLayer ):
    thesePaths = thisLayer.paths
    theseComponents = thisLayer.components

    # two or more components with the same name can swap places when interpolating
    if len( theseComponents ) > 1:
        componentNameList = [ c.componentName for c in theseComponents ]
        compareValue_1 = len( componentNameList )
        compareValue_2 = len( set( componentNameList ) )
        if compareValue_1 != compareValue_2:
            return True

    # two or more paths with identical node structures can swap places as well
    if len( thisLayer.paths ) > 1:
        pathStructureList = [ nodeString(p) for p in thesePaths ]
        compareValue_1 = len( pathStructureList )
        compareValue_2 = len( set( pathStructureList ) )
        if compareValue_1 != compareValue_2:
            return True

    # paths with identical node counts: compare their structures shifted and reversed
    nodecounts = nodeCounts(thisLayer)
    if len(nodecounts) != len( set(nodecounts) ):
        numberOfPaths = len(thesePaths)
        for i in range( numberOfPaths ):
            firstPath = thesePaths[i]
            firstPathCount = len(firstPath.nodes)
            for j in range( i+1, numberOfPaths):
                secondPath = thesePaths[j]
                secondPathCount = len(secondPath.nodes)
                if firstPathCount == secondPathCount:
                    if firstPath.closed and secondPath.closed and compatibleWithDifferentStartPoints( firstPath, secondPath ):
                        return True
                    elif compatibleWhenReversed( firstPath, secondPath ):
                        return True

    # a single path is dangerous if it matches a rotated or reversed copy of itself
    if len(thisLayer.paths) == 1:
        thisPath = thisLayer.paths[0]
        if thisPath.closed and compatibleWithDifferentStartPoints( thisPath, thisPath ):
            return True
        elif compatibleWhenReversed( thisPath, thisPath ):
            return True

    return False
# check the first master layer of every glyph and collect the problematic ones
for thisGlyph in Font.glyphs:
    if check( thisGlyph.layers[0] ):
        outputString += "/%s" % thisGlyph.name

if outputString:
    Font.newTab( outputString )
else:
    Message(
        "No interpolation problems",
        "Cannot find any dangerous glyphs in this font.",
        OKButton="Hurrah!"
    )
|