ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3–1.04M) |
---|---|---|
py | b40903ee7fc59f7b7e07977bbfa6e578befc2539 | import requests
import json
url='https://ec2-54-209-30-18.compute-1.amazonaws.com/submit'
# This data needs to be updated
uid = 2736475886
dbdata = json.dumps([{
'id_': 123123,
'isTypo': "bla",
'oman': "pla"
}])
def test_upload():
r = requests.post(
url,
data=dict(
uid=uid,
data=dbdata,
test=1
),
allow_redirects=True,
verify=False
)
assert r.status_code == 200
assert r.text == 'Success'
|
py | b4090569826a632e69147b9bb55ee7c8b816f11d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Basic NetProg Cases Console Script.
Copyright (c) 2019 Cisco and/or its affiliates.
This software is licensed to you under the terms of the Cisco Sample
Code License, Version 1.1 (the "License"). You may obtain a copy of the
License at
https://developer.cisco.com/docs/licenses
All use of the material herein must be in accordance with the terms of
the License. All rights not expressly granted by the License are
reserved. Unless required by applicable law or agreed to separately in
writing, software distributed under the License is distributed on an "AS
IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
or implied.
"""
__author__ = "Russell Johnston"
__email__ = "[email protected]"
__version__ = "0.1.0"
__copyright__ = "Copyright (c) 2019 Cisco and/or its affiliates."
__license__ = "Cisco Sample Code License, Version 1.1"
from netmiko import Netmiko
from jinja2 import Environment, FileSystemLoader
import yaml
from getpass import getpass
# Load inventory file and store to dictionary
with open('inventory.yaml', 'r') as yfile:
data = yaml.load(yfile, Loader=yaml.FullLoader)
yfile.close()
for device in data:
print(device)
template_option = [
{'template':'mgmt_port','j2file':'managementport.j2'},
{'template':'server_port', 'j2file':'serverport.j2'}
]
sel_device = input('Enter the name of the device to connect to\n')
i = 0
for option in template_option:
i += 1
print('%d : %s' %(i, option['template']))
sel_template = int(input('Select Template to Deploy\n'))
sel_template -= 1
port_template = template_option[sel_template]['j2file']
port = input('Enter Port to be configured\n')
description = input('Enter port description\n')
host = {
"host": data[sel_device]['ip_addr'],
"username": input("Username: "),
"password": getpass("Password: "),
"device_type": data[sel_device]['device_type']
}
# Set the current directory for source of environment
file_loader = FileSystemLoader('.')
env = Environment(loader=file_loader)
# Define source jinja2 template
template = env.get_template(port_template)
if sel_template == 0:
config_output = template.render(
intf_name=port,
vlan=data[sel_device]['mgmt_vlan']
)
if sel_template == 1:
config_output = template.render(
intf_name=port,
native_vlan=data[sel_device]['native_vlan'],
vlan_list=data[sel_device]['vlan_list']
)
print(config_output)
print('\nConnecting to device ' + str(data[sel_device]['ip_addr']))
net_connect = Netmiko(**host)
output = net_connect.send_config_set(config_output)
print(output)
net_connect.save_config()
net_connect.disconnect()
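# ---------------------------------------------------------------------------
# Illustrative sketch only (not called anywhere): a hypothetical inventory
# entry matching the keys this script reads (ip_addr, device_type, mgmt_vlan,
# native_vlan, vlan_list) and an inline Jinja2 template similar to what
# managementport.j2 might contain. Device names, addresses and VLAN numbers
# are made up.
# ---------------------------------------------------------------------------
def _example_inventory_and_template():
    example_inventory = {
        'sw1': {
            'ip_addr': '192.0.2.10',
            'device_type': 'cisco_ios',
            'mgmt_vlan': 10,
            'native_vlan': 99,
            'vlan_list': '10,20,30',
        }
    }
    mgmt_template = Environment().from_string(
        "interface {{ intf_name }}\n"
        " switchport mode access\n"
        " switchport access vlan {{ vlan }}\n"
    )
    rendered = mgmt_template.render(
        intf_name='GigabitEthernet1/0/1',
        vlan=example_inventory['sw1']['mgmt_vlan'],
    )
    return example_inventory, rendered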
|
py | b4090577be0bd4dbfa4fe81ebbd985aab831514e | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
# Date Conditionals (datecond)
# Version 0.1.2
# Copyright © 2015-2016, Chris Warrick.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions, and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions, and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the author of this software nor the names of
# contributors to this software may be used to endorse or promote
# products derived from this software without specific prior written
# consent.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Date range parser."""
from __future__ import print_function, unicode_literals
import dateutil.parser
import re
import operator
__all__ = ('date_in_range',)
CLAUSE = re.compile('(year|month|day|hour|minute|second|weekday|isoweekday)?'
' ?(==|!=|<=|>=|<|>) ?(.*)')
OPERATORS = {
'==': operator.eq,
'!=': operator.ne,
'<=': operator.le,
'>=': operator.ge,
'<': operator.lt,
'>': operator.gt,
}
def date_in_range(date_range, date, debug=True):
"""Check if date is in the range specified.
Format:
* comma-separated clauses (AND)
* clause: attribute comparison_operator value (spaces optional)
* attribute: year, month, day, hour, minute, second, weekday, isoweekday
or empty for full datetime
* comparison_operator: == != <= >= < >
* value: integer or dateutil-compatible date input
"""
out = True
for item in date_range.split(','):
attribute, comparison_operator, value = CLAUSE.match(
item.strip()).groups()
if attribute in ('weekday', 'isoweekday'):
left = getattr(date, attribute)()
right = int(value)
elif attribute:
left = getattr(date, attribute)
right = int(value)
else:
left = date
right = dateutil.parser.parse(value)
if debug: # pragma: no cover
print(" <{0} {1} {2}>".format(left, comparison_operator, right))
out = out and OPERATORS[comparison_operator](left, right)
return out
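# A quick usage sketch (not part of the original module): every comma-separated
# clause must hold, so the first call below checks "year == 2016" AND "month >= 6".
if __name__ == '__main__':
    import datetime
    d = datetime.datetime(2016, 7, 14, 12, 30)
    print(date_in_range('year == 2016, month >= 6', d, debug=False))  # True
    print(date_in_range('weekday == 3', d, debug=False))              # True (a Thursday)
    print(date_in_range('>= 2017-01-01', d, debug=False))             # False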
|
py | b40905fe642a4c9fc57449ffd644ba82eff69af9 | import dash_html_components as html
import dash_core_components as dcc
tab_sim_layout = html.Div([
html.Div([
html.H6(
'Simulate Future Rates',
style={'marginLeft': '1.5em', 'font-weight':'bold'}
),
html.H6('Currency:', style={'marginLeft': '1.5em',}),
dcc.Dropdown(
id='tab-sim-curr-dropdown',
options=[{'label': i, 'value': i} for i in ['USD', 'CAD']],
value='USD',
style={'width': '100px', 'marginLeft': '.5em'},
),
html.H6('Tenor:', style={'marginLeft': '1.5em'}),
dcc.Dropdown(
id='tab-sim-tenor-dropdown',
options=[{'label': i + ' month', 'value': i}
for i in ['1', '2', '3', '6', '12']],
value='1',
style={'width': '150px', 'marginLeft': '.5em'},
),
html.H6('Transf.:', style={'marginLeft': '1.5em'}),
dcc.Dropdown(
id='tab-sim-transf-dropdown',
style={'width': '120px', 'marginLeft': '.5em'},
),
html.H6('Over:', style={'marginLeft': '1.5em'}),
dcc.RadioItems(
id='tab-sim-incr-radio',
options=[{'label': '{} day'.format(str(i)), 'value': i} for i in ['1', '25']],
value='1',
),
], style={'display': 'flex', 'marginTop': '1.5em'}),
html.Div([
html.H6('Model:', style={'marginLeft': '1.5em', }),
dcc.Dropdown(
id='tab-sim-model-dropdown',
options=[{'label': i, 'value': i} for i in ['Vasicek', 'Brownian']],
#value='Brownian',
value='Vasicek',
style={'width': '140px', 'marginLeft': '.5em'},
),
html.H6('Distribution:', style={'marginLeft': '1.5em'}),
dcc.RadioItems(
id='tab-sim-distr-radio',
style={'width': '75px', 'marginLeft': '.5em'},
),
html.H6('# Days:', style={'marginLeft': '1.5em'}),
dcc.Dropdown(
id='tab-sim-ndays-dropdown',
options=[{'label': str(i), 'value': i} for i in [50, 100, 250]],
value=50,
style={'width': '100px', 'marginLeft': '.5em'},
),
html.H6('# Paths:', style={'marginLeft': '1.5em'}),
dcc.Dropdown(
id='tab-sim-npaths-dropdown',
options=[{'label': str(i), 'value': i} for i in [5, 10, 50, 100, 500]],
value=5,
style={'width': '80px', 'marginLeft': '.5em'},
),
html.Button(
id='tab-sim-button',
n_clicks=0,
children='Re-calculate',
style={'width': '180px', 'marginLeft': '3em'}),
#Add button to recalculate paths.
], style={'display': 'flex', 'marginTop': '1.5em'}),
dcc.Graph(id='tab-sim-graph'),
html.Div([
html.Div(
id='tab-sim-slider',
style={'width': '100%'}
),
], style={'marginBottom': 25, 'marginLeft': 100, 'marginRight': 100}),
html.Div(
id='tab-sim-slider-container',
style={'textAlign': 'center'},),
])
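# A minimal, hypothetical wiring sketch (an assumption, not part of this layout
# module): how an app could register a callback that redraws 'tab-sim-graph'
# whenever the Re-calculate button is clicked. The figure built here is
# placeholder data; a real app would run the selected simulation model instead.
def register_sim_callbacks(app):
    from dash.dependencies import Input, Output

    @app.callback(Output('tab-sim-graph', 'figure'),
                  [Input('tab-sim-button', 'n_clicks'),
                   Input('tab-sim-model-dropdown', 'value'),
                   Input('tab-sim-npaths-dropdown', 'value')])
    def update_graph(n_clicks, model, n_paths):
        # placeholder: one flat path per requested path count
        traces = [{'x': [0, 1, 2], 'y': [0.01, 0.01, 0.01], 'mode': 'lines'}
                  for _ in range(n_paths or 1)]
        return {'data': traces,
                'layout': {'title': '{} simulation (placeholder)'.format(model)}}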
|
py | b40906753910983eba8579c683fdf444ca8be76f | from django.contrib import admin
from django.urls import include, path
urlpatterns = [
path("admin/", admin.site.urls),
path("", include("api.urls", namespace="api")),
]
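# Hypothetical sketch (a separate file, not part of this urls.py): for
# include("api.urls", namespace="api") to resolve, the included module must set
# app_name. The view import and route below are made-up examples.
#
#   # api/urls.py
#   from django.urls import path
#   from . import views
#
#   app_name = "api"
#   urlpatterns = [
#       path("items/", views.item_list, name="item-list"),
#   ]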
|
py | b40907e94439f08d52bb5237528a331cbb9c45a7 | from __future__ import annotations
from typing import NoReturn
from ...base import BaseEstimator
import numpy as np
from numpy.linalg import pinv
from IMLearn.metrics.loss_functions import mean_square_error
class LinearRegression(BaseEstimator):
"""
Linear Regression Estimator
Solving Ordinary Least Squares optimization problem
"""
def __init__(self, include_intercept: bool = True) -> LinearRegression:
"""
Instantiate a linear regression estimator
Parameters
----------
include_intercept: bool, default=True
Should fitted model include an intercept or not
Attributes
----------
include_intercept_: bool
Should fitted model include an intercept or not
coefs_: ndarray of shape (n_features,) or (n_features+1,)
Coefficients vector fitted by linear regression. To be set in
`LinearRegression.fit` function.
"""
super().__init__()
self.include_intercept_, self.coefs_ = include_intercept, None
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
Fit Least Squares model to given samples
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
Notes
-----
Fits model with or without an intercept depending on value of `self.include_intercept_`
"""
if self.include_intercept_:
intercept = np.ones(len(X)).reshape(len(X), 1)
X_intercept = np.concatenate((intercept, X), axis=1)
self.coefs_ = np.dot(np.linalg.pinv(X_intercept), y)
else:
self.coefs_ = np.dot(np.linalg.pinv(X), y)
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
"""
if self.include_intercept_:
intercept = np.ones(len(X)).reshape(len(X), 1)
X_intercept = np.concatenate((intercept, X), axis=1)
return np.dot(X_intercept, self.coefs_)
return np.dot(X, self.coefs_)
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under MSE loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under MSE loss function
"""
return mean_square_error(y, self._predict(X))
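# A standalone numpy sketch (illustrative only, not used by the class above) of
# the same pseudo-inverse fit: append a column of ones for the intercept, solve
# with pinv, and evaluate the mean squared error on the training data.
def _ols_pinv_demo(n_samples: int = 100, n_features: int = 3) -> float:
    rng = np.random.default_rng(0)
    X = rng.standard_normal((n_samples, n_features))
    y = X @ np.arange(1, n_features + 1) + 2.5   # known weights plus intercept
    X_i = np.concatenate((np.ones((n_samples, 1)), X), axis=1)
    coefs = np.linalg.pinv(X_i) @ y              # [intercept, w_1, ..., w_d]
    residuals = y - X_i @ coefs
    return float(np.mean(residuals ** 2))        # ~0 for this noiseless data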
|
py | b409080e0d64e0dd64f59919e7a98e459031e816 | # flake8: noqa
from .linkage import EmpiricalLinkage
|
py | b40908862631d67f48a6034c3c9a35113f4c7013 | from __future__ import unicode_literals
from moto.core.exceptions import JsonRESTError
class InvalidInputException(JsonRESTError):
code = 400
def __init__(self):
super(InvalidInputException, self).__init__(
"InvalidInputException",
"You provided a value that does not match the required pattern.",
)
class DuplicateOrganizationalUnitException(JsonRESTError):
code = 400
def __init__(self):
super(DuplicateOrganizationalUnitException, self).__init__(
"DuplicateOrganizationalUnitException",
"An OU with the same name already exists.",
)
|
py | b40908d6db86b2f72b95fabffdd9026caeb92a43 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for `get_config` backwards compatibility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from keras import keras_parameterized
from keras.engine import sequential
from keras.engine import training
from keras.tests import get_config_samples
@keras_parameterized.run_all_keras_modes
class TestGetConfigBackwardsCompatible(keras_parameterized.TestCase):
def test_functional_dnn(self):
model = training.Model.from_config(get_config_samples.FUNCTIONAL_DNN)
self.assertLen(model.layers, 3)
def test_functional_cnn(self):
model = training.Model.from_config(get_config_samples.FUNCTIONAL_CNN)
self.assertLen(model.layers, 4)
def test_functional_lstm(self):
model = training.Model.from_config(get_config_samples.FUNCTIONAL_LSTM)
self.assertLen(model.layers, 3)
def test_sequential_dnn(self):
model = sequential.Sequential.from_config(get_config_samples.SEQUENTIAL_DNN)
self.assertLen(model.layers, 2)
def test_sequential_cnn(self):
model = sequential.Sequential.from_config(get_config_samples.SEQUENTIAL_CNN)
self.assertLen(model.layers, 3)
def test_sequential_lstm(self):
model = sequential.Sequential.from_config(
get_config_samples.SEQUENTIAL_LSTM)
self.assertLen(model.layers, 2)
if __name__ == '__main__':
tf.test.main()
|
py | b409093741d517ee528e3dbd71d48b267166dea1 | # -*- coding: utf-8 -*-
import torch
from torch.autograd import Function
from supar.utils.common import MIN
from supar.utils.fn import pad
def tarjan(sequence):
r"""
Tarjan algorithm for finding Strongly Connected Components (SCCs) of a graph.
Args:
sequence (list):
List of head indices.
Yields:
A list of indices making up a SCC. All self-loops are ignored.
Examples:
>>> next(tarjan([2, 5, 0, 3, 1])) # (1 -> 5 -> 2 -> 1) is a cycle
[2, 5, 1]
"""
sequence = [-1] + sequence
# record the search order, i.e., the timestep
dfn = [-1] * len(sequence)
# record the smallest timestep in a SCC
low = [-1] * len(sequence)
# push the visited into the stack
stack, onstack = [], [False] * len(sequence)
def connect(i, timestep):
dfn[i] = low[i] = timestep[0]
timestep[0] += 1
stack.append(i)
onstack[i] = True
for j, head in enumerate(sequence):
if head != i:
continue
if dfn[j] == -1:
yield from connect(j, timestep)
low[i] = min(low[i], low[j])
elif onstack[j]:
low[i] = min(low[i], dfn[j])
# a SCC is completed
if low[i] == dfn[i]:
cycle = [stack.pop()]
while cycle[-1] != i:
onstack[cycle[-1]] = False
cycle.append(stack.pop())
onstack[i] = False
# ignore the self-loop
if len(cycle) > 1:
yield cycle
timestep = [0]
for i in range(len(sequence)):
if dfn[i] == -1:
yield from connect(i, timestep)
def chuliu_edmonds(s):
r"""
ChuLiu/Edmonds algorithm for non-projective decoding :cite:`mcdonald-etal-2005-non`.
Some code is borrowed from `tdozat's implementation`_.
Descriptions of notations and formulas can be found in :cite:`mcdonald-etal-2005-non`.
Notes:
The algorithm does not guarantee to parse a single-root tree.
Args:
s (~torch.Tensor): ``[seq_len, seq_len]``.
Scores of all dependent-head pairs.
Returns:
~torch.Tensor:
A tensor with shape ``[seq_len]`` for the resulting non-projective parse tree.
.. _tdozat's implementation:
https://github.com/tdozat/Parser-v3
"""
s[0, 1:] = MIN
# prevent self-loops
s.diagonal()[1:].fill_(MIN)
# select heads with highest scores
tree = s.argmax(-1)
# return the cycle found by the tarjan algorithm lazily
cycle = next(tarjan(tree.tolist()[1:]), None)
# if the tree has no cycles, then it is a MST
if not cycle:
return tree
# indices of cycle in the original tree
cycle = torch.tensor(cycle)
# indices of noncycle in the original tree
noncycle = torch.ones(len(s)).index_fill_(0, cycle, 0)
noncycle = torch.where(noncycle.gt(0))[0]
def contract(s):
# heads of cycle in original tree
cycle_heads = tree[cycle]
# scores of cycle in original tree
s_cycle = s[cycle, cycle_heads]
# calculate the scores of cycle's potential dependents
# s(c->x) = max(s(x'->x)), x in noncycle and x' in cycle
s_dep = s[noncycle][:, cycle]
# find the best cycle head for each noncycle dependent
deps = s_dep.argmax(1)
# calculate the scores of cycle's potential heads
# s(x->c) = max(s(x'->x) - s(a(x')->x') + s(cycle)), x in noncycle and x' in cycle
# a(v) is the predecessor of v in cycle
# s(cycle) = sum(s(a(v)->v))
s_head = s[cycle][:, noncycle] - s_cycle.view(-1, 1) + s_cycle.sum()
# find the best noncycle head for each cycle dependent
heads = s_head.argmax(0)
contracted = torch.cat((noncycle, torch.tensor([-1])))
# calculate the scores of contracted graph
s = s[contracted][:, contracted]
# set the contracted graph scores of cycle's potential dependents
s[:-1, -1] = s_dep[range(len(deps)), deps]
# set the contracted graph scores of cycle's potential heads
s[-1, :-1] = s_head[heads, range(len(heads))]
return s, heads, deps
# keep track of the endpoints of the edges into and out of cycle for reconstruction later
s, heads, deps = contract(s)
# y is the contracted tree
y = chuliu_edmonds(s)
# exclude head of cycle from y
y, cycle_head = y[:-1], y[-1]
# fix the subtree with no heads coming from the cycle
# len(y) denotes heads coming from the cycle
subtree = y < len(y)
# add the nodes to the new tree
tree[noncycle[subtree]] = noncycle[y[subtree]]
# fix the subtree with heads coming from the cycle
subtree = ~subtree
# add the nodes to the tree
tree[noncycle[subtree]] = cycle[deps[subtree]]
# fix the root of the cycle
cycle_root = heads[cycle_head]
# break the cycle and add the root of the cycle to the tree
tree[cycle[cycle_root]] = noncycle[cycle_head]
return tree
def mst(scores, mask, multiroot=False):
r"""
MST algorithm for decoding non-projective trees.
This is a wrapper for ChuLiu/Edmonds algorithm.
The algorithm first runs ChuLiu/Edmonds to parse a tree and then checks for multiple roots.
If ``multiroot=False`` and the parse does contain several roots, the algorithm searches for the
best single-root tree by iterating over all possible single-root trees parsed by ChuLiu/Edmonds.
Otherwise the resulting trees are directly taken as the final outputs.
Args:
scores (~torch.Tensor): ``[batch_size, seq_len, seq_len]``.
Scores of all dependent-head pairs.
mask (~torch.BoolTensor): ``[batch_size, seq_len]``.
The mask to avoid parsing over padding tokens.
The first column serving as pseudo words for roots should be ``False``.
multiroot (bool):
If ``False``, ensures that the parsed tree has a single root.
Returns:
~torch.Tensor:
A tensor with shape ``[batch_size, seq_len]`` for the resulting non-projective parse trees.
Examples:
>>> scores = torch.tensor([[[-11.9436, -13.1464, -6.4789, -13.8917],
[-60.6957, -60.2866, -48.6457, -63.8125],
[-38.1747, -49.9296, -45.2733, -49.5571],
[-19.7504, -23.9066, -9.9139, -16.2088]]])
>>> scores[:, 0, 1:] = MIN
>>> scores.diagonal(0, 1, 2)[1:].fill_(MIN)
>>> mask = torch.tensor([[False, True, True, True]])
>>> mst(scores, mask)
tensor([[0, 2, 0, 2]])
"""
batch_size, seq_len, _ = scores.shape
scores = scores.cpu().unbind()
preds = []
for i, length in enumerate(mask.sum(1).tolist()):
s = scores[i][:length + 1, :length + 1]
tree = chuliu_edmonds(s)
roots = torch.where(tree[1:].eq(0))[0] + 1
if not multiroot and len(roots) > 1:
s_root = s[:, 0]
s_best = MIN
s = s.index_fill(1, torch.tensor(0), MIN)
for root in roots:
s[:, 0] = MIN
s[root, 0] = s_root[root]
t = chuliu_edmonds(s)
s_tree = s[1:].gather(1, t[1:].unsqueeze(-1)).sum()
if s_tree > s_best:
s_best, tree = s_tree, t
preds.append(tree)
return pad(preds, total_length=seq_len).to(mask.device)
class SampledLogsumexp(Function):
@staticmethod
def forward(ctx, x, dim=-1):
ctx.dim = dim
ctx.save_for_backward(x)
return x.logsumexp(dim=dim)
@staticmethod
def backward(ctx, grad_output):
from torch.distributions import OneHotCategorical
x, dim = *ctx.saved_tensors, ctx.dim  # unpack the single saved tensor
if ctx.needs_input_grad[0]:
return grad_output.unsqueeze(dim).mul(
OneHotCategorical(logits=x.movedim(dim, -1)).sample().movedim(-1, dim)), None
return None, None
class SparsemaxFunction(Function):
@staticmethod
def forward(ctx, x, dim=-1):
ctx.dim = dim
sorted_x, _ = x.sort(dim, True)
z = sorted_x.cumsum(dim) - 1
k = x.new_tensor(range(1, sorted_x.size(dim) + 1)).view(-1, *[1] * (x.dim() - 1)).transpose(0, dim)
k = (k * sorted_x).gt(z).sum(dim, True)
tau = z.gather(dim, k - 1) / k
p = torch.clamp(x - tau, 0)
ctx.save_for_backward(k, p)
return p
@staticmethod
def backward(ctx, grad_output):
k, p, dim = *ctx.saved_tensors, ctx.dim
grad = grad_output.masked_fill(p.eq(0), 0)
grad = torch.where(p.ne(0), grad - grad.sum(dim, True) / k, grad)
return grad, None
sampled_logsumexp = SampledLogsumexp.apply
sparsemax = SparsemaxFunction.apply
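# A small usage sketch (not part of the library code above): sparsemax projects
# scores onto the probability simplex with exact zeros, and sampled_logsumexp
# matches logsumexp in the forward pass (the sampling only affects gradients).
if __name__ == '__main__':
    x = torch.tensor([1., 2., 3.])
    print(sparsemax(x, -1))            # tensor([0., 0., 1.])
    print(sampled_logsumexp(x, -1))    # tensor(3.4076)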
|
py | b40909c6b96993185691c8e617bf6c5798a25c11 | import json
import datetime
import time
import RPi.GPIO as GPIO
import numpy as np
import pygame
import threading
GPIO.setmode(GPIO.BOARD)
GPIO.setwarnings(False)
def average_light_value (seconds=60):
pin_to_circuit = 7
results = np.empty(0)
# seconds since midnight (hour * 3600 + minute * 60 + second)
timeCalled = datetime.datetime.now().hour * 3600 + datetime.datetime.now().second + datetime.datetime.now().minute * 60
while (datetime.datetime.now().hour * 3600 + datetime.datetime.now().second + datetime.datetime.now().minute * 60 < timeCalled + seconds):
count = 0
#Output on the pin for
GPIO.setup(pin_to_circuit, GPIO.OUT)
GPIO.output(pin_to_circuit, GPIO.LOW)
time.sleep(0.1)
#Change the pin back to input
GPIO.setup(pin_to_circuit, GPIO.IN)
#Count until the pin goes high
while (GPIO.input(pin_to_circuit) == GPIO.LOW):
count += 1
results = np.append(results, count)
print(results)
return np.mean(results)
class SoundThread (threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run (self):
play_sound()
class BlindsThread (threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run (self):
open_blinds()
class WindowThread (threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
def run (self):
open_window()
def play_sound():
pygame.mixer.init()
pygame.mixer.music.load("rainforest_ambience-GlorySunz-1938133500.wav")
pygame.mixer.music.play()
print('Play sound')
def open_blinds():
print('Opening blinds')
Motor1A = 16
Motor1B = 18
Motor1E = 22
GPIO.setup(Motor1A,GPIO.OUT)
GPIO.setup(Motor1B,GPIO.OUT)
GPIO.setup(Motor1E,GPIO.OUT)
GPIO.output(Motor1A,GPIO.HIGH)
GPIO.output(Motor1B,GPIO.LOW)
GPIO.output(Motor1E,GPIO.HIGH)
time.sleep(10)
GPIO.output(Motor1E,GPIO.LOW)
def open_window():
print('Opening window')
Motor2A = 23
Motor2B = 21
Motor2E = 19
GPIO.setup(Motor2A,GPIO.OUT)
GPIO.setup(Motor2B,GPIO.OUT)
GPIO.setup(Motor2E,GPIO.OUT)
GPIO.output(Motor2A,GPIO.HIGH)
GPIO.output(Motor2B,GPIO.LOW)
GPIO.output(Motor2E,GPIO.HIGH)
time.sleep(10)
GPIO.output(Motor2E,GPIO.LOW)
# While true
# while(true):
# Pull config from files
with open('config.json') as json_file:
GPIO.setmode(GPIO.BOARD)
# Parse config into array
data = json.load(json_file)
# get current day config
dateTimeNow = datetime.datetime.now()
dayEnum = dateTimeNow.today().strftime("%A").upper()
day = data["config"][dayEnum]
# if day is enabled
if day['isEnabled']:
# GPIO.setmode(GPIO.BOARD)
activated = False
# if light activated is true
if day['lightActivated']:
value = average_light_value(30)
# if there is enough light and it is not night time (5am to 5pm)
activated = value < 500 and dateTimeNow.hour > 5 and dateTimeNow.hour < 17
print(value)
# read light sensor for 1 minute
# average result to set activated to true or not
else:
# check to see if current time = time from config
dateTimeNow = datetime.datetime.now()
nowTimeInMin = dateTimeNow.minute + dateTimeNow.hour * 60
print("time " + str(nowTimeInMin) + " vs " + str(day['time']))
activated = day['time'] == nowTimeInMin
if activated:
if day["openBlinds"]:
# activate blinds motor
print("Open Blinds Thread Spawn")
BlindsThread().start()
if day["openWindow"]:
# activate window motor
print("Open Window Thread Spawn")
WindowThread().start()
if day["playSound"]:
# play sound
print("Play Sound")
# SoundThread().start()
pygame.mixer.init()
pygame.mixer.music.load("rainforest_ambience-GlorySunz-1938133500.wav")
pygame.mixer.music.play()
while pygame.mixer.music.get_busy() == True:
continue
GPIO.cleanup()
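# Illustrative only: the structure config.json is expected to have, based on the
# lookups above. 'time' is minutes after midnight; all values here are made up.
def _example_config():
    return {
        "config": {
            "MONDAY": {
                "isEnabled": True,
                "lightActivated": False,
                "time": 420,           # 07:00
                "openBlinds": True,
                "openWindow": False,
                "playSound": True,
            },
            # ... one entry per uppercase weekday name ("TUESDAY", ...) ...
        }
    }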
|
py | b4090aab76be24c6d35d1498e4ae5d66353b5212 | # Exercise 2.13
# Author: Noah Waterfield Price
s = 0
k = 1
M = 100
for k in range(1, M + 1):
s += 1. / k
print s
"""
Sample run:
python sum_for.py
5.18737751764
"""
|
py | b4090b9fa0ec46cdc76bed5756757acb9525ebc8 | import weio
import numpy as np
import os
MyDir=os.path.dirname(__file__)
Bld=weio.read(os.path.join(MyDir,'../../../data/NREL5MW/data/NREL5MW_AD15_blade.dat'))
nSpan = 10
Spn = np.linspace(0, 15, nSpan) # BlSpn, radial stations [m]
CrvAC = np.zeros((nSpan,)) # BlCrvAC, prebend (usually <0) [m]
SwpAC = np.zeros((nSpan,)) # BlSwpAC, sweep [m]
CrvAng = np.concatenate(([0], np.arctan2((CrvAC[1:]-CrvAC[:-1]),(Spn[1:]-Spn[:-1]))*180/np.pi))
Twist = np.zeros((nSpan,)) + 1 # BlTwist [deg]
Chord = np.zeros((nSpan,)) + 5 # BlChord [m]
AFID = np.zeros((nSpan,)).astype(int) # BlAFID [-]
ADProp = np.column_stack((Spn,CrvAC,SwpAC,CrvAng,Twist,Chord,AFID))
Bld['NumBlNds'] = ADProp.shape[0]
Bld['BldAeroNodes'] = ADProp
Bld.write('_AeroDyn_Blade_Modified.dat')
|
py | b4090bae43c491e2279402a98185a9e69c30ea86 | #!/usr/bin/env python
"""
_MSGTransPortAgent_
MSG TransPortAgent Base Class
Use _dict_ for all transportation
"""
import random
import time
import sys
import types
import os
class MSGTransPortAgent:
"""
_MSGTransPortAgent_
Main Class for sending monitoring data via the MSG server similar to apmon
"""
def __init__ (self, clusterName, nodeName, instanceId = None):
"""
Constructor
"""
self.msgmonConf = {}
self.clusterName = clusterName
self.nodeName = nodeName
self.instanceId = instanceId
self.sys_monitoring = 0
self.sys_interval = 200
self.general_info = 0
self.job_monitoring = 0
self.job_interval = 200
self.setMaxMsgRate = 500
if self.instanceId == None:
try:
for line in os.popen("/sbin/ifconfig"):
if line.find('Ether') > -1:
self.instanceId = line.split()[4]
except ValueError:
self.instanceId = random.randint(0, 0x7FFFFFFE)
def connect(self):
"""
initialize via _connect_
"""
pass
def send(self, params):
"""
_send_
"""
pass
def disconnect(self):
"""
_disconnect_
"""
pass
def newDestination(self, host, port, password = ''):
"""
_newDestination_
"""
pass
def addProcessToMonitor(self, pid = None, workDir = None):
"""
_addProcessToMonitor_
"""
pass
def removeProcessToMonitor(self, pid = None):
"""
_removeProcessToMonitor_
Remove a process from being monitored
"""
pass
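# A hypothetical subclass sketch (not part of this module): concrete transport
# agents are expected to override connect/send/disconnect. The print-based
# "transport" below is only a stand-in for a real MSG connection.
class PrintingTransPortAgent(MSGTransPortAgent):
    def connect(self):
        print("connecting %s/%s" % (self.clusterName, self.nodeName))
    def send(self, params):
        print("sending %s" % (params,))
    def disconnect(self):
        print("disconnected")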
|
py | b4090bef291f2b14c53e518b8f5d271d1386e637 | #! /usr/bin/python
# Filename: var.py
i = 5
print i
i = i + 1
print i
s = '''This is a multi-line string
This is the second line.'''
print s
|
py | b4090cbb430ce0f0d9bf47903da4543a94270fad | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Imports required by the annotations below (absent from this sample fragment).
import typing
from typing import Dict

import google.protobuf.struct_pb2

def make_endpoint(endpoint: str) -> str:
# Sample function parameter endpoint in explain_sample
endpoint = endpoint
return endpoint
def make_instances(instance_dict: Dict) -> typing.Sequence[google.protobuf.struct_pb2.Value]:
instance = to_protobuf_value(instance_dict)
instances = [instance]
return instances
def make_parameters() -> google.protobuf.struct_pb2.Value:
parameters_dict = {}
parameters = to_protobuf_value(parameters_dict)
return parameters
def make_deployed_model_id(deployed_model_id: str) -> str:
# Sample function parameter deployed_model_id in explain_sample
deployed_model_id = deployed_model_id
return deployed_model_id
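# Note: `to_protobuf_value` is referenced above but never defined in this
# snippet. One possible implementation (an assumption, not necessarily the
# helper the original sample imports) converts a plain dict through the
# protobuf JSON mapping:
def to_protobuf_value(value: Dict) -> google.protobuf.struct_pb2.Value:
    from google.protobuf import json_format
    return json_format.ParseDict(value, google.protobuf.struct_pb2.Value())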
|
py | b4090cbf197ad4d9456f31e0d270c73f27a743a0 | # BSD-3-Clause License
#
# Copyright 2017 Orange
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from time import sleep
from unittest.mock import MagicMock, call
import pytest
from pydcop.infrastructure.agents import Agent
from pydcop.infrastructure.communication import UnknownAgent, \
InProcessCommunicationLayer
from pydcop.infrastructure.discovery import Discovery, Directory, \
UnknownComputation, DiscoveryException
@pytest.fixture
def standalone_discovery():
discovery = Discovery('test', 'address')
return discovery
@pytest.fixture
def directory_discovery():
# Agent hosting the directory
agt_dir = Agent('agt_dir', InProcessCommunicationLayer())
directory = Directory(agt_dir.discovery)
agt_dir.add_computation(directory.directory_computation)
agt_dir.discovery.use_directory('agt_dir', agt_dir.address)
agt_dir.start()
agt_dir.run(directory.directory_computation.name)
# standard agents
agt1 = Agent('agt1', InProcessCommunicationLayer())
agt1.discovery.use_directory('agt_dir', agt_dir.address)
agt1.start()
agt2 = Agent('agt2', InProcessCommunicationLayer())
agt2.discovery.use_directory('agt_dir', agt_dir.address)
agt2.start()
yield agt_dir, agt1, agt2
for c in agt1.computations():
agt1.remove_computation(c.name)
for c in agt1.discovery.agent_computations(agt1.name):
agt1.discovery.unregister_computation(c)
for c in agt2.computations():
agt2.remove_computation(c.name)
for c in agt2.discovery.agent_computations(agt2.name):
agt2.discovery.unregister_computation(c)
wait_run()
agt1.stop()
agt2.stop()
agt_dir.stop()
def test_register_agent_without_publish(standalone_discovery):
standalone_discovery.register_agent('agt1', 'addr1', publish=False)
assert 'agt1' in standalone_discovery.agents()
assert standalone_discovery.agent_address('agt1') == 'addr1'
def test_register_agent_publish_with_no_directory(standalone_discovery):
standalone_discovery.register_agent('agt1', 'addr1')
assert 'agt1' in standalone_discovery.agents()
assert standalone_discovery.agent_address('agt1') == 'addr1'
def test_unregister_agent_without_publish(standalone_discovery):
standalone_discovery.register_agent('agt1', 'addr1', publish=False)
standalone_discovery.unregister_agent('agt1', publish=False)
assert 'agt1' not in standalone_discovery.agents()
with pytest.raises(UnknownAgent):
standalone_discovery.agent_address('agt1')
def test_raises_on_address_for_unknown_agent(standalone_discovery):
standalone_discovery.register_agent('agt1', 'addr1', publish=False)
with pytest.raises(UnknownAgent):
standalone_discovery.agent_address('agt2')
def test_register_agent_publish_on_directory(directory_discovery):
agt_dir, agt_dis, _ = directory_discovery
agt_dis.discovery.register_agent('agt1', 'addr1')
wait_run()
# Registration must be effective on both agents
assert agt_dir.discovery.agent_address('agt1') == 'addr1'
assert agt_dis.discovery.agent_address('agt1') == 'addr1'
def test_unregister_agent_publish_on_directory(directory_discovery):
agt_dir, agt_dis, _ = directory_discovery
agt_dis.discovery.register_agent('agt_new', 'addr_new')
wait_run()
agt_dis.discovery.unregister_agent('agt_new', 'addr_new')
wait_run()
# Un-Registration must be effective on both agents
with pytest.raises(UnknownAgent):
agt_dir.discovery.agent_address('agt_new')
with pytest.raises(UnknownAgent):
agt_dis.discovery.agent_address('agt_new')
def test_unregister_agent_with_computation_fails(directory_discovery):
# When un-registering an agent, all computations for this agent must be
# unregistered first otherwise we get a DiscoveryException.
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_agent('agt_new', 'addr_new')
agt1.discovery.register_computation('comp_new', 'agt_new')
with pytest.raises(DiscoveryException):
agt1.discovery.unregister_agent('agt_new')
def test_subscribe_agent_cb(directory_discovery):
agt_dir, agt_dis, _ = directory_discovery
agt_dir.discovery.register_agent('agt_new', 'addr_new')
wait_run()
cb = agt_dis.discovery.subscribe_agent('agt_new', MagicMock())
wait_run()
cb.assert_called_once_with('agent_added', 'agt_new', 'addr_new')
def test_subscribe_agent_cb_one_shot(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_agent('agt_new', 'addr_new')
wait_run()
cb = agt2.discovery.subscribe_agent('agt_new', MagicMock(), one_shot=True)
agt1.discovery.unregister_agent('agt_new', 'addr_new')
wait_run()
# The cb must be called only once even though there were two events for
# agent_new
cb.assert_called_once_with('agent_added', 'agt_new', 'addr_new')
def test_subscribe_agent_cb_several(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_agent('agt_new', 'addr_new')
wait_run()
cb = agt2.discovery.subscribe_agent('agt_new', MagicMock(), one_shot=False)
wait_run()
cb.assert_called_with('agent_added', 'agt_new', 'addr_new')
agt1.discovery.unregister_agent('agt_new', 'addr_new')
wait_run()
# The cb must be called twice, one for each event for agent_new
cb.assert_called_with('agent_removed', 'agt_new', None)
def test_subscribe_agent_cb_called_once(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_agent('agt_new', 'addr_new')
wait_run()
cb = agt2.discovery.subscribe_agent('agt_new', MagicMock())
wait_run()
agt2.discovery.subscribe_agent('agt_new')
wait_run()
cb.reset_mock()
agt1.discovery.register_agent('agt_new', 'addr_new2')
wait_run()
cb.assert_called_once_with('agent_added', 'agt_new', 'addr_new2')
def test_subscribe_agent_on_second_agt(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_agent('agt_new', 'addr_new')
wait_run()
cb = agt2.discovery.subscribe_agent('agt_new', MagicMock())
wait_run()
wait_run()
# Registration must be effective on both agents
assert agt_dir.discovery.agent_address('agt_new') == 'addr_new'
assert agt2.discovery.agent_address('agt_new') == 'addr_new'
cb.assert_called_once_with('agent_added', 'agt_new', 'addr_new')
def test_unsubscribe_all_cb(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_agent('agt_new', 'addr_new')
cb = agt2.discovery.subscribe_agent('agt_new', MagicMock())
cb2 = agt2.discovery.subscribe_agent('agt_new', MagicMock())
wait_run()
cb.assert_called_with('agent_added', 'agt_new', 'addr_new')
cb2.assert_called_with('agent_added', 'agt_new', 'addr_new')
cb.reset_mock()
cb2.reset_mock()
removed = agt2.discovery.unsubscribe_agent('agt_new')
assert removed == 2
agt1.discovery.unregister_agent('agt_new', 'addr_new')
wait_run()
cb.assert_not_called()
cb2.assert_not_called()
def test_unsubscribe_one_cb(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_agent('agt_new', 'addr_new')
cb = agt2.discovery.subscribe_agent('agt_new', MagicMock())
cb2 = agt2.discovery.subscribe_agent('agt_new', MagicMock())
wait_run()
cb.reset_mock()
removed = agt2.discovery.unsubscribe_agent('agt_new', cb)
assert removed == 1
agt1.discovery.unregister_agent('agt_new', 'addr_new')
wait_run()
cb.assert_not_called()
def test_subscribe_all_agents(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
# Once subscribed for all agents, agt2 should be aware of any agent in the
# system without registering for an agent explicitly by its name.
agt2.discovery.subscribe_all_agents()
agt1.discovery.register_agent('agt_new1', 'addr_new')
agt1.discovery.register_agent('agt_new2', 'addr_new')
wait_run()
assert 'agt_new1' in agt2.discovery.agents()
assert 'agt_new2' in agt2.discovery.agents()
agt1.discovery.unregister_agent('agt_new2')
wait_run()
assert 'agt_new2' not in agt2.discovery.agents()
def test_subscribe_all_agents_after(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_agent('agt_new1', 'addr_new1')
agt1.discovery.register_agent('agt_new2', 'addr_new2')
wait_run()
# When subscribing for all agents, agt2 should be aware agents even if
# they were registered before its subscription.
cb = agt2.discovery.subscribe_all_agents(MagicMock())
wait_run()
assert 'agt_new1' in agt2.discovery.agents()
assert 'agt_new2' in agt2.discovery.agents()
cb.assert_has_calls(
[call('agent_added', 'agt_new1', 'addr_new1'),
call('agent_added', 'agt_new2', 'addr_new2'),
call('agent_added', 'agt1', agt1.address)],
any_order=True)
agt1.discovery.unregister_agent('agt_new2')
wait_run()
assert 'agt_new2' not in agt2.discovery.agents()
def test_subscribe_all_agents_with_cb(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
# Once subscribed to all agents, agt2 should be aware of any agent in the
# system without registering for an agent explicitly by its name.
cb = agt2.discovery.subscribe_all_agents(MagicMock())
agt1.discovery.register_agent('agt_new1', 'addr_new1')
agt1.discovery.register_agent('agt_new2', 'addr_new2')
wait_run()
assert 'agt_new1' in agt2.discovery.agents()
assert 'agt_new2' in agt2.discovery.agents()
cb.assert_has_calls(
[call('agent_added', 'agt_new1', 'addr_new1'),
call('agent_added', 'agt_new2', 'addr_new2')])
agt1.discovery.unregister_agent('agt_new2')
wait_run()
assert 'agt_new2' not in agt2.discovery.agents()
def test_list_computations():
discovery = Discovery('test', 'addr_test')
discovery.register_agent('a1', 'addr1')
discovery.register_computation('c1', 'a1')
discovery.register_computation('c2', 'a1')
assert set(discovery.computations()) == {'c1', 'c2'}
def test_list_computations_filter(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
assert not agt1.discovery.computations()
assert set(agt1.discovery.computations(include_technical=True)) == \
{'_directory', '_discovery_agt1'}
def test_computation_agent():
discovery = Discovery('test', 'addr_test')
discovery.register_agent('a1', 'addr1')
discovery.register_computation('c1', 'a1')
discovery.register_computation('c2', 'a1')
assert discovery.computation_agent('c1') == 'a1'
assert discovery.computation_agent('c2') == 'a1'
with pytest.raises(UnknownComputation):
discovery.computation_agent('c3')
def test_agent_computations(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
assert not agt1.discovery.agent_computations(agt1.name)
assert set(agt1.discovery.agent_computations(agt1.name,
include_technical=True)) == \
{'_discovery_agt1'}
agt1.discovery.register_agent(agt1.name, 'addr1')
agt1.discovery.register_computation('c1', agt1.name)
agt1.discovery.register_computation('c2', agt1.name)
assert set(agt1.discovery.agent_computations(agt1.name)) == {'c1', 'c2'}
assert set(agt1.discovery.agent_computations(agt1.name,
include_technical=True)) ==\
{'_discovery_agt1', 'c1', 'c2'}
def test_register_computation(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
with pytest.raises(UnknownAgent):
agt1.discovery.register_computation('c1', 'agt_test')
agt1.discovery.register_agent('agt_test', 'address')
wait_run()
agt1.discovery.register_computation('c1', 'agt_test')
wait_run()
assert agt1.discovery.computation_agent('c1') == 'agt_test'
# agt2 is not subscribed to c1, so it does not know about it
with pytest.raises(UnknownComputation):
agt2.discovery.computation_agent('c1')
# But the discovery on the agent hosting the directory must know
assert agt_dir.discovery.agent_address('agt_test') == 'address'
assert agt_dir.discovery.computation_agent('c1') == 'agt_test'
def test_register_own_computation(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_computation('c1')
wait_run()
assert agt1.discovery.computation_agent('c1') == agt1.name
assert agt_dir.discovery.computation_agent('c1') == agt1.name
def test_register_computation_with_agent_address(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_computation('c1', 'agt_test', 'addr_test')
wait_run()
assert agt1.discovery.agent_address('agt_test') == 'addr_test'
assert agt_dir.discovery.agent_address('agt_test') == 'addr_test'
def test_unregister_computation(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_computation('c1', 'agt_test', 'addr_test')
wait_run()
agt1.discovery.unregister_computation('c1')
wait_run()
# Un-Registration must be effective on both agents
with pytest.raises(UnknownComputation):
agt_dir.discovery.computation_agent('c1')
with pytest.raises(UnknownComputation):
agt1.discovery.computation_agent('c1')
def test_unregister_computation_with_agent(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_computation('c1', 'agt_test', 'addr_test')
wait_run()
# c1 is not registered on agt_test2, must raise
with pytest.raises(ValueError):
agt1.discovery.unregister_computation('c1', 'agt_test2')
# c2 is not registered on agt_test, must not raise
agt1.discovery.unregister_computation('c2', 'agt_test')
agt1.discovery.unregister_computation('c1', 'agt_test')
wait_run()
# Un-Registration must be effective on both agents
with pytest.raises(UnknownComputation):
agt_dir.discovery.computation_agent('c1')
with pytest.raises(UnknownComputation):
agt1.discovery.computation_agent('c1')
def test_subscribe_computation_no_cb(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_computation('c1', 'agt1')
agt2.discovery.subscribe_computation('c1')
wait_run()
assert agt2.discovery.computation_agent('c1') == 'agt1'
def test_subscribe_computation_one_cb(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_computation('c1')
cb = agt2.discovery.subscribe_computation('c1', MagicMock())
wait_run()
cb.assert_called_once_with('computation_added', 'c1', agt1.name)
assert agt2.discovery.computation_agent('c1') == 'agt1'
def test_unsubscribe_computation_cb(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_computation('c1')
cb = agt2.discovery.subscribe_computation('c1', MagicMock())
wait_run()
cb.reset_mock()
agt2.discovery.unsubscribe_computation('c1', cb)
cb.assert_not_called()
agt1.discovery.register_computation('c1', 'agt_new', 'addr_new')
assert agt2.discovery.computation_agent('c1') == agt1.name
def test_unsubscribe_computation_no_cb(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_computation('c1')
cb = agt2.discovery.subscribe_computation('c1', MagicMock())
wait_run()
cb.reset_mock()
agt2.discovery.unsubscribe_computation('c1', cb)
cb.assert_not_called()
agt1.discovery.register_computation('c1', 'agt_new', 'addr_new')
assert agt2.discovery.computation_agent('c1') == agt1.name
def test_register_replica_for_unknown_replication_should_raise(
directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
with pytest.raises(UnknownComputation):
agt1.discovery.register_replica('c1', agt1.name)
def test_register_replica_local(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_computation('c1')
agt1.discovery.register_replica('c1', agt1.name)
wait_run()
# Both agents must but be able to know who is hosting the replicas for c1
assert agt1.discovery.replica_agents('c1') == {agt1.name}
def test_replica_should_be_visible_for_subscribed_agents(
directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_computation('c1')
agt1.discovery.register_replica('c1', agt1.name)
agt2.discovery.subscribe_computation('c1')
agt2.discovery.subscribe_replica('c1')
wait_run()
assert agt2.discovery.computation_agent('c1') == agt1.name
# Both agents must but be able to know who is hosting the replicas for c1
assert agt2.discovery.replica_agents('c1') == {agt1.name}
def test_replica_is_not_visible_for_not_subscribed_agents(
directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_computation('c1')
agt1.discovery.register_replica('c1', agt1.name)
wait_run()
# agt2 is not subscribed to c1, so it should not even see the computation
with pytest.raises(UnknownComputation):
agt2.discovery.replica_agents('c1')
agt2.discovery.subscribe_computation('c1')
wait_run()
# agt2 is subscribed to computation c1, but not its replicas: it should not
# see the replica
assert agt2.discovery.replica_agents('c1') == set()
def test_replica_removal_must_be_sent(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_computation('c1')
agt1.discovery.register_replica('c1', agt1.name)
agt2.discovery.subscribe_computation('c1')
agt2.discovery.subscribe_replica('c1')
wait_run()
assert agt2.discovery.replica_agents('c1') == {agt1.name}
agt1.discovery.unregister_replica('c1', agt1.name)
wait_run()
assert agt2.discovery.replica_agents('c1') == set()
def test_replica_removal_callback(directory_discovery):
# make sure that registered callback is called when registering or
# removing a replica
agt_dir, agt1, agt2 = directory_discovery
rep_cb = MagicMock()
agt1.discovery.register_computation('c1')
agt2.discovery.subscribe_computation('c1')
agt2.discovery.subscribe_replica('c1', rep_cb)
wait_run()
agt1.discovery.register_replica('c1', agt1.name)
wait_run()
rep_cb.assert_called_once_with('replica_added', 'c1', agt1.name)
rep_cb.reset_mock()
agt1.discovery.unregister_replica('c1', agt1.name)
wait_run()
rep_cb.assert_called_once_with('replica_removed', 'c1', agt1.name)
def test_replica_unsubscribe(directory_discovery):
agt_dir, agt1, agt2 = directory_discovery
agt1.discovery.register_computation('c1')
agt1.discovery.register_replica('c1', agt1.name)
agt2.discovery.subscribe_computation('c1')
agt2.discovery.subscribe_replica('c1')
wait_run()
assert agt2.discovery.replica_agents('c1') == {agt1.name}
agt2.discovery.unsubscribe_replica('c1')
agt1.discovery.unregister_replica('c1', agt1.name)
wait_run()
assert agt2.discovery.replica_agents('c1') == set()
def wait_run():
# Small wait, just to give some slack for other threads to run.
sleep(0.1)
|
py | b4090d47e90cf5b2a024f16cfe0f894aa23562d1 | ##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# CCF tests
#
# Author: cheatwood
# ----------------------------------------------------------------------------
# Wx, Sky, MaxT, MinT, Wind are 24 hour grids
# PoP, SnowAmt are 12 hour grids
# All grids except Sky are defined in CCF_createGrids below
# Sky grids are added within each CCF test case
CCF_createGrids = [
("Fcst", "Wx", "WEATHER", 0, 24, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 0, 24, "Wide:R:--:<NoVis>:^Wide:S:-:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 0, 24, "Wide:S:+:1/4SM:", ["area2"]),
("Fcst", "Wx", "WEATHER", 0, 24, "Wide:RW:--:<NoVis>:^Wide:T:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 24, 48, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 24, 48, "Chc:T:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 24, 48, "SChc:R:--:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 24, 48, "SChc:ZR:--:<NoVis>:", ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 70, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin", "MinTEnd", 43, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 80, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin", "MinTEnd", 47, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin", "MaxTEnd", 90, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin", "MinTEnd", 49, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 24", "MaxTEnd + 24", 73, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 24", "MinTEnd + 24", 45, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 24", "MaxTEnd + 24", 81, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 24", "MinTEnd + 24", 48, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 24", "MaxTEnd + 24", 92, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 24", "MinTEnd + 24", 50, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 48", "MaxTEnd + 48", 75, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 48", "MaxTEnd + 48", 82, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 48", "MaxTEnd + 48", 95, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 48", "MinTEnd + 48", 47, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 48", "MinTEnd + 48", 50, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 48", "MinTEnd + 48", 52, ["area3"]),
("Fcst", "PoP", "SCALAR", 0, 12, 45, ["area1"]),
("Fcst", "PoP", "SCALAR", 0, 12, 45, ["area2"]),
("Fcst", "PoP", "SCALAR", 0, 12, 45, ["area3"]),
("Fcst", "PoP", "SCALAR", 12, 24, 45, ["area1"]),
("Fcst", "PoP", "SCALAR", 12, 24, 45, ["area2"]),
("Fcst", "PoP", "SCALAR", 12, 24, 45, ["area3"]),
("Fcst", "PoP", "SCALAR", 24, 36, 50, ["area1"]),
("Fcst", "PoP", "SCALAR", 24, 36, 60, ["area2"]),
("Fcst", "PoP", "SCALAR", 24, 36, 70, ["area3"]),
("Fcst", "PoP", "SCALAR", 36, 48, 50, ["area1"]),
("Fcst", "PoP", "SCALAR", 36, 48, 60, ["area2"]),
("Fcst", "PoP", "SCALAR", 36, 48, 70, ["area3"]),
("Fcst", "Wind", "VECTOR", 0, 24, (40, "SW"), "all"),
("Fcst", "Wind", "VECTOR", 24, 48, (35, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 48, 72, (45, "W"), "all"),
("Fcst", "Wind", "VECTOR", 72, 96, (50, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 96, 120, (45, "N"), "all"),
("Fcst", "Wind", "VECTOR", 120, 144, (40, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 144, 168, (30, "W"), "all"),
("Fcst", "Wind", "VECTOR", 168, 192, (35, "W"), "all"),
("Fcst", "SnowAmt", "SCALAR", 0, 12, 0, "all"),
("Fcst", "SnowAmt", "SCALAR", 12, 24, 6, "all"),
("Fcst", "SnowAmt", "SCALAR", 24, 36, 3, "all"),
("Fcst", "SnowAmt", "SCALAR", 36, 48, 1, "all"),
("Fcst", "Wx", "WEATHER", 48, 72, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 48, 72, "Iso:RW:--:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 48, 72, "Areas:ZL:--:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 48, 72, "Sct:SW:--:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 72, 96, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 72, 96, "Def:BS:<NoInten>:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 72, 96, "SChc:S:-:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 72, 96, "Def:BS:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 96, 120, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 96, 120, "Iso:SW:-:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 96, 120, "Areas:F:+:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 96, 120, "Wide:R:--:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 120, 144, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 120, 144, "Wide:L:--:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 120, 144, "Patchy:L:--:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 120, 144, "Patchy:BD:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 144, 168, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 144, 168, "Wide:IP:--:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 144, 168, "Def:H:<NoInten>:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 144, 168, "Patchy:K:<NoInten>:<NoVis>:", ["area3"]),
("Fcst", "Wx", "WEATHER", 168, 192, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 168, 192, "Wide:R:--:<NoVis>:", ["area1"]),
("Fcst", "Wx", "WEATHER", 168, 192, "Sct:RW:-:<NoVis>:", ["area2"]),
("Fcst", "Wx", "WEATHER", 168, 192, "Chc:R:+:<NoVis>:", ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 72", "MinTEnd + 72", 47, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 72", "MaxTEnd + 72", 77, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 72", "MinTEnd + 72", 50, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 72", "MaxTEnd + 72", 85, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 72", "MinTEnd + 72", 52, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 72", "MaxTEnd + 72", 96, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 96", "MinTEnd + 96", 49, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 96", "MaxTEnd + 96", 79, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 96", "MinTEnd + 96", 51, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 96", "MaxTEnd + 96", 86, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 96", "MinTEnd + 96", 54, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 96", "MaxTEnd + 96", 100, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 120", "MinTEnd + 120", 49, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 120", "MaxTEnd + 120", 81, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 120", "MinTEnd + 120", 53, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 120", "MaxTEnd + 120", 60, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 120", "MinTEnd + 120", 55, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 120", "MaxTEnd + 120", 103, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 144", "MinTEnd + 144", 49, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 144", "MaxTEnd + 144", 81, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 144", "MinTEnd + 144", 50, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 144", "MaxTEnd + 144", 80, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 144", "MinTEnd + 144", 55, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 144", "MaxTEnd + 144", 96, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 168", "MaxTEnd + 168", 83, ["area1"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 168", "MaxTEnd + 168", 85, ["area2"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 168", "MaxTEnd + 168", 100, ["area3"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 168", "MinTEnd + 168", 52, ["area1"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 168", "MinTEnd + 168", 54, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 168", "MinTEnd + 168", 58, ["area3"]),
("Fcst", "PoP", "SCALAR", 36, 48, 50, ["area1"]),
("Fcst", "PoP", "SCALAR", 36, 48, 60, ["area2"]),
("Fcst", "PoP", "SCALAR", 36, 48, 50, ["area3"]),
("Fcst", "PoP", "SCALAR", 48, 60, 55, ["area1"]),
("Fcst", "PoP", "SCALAR", 48, 60, 55, ["area2"]),
("Fcst", "PoP", "SCALAR", 48, 60, 60, ["area3"]),
("Fcst", "PoP", "SCALAR", 60, 72, 60, ["area1"]),
("Fcst", "PoP", "SCALAR", 60, 72, 65, ["area2"]),
("Fcst", "PoP", "SCALAR", 60, 72, 66, ["area3"]),
("Fcst", "PoP", "SCALAR", 72, 84, 50, ["area1"]),
("Fcst", "PoP", "SCALAR", 72, 84, 55, ["area2"]),
("Fcst", "PoP", "SCALAR", 72, 84, 60, ["area3"]),
("Fcst", "PoP", "SCALAR", 84, 96, 50, ["area1"]),
("Fcst", "PoP", "SCALAR", 84, 96, 50, ["area2"]),
("Fcst", "PoP", "SCALAR", 84, 96, 50, ["area3"]),
("Fcst", "PoP", "SCALAR", 96, 108, 45, ["area1"]),
("Fcst", "PoP", "SCALAR", 96, 108, 50, ["area2"]),
("Fcst", "PoP", "SCALAR", 96, 108, 45, ["area3"]),
("Fcst", "PoP", "SCALAR", 108, 120, 50, ["area1"]),
("Fcst", "PoP", "SCALAR", 108, 120, 55, ["area2"]),
("Fcst", "PoP", "SCALAR", 108, 120, 50, ["area3"]),
("Fcst", "PoP", "SCALAR", 120, 132, 55, ["area1"]),
("Fcst", "PoP", "SCALAR", 120, 132, 60, ["area2"]),
("Fcst", "PoP", "SCALAR", 120, 132, 55, ["area3"]),
("Fcst", "PoP", "SCALAR", 132, 144, 45, ["area1"]),
("Fcst", "PoP", "SCALAR", 132, 144, 50, ["area2"]),
("Fcst", "PoP", "SCALAR", 132, 144, 55, ["area3"]),
("Fcst", "PoP", "SCALAR", 144, 156, 50, ["area1"]),
("Fcst", "PoP", "SCALAR", 144, 156, 55, ["area2"]),
("Fcst", "PoP", "SCALAR", 144, 156, 60, ["area3"]),
("Fcst", "PoP", "SCALAR", 156, 168, 50, ["area1"]),
("Fcst", "PoP", "SCALAR", 156, 168, 55, ["area2"]),
("Fcst", "PoP", "SCALAR", 156, 168, 60, ["area3"]),
("Fcst", "PoP", "SCALAR", 168, 180, 50, ["area1"]),
("Fcst", "PoP", "SCALAR", 168, 180, 55, ["area2"]),
("Fcst", "PoP", "SCALAR", 168, 180, 60, ["area3"]),
("Fcst", "PoP", "SCALAR", 180, 192, 50, ["area1"]),
("Fcst", "PoP", "SCALAR", 180, 192, 55, ["area2"]),
("Fcst", "PoP", "SCALAR", 180, 192, 60, ["area3"]),
]
CCF_deleteGrids = [
("Fcst", "PoP", "SFC", 0,280),
("Fcst", "MaxT", "SFC", 0,280),
("Fcst", "MinT", "SFC", 0,280),
("Fcst", "T", "SFC", 0,280),
("Fcst", "Td", "SFC", 0,280),
("Fcst", "WindChill", "SFC", 0,280),
("Fcst", "HeatIndex", "SFC", 0,280),
("Fcst", "Wind", "SFC", 0,280),
("Fcst", "Sky", "SFC", 0,280),
("Fcst", "WindGust", "SFC", 0,280),
("Fcst", "Wx", "SFC", 0,280),
("Fcst", "QPF", "SFC", 0,280),
("Fcst", "SnowAmt", "SFC", 0,280),
]
scripts = [
# Morning CCF test
# Wind dominates in the Wx grids
{
"commentary": """
Morning CCF test
Wind, N, overrides some Wx codes in the Wx grids
""",
"name":"CCF_1",
"productType":"CCF",
"cmdLineVars": "{('Product Issuance', 'productIssuance'): 'Morning', ('Issued By', 'issuedBy'): None, ('Forecaster Number', 'forecasterNumber'): 99.0 }",
"comboFlag": 1,
"checkStrings": [
"CCFTBW",
"AREA1 OT 070/043 073/045 075 99555 0102/0406/0103",
"WQJLX 047/077 047/079 049/081 049/081 6665556655",
"AREA2 PR 080/047 081/048 082 99566 0102/0406/0103",
"ZSNLN 050/085 050/086 051/060 053/080 6776566666",
"AREA3 TY 090/049 092/050 095 99577 0102/0406/0103",
"JQRNN 052/096 052/100 054/103 055/096 6776556666",
],
"createGrids": CCF_createGrids + [
("Fcst", "Sky", "SCALAR", 0, 24, 0, "all"),
("Fcst", "Sky", "SCALAR", 24, 48, 96, "all"),
("Fcst", "Sky", "SCALAR", 48, 72, 70, "all"),
("Fcst", "Sky", "SCALAR", 72, 96, 32, "all"),
("Fcst", "Sky", "SCALAR", 96, 120, 0, "all"),
("Fcst", "Sky", "SCALAR", 120, 144, 96, "all"),
("Fcst", "Sky", "SCALAR", 144, 168, 70, "all"),
("Fcst", "Sky", "SCALAR", 168, 192, 32, "all"),
],
},
# Afternoon CCF test
# Wind dominates in Wx grids
{
"commentary": """
Afternoon CCF test
Wind, N, overrides some Wx codes in the Wx grids
""",
"name":"CCF_2",
"productType":"CCF",
"cmdLineVars": "{('Product Issuance', 'productIssuance'): 'Afternoon', ('Issued By', 'issuedBy'): None, ('Forecaster Number', 'forecasterNumber'): 99.0 }",
"comboFlag": 1,
"checkStrings": [
"CCFTBW",
"AREA1 TW 043/073 045/075 047 99556 0406/0103/0001",
"QJLXR 077/047 079/049 081/049 081/049 083 66555665555",
"AREA2 RZ 047/081 048/082 050 99666 0406/0103/0001",
"SNLNW 085/050 086/051 060/053 080/050 085 77656666666",
"AREA3 YJ 049/092 050/095 052 99776 0406/0103/0001",
"QRNNR 096/052 100/054 103/055 096/055 100 77655666666",
],
"createGrids": CCF_createGrids + [
("Fcst", "Sky", "SCALAR", 0, 24, 0, "all"),
("Fcst", "Sky", "SCALAR", 24, 48, 96, "all"),
("Fcst", "Sky", "SCALAR", 48, 72, 70, "all"),
("Fcst", "Sky", "SCALAR", 72, 96, 32, "all"),
("Fcst", "Sky", "SCALAR", 96, 120, 0, "all"),
("Fcst", "Sky", "SCALAR", 120, 144, 96, "all"),
("Fcst", "Sky", "SCALAR", 144, 168, 70, "all"),
("Fcst", "Sky", "SCALAR", 168, 192, 32, "all"),
],
},
# Morning CCF test
# Changed MaxT to include a "VRYHOT = G" and "VRYCOLD = I" Wx code
# Changed Winds to "0" so they would not dominate over the other Wx grids
# Wind "0, 24" was kept at "35" to keep a "BLZZRD = P" Wx code
{
"commentary": """
Morning CCF test
MaxT is set at 120 and 144 hours to include a VRYHOT = G Wx code for AREA3 and a VRYCOLD = I Wx code for AREA2
The Wind grids are set to 0, except at 0-24 hours, to not override any Wx codes
Wind, 0-24 hours, is kept at 35 to output a BLZZRD = P Wx code
""",
"name":"CCF_3",
"productType":"CCF",
"cmdLineVars": "{('Product Issuance', 'productIssuance'): 'Morning', ('Issued By', 'issuedBy'): None, ('Forecaster Number', 'forecasterNumber'): 99.0 }",
"comboFlag": 1,
"checkStrings": [
"CCFTBW",
"AREA1 OT 070/043 073/045 075 99555 0102/0406/0103",
"WQJLX 047/077 047/079 049/081 049/081 6665556655",
"AREA2 PR 080/047 081/048 082 99566 0102/0406/0103",
"ZSFLI 050/085 050/086 051/060 053/018 6776566666",
"AREA3 TY 090/049 092/050 095 99577 0102/0406/0103",
"JQRGK 052/096 052/100 054/106 055/096 6776556666",
],
"createGrids": CCF_createGrids + [
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 120", "MaxTEnd + 120", 106, ["area3"]),
("Fcst", "MaxT", "SCALAR", "MaxTBegin + 144", "MaxTEnd + 144", 18, ["area2"]),
("Fcst", "MinT", "SCALAR", "MinTBegin + 144", "MinTEnd + 144", 13, ["area2"]),
("Fcst", "Wind", "VECTOR", 0, 24, (35, "SW"), "all"),
("Fcst", "Wind", "VECTOR", 24, 48, (0, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 48, 72, (0, "W"), "all"),
("Fcst", "Wind", "VECTOR", 72, 96, (0, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 96, 120, (0, "N"), "all"),
("Fcst", "Wind", "VECTOR", 120, 144, (0, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 144, 168, (0, "W"), "all"),
("Fcst", "Wind", "VECTOR", 168, 192, (0, "W"), "all"),
("Fcst", "Sky", "SCALAR", 0, 24, 0, "all"),
("Fcst", "Sky", "SCALAR", 24, 48, 96, "all"),
("Fcst", "Sky", "SCALAR", 48, 72, 70, "all"),
("Fcst", "Sky", "SCALAR", 72, 96, 32, "all"),
("Fcst", "Sky", "SCALAR", 96, 120, 0, "all"),
("Fcst", "Sky", "SCALAR", 120, 144, 96, "all"),
("Fcst", "Sky", "SCALAR", 144, 168, 70, "all"),
("Fcst", "Sky", "SCALAR", 168, 192, 32, "all"),
],
},
# Afternoon CCF test
# Changed Winds to "0" so they would not be dominate over the other Wx grids
{
"commentary": """
Afternoon CCF test
Wind grids are set to 0 so they will not dominate over the other Wx grids
""",
"name":"CCF_4",
"productType":"CCF",
"cmdLineVars": "{('Product Issuance', 'productIssuance'): 'Afternoon', ('Issued By', 'issuedBy'): None, ('Forecaster Number', 'forecasterNumber'): 99.0 }",
"comboFlag": 1,
"checkStrings": [
"CCFTBW",
"AREA1 TE 043/073 045/075 047 99555 0406/0103/0001",
"QJLXR 077/047 079/049 081/049 081/049 083 05555665555",
"AREA2 RZ 047/081 048/082 050 99666 0406/0103/0001",
"SFLHW 085/050 086/051 060/053 080/050 085 06656666666",
"AREA3 YM 049/092 050/095 052 99775 0406/0103/0001",
"QRDKR 096/052 100/054 103/055 096/055 100 16655666666",
],
"createGrids": CCF_createGrids + [
("Fcst", "Wind", "VECTOR", 0, 24, (35, "SW"), "all"),
("Fcst", "Wind", "VECTOR", 24, 48, (0, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 48, 72, (0, "W"), "all"),
("Fcst", "Wind", "VECTOR", 72, 96, (0, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 96, 120, (0, "N"), "all"),
("Fcst", "Wind", "VECTOR", 120, 144, (0, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 144, 168, (0, "W"), "all"),
("Fcst", "Wind", "VECTOR", 168, 192, (0, "W"), "all"),
("Fcst", "Sky", "SCALAR", 0, 24, 0, "all"),
("Fcst", "Sky", "SCALAR", 24, 48, 96, "all"),
("Fcst", "Sky", "SCALAR", 48, 72, 70, "all"),
("Fcst", "Sky", "SCALAR", 72, 96, 32, "all"),
("Fcst", "Sky", "SCALAR", 96, 120, 0, "all"),
("Fcst", "Sky", "SCALAR", 120, 144, 96, "all"),
("Fcst", "Sky", "SCALAR", 144, 168, 70, "all"),
("Fcst", "Sky", "SCALAR", 168, 192, 32, "all"),
("Fcst", "PoP", "SCALAR", 48, 72, 10, ["area3"]),
],
},
# Afternoon CCF test
# Changed Wx grids to "NoWx"
# Changed all Wind grids to "0"
# "Sky" grids are now visible
{
"commentary": """
Afternoon CCF test
Wx grids are set to NoWx
Wind grids are set to 0
Sky Wx code is visible
""",
"name":"CCF_5",
"productType":"CCF",
"cmdLineVars": "{('Product Issuance', 'productIssuance'): 'Afternoon', ('Issued By', 'issuedBy'): None, ('Forecaster Number', 'forecasterNumber'): 99.0 }",
"comboFlag": 1,
"checkStrings": [
"CCFTBW",
"AREA1 CE 043/073 045/075 047 99556 0406/0103/0001",
"BUCEB 077/047 079/049 081/049 081/049 083 66555665555",
"AREA2 CE 047/081 048/082 050 99666 0406/0103/0001",
"BUCEB 085/050 086/051 060/053 080/050 085 77656666666",
"AREA3 CE 049/092 050/095 052 99776 0406/0103/0001",
"BUCEB 096/052 100/054 103/055 096/055 100 77655666666",
],
"createGrids": CCF_createGrids + [
("Fcst", "Wx", "WEATHER", 0, 24, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 24, 48, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 48, 72, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 72, 96, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 96, 120, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 120, 144, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 144, 168, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 168, 192, "NoWx", "all"),
("Fcst", "Wind", "VECTOR", 0, 24, (0, "SW"), "all"),
("Fcst", "Wind", "VECTOR", 24, 48, (0, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 48, 72, (0, "W"), "all"),
("Fcst", "Wind", "VECTOR", 72, 96, (0, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 96, 120, (0, "N"), "all"),
("Fcst", "Wind", "VECTOR", 120, 144, (0, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 144, 168, (0, "W"), "all"),
("Fcst", "Wind", "VECTOR", 168, 192, (0, "W"), "all"),
("Fcst", "Sky", "SCALAR", 0, 24, 0, "all"),
("Fcst", "Sky", "SCALAR", 24, 48, 96, "all"),
("Fcst", "Sky", "SCALAR", 48, 72, 70, "all"),
("Fcst", "Sky", "SCALAR", 72, 96, 32, "all"),
("Fcst", "Sky", "SCALAR", 96, 120, 0, "all"),
("Fcst", "Sky", "SCALAR", 120, 144, 96, "all"),
("Fcst", "Sky", "SCALAR", 144, 168, 70, "all"),
("Fcst", "Sky", "SCALAR", 168, 192, 32, "all"),
],
},
# Clean up for CCF6 test
# Need to clear out "Sky" grids for test CCF6 below
{
"name":"CCFCleanUpforCCF6",
"commentary": "Clean out grids",
"productType": None,
"deleteGrids": [
("Fcst", "Sky", "SFC", 0, 192),
],
"fileChanges": [],
},
# Must run CCF6 and CCFCleanUpforCCF6 together in order to clean out the Sky codes to get the "FAIR = A" Sky code
# Afternoon CCF test
# Changed Wx grids to "NoWx"
# Changed all Wind grids to "0"
# "Sky" grids are absent in order to get a "FAIR = A" Wx code
{
"commentary": """
Afternoon CCF test
Wx grids are set to NoWx
Wind grids are set to 0
Sky grids are absent in order to get a FAIR = A Wx code
""",
"name":"CCF6",
"productType":"CCF",
"cmdLineVars": "{('Product Issuance', 'productIssuance'): 'Afternoon', ('Issued By', 'issuedBy'): None, ('Forecaster Number', 'forecasterNumber'): 99.0 }",
"comboFlag": 1,
"checkStrings": [
"CCFTBW",
"AREA1 AA 043/073 045/075 047 99556 0406/0103/0001",
"AAAAA 077/047 079/049 081/049 081/049 083 66555665555",
"AREA2 AA 047/081 048/082 050 99666 0406/0103/0001",
"AAAAA 085/050 086/051 060/053 080/050 085 77656666666",
"AREA3 AA 049/092 050/095 052 99776 0406/0103/0001",
"AAAAA 096/052 100/054 103/055 096/055 100 77655666666",
],
"createGrids": CCF_createGrids + [
("Fcst", "Wx", "WEATHER", 0, 24, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 24, 48, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 48, 72, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 72, 96, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 96, 120, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 120, 144, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 144, 168, "NoWx", "all"),
("Fcst", "Wx", "WEATHER", 168, 192, "NoWx", "all"),
("Fcst", "Wind", "VECTOR", 0, 24, (0, "SW"), "all"),
("Fcst", "Wind", "VECTOR", 24, 48, (0, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 48, 72, (0, "W"), "all"),
("Fcst", "Wind", "VECTOR", 72, 96, (0, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 96, 120, (0, "N"), "all"),
("Fcst", "Wind", "VECTOR", 120, 144, (0, "NW"), "all"),
("Fcst", "Wind", "VECTOR", 144, 168, (0, "W"), "all"),
("Fcst", "Wind", "VECTOR", 168, 192, (0, "W"), "all"),
],
},
]
defaultEditAreas = """Definition["defaultEditAreas"] = [
("area1", "AREA1"),
("area2", "AREA2"),
("area3", "AREA3"),
]
"""
import TestScript
import AbsTime
def testScript(self, dataMgr, level="Site"):
today = self.getTimeRange("Today").startTime()
today_10Z = AbsTime.absTimeYMD(today.year, today.month, today.day,
10, 0)
defaults = {
"internalStrip": 0,
"deleteGrids": CCF_deleteGrids,
"gridsStartTime": today_10Z,
"fileChanges": [
("CCF_<site>_Definition", "TextUtility", "add", defaultEditAreas, "undo"),
],
}
return TestScript.generalTestScript(self, dataMgr, scripts, defaults, level=level)
|
py | b4090de59e86502c60aa8af5695a6b5198403807 | """
MIT License
Copyright (c) 2020-present phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from typing import Optional
from TagScriptEngine import Block, Context, helper_parse_if
class SilentBlock(Block):
# This is an undocumented block and should not be used.
def will_accept(self, ctx: Context) -> bool:
dec = ctx.verb.declaration.lower()
return any([dec == "silent", dec == "silence"])
def process(self, ctx: Context) -> Optional[str]:
if "silent" in ctx.response.actions.keys():
return None
if ctx.verb.parameter is None:
value = True
else:
value = helper_parse_if(ctx.verb.parameter)
ctx.response.actions["silent"] = value
return ""
|
py | b4090e0a7dd1f43f759b3d78f1b05e7f6090c0bd | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-12-16 15:00
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('mentalhealth', '0004_auto_20161216_1458'),
]
operations = [
migrations.AlterField(
model_name='mentalhealthreview',
name='ethnicity',
field=models.ManyToManyField(blank=True, to='mentalhealth.Ethnicity'),
),
migrations.AlterField(
model_name='mentalhealthreview',
name='gender',
field=models.ManyToManyField(blank=True, to='mentalhealth.Gender'),
),
migrations.AlterField(
model_name='mentalhealthreview',
name='identity',
field=models.ManyToManyField(blank=True, to='mentalhealth.Identity'),
),
migrations.AlterField(
model_name='mentalhealthreview',
name='sexual_orientation',
field=models.ManyToManyField(blank=True, to='mentalhealth.SexualOrientation'),
),
]
|
py | b4090f5fd3931a0565bc0aaf497bccca5ee2c289 | # coding: utf-8
from dynaconf import settings
def main():
print(settings.SOME_VARIABLE)
print(settings.get('SOME_VARIABLE'))
if __name__ == "__main__":
main() |
py | b409100cddbcdfb56b7170559cb2860463e163ab | #!/usr/bin/env python3
"""Train MAML from featurized data
For description of available argument:
python src/train_maml.py --help
Usage:
python src/train_maml.py \\
--save_path <directory to store checkpoint> \\
--source <directory where training and validation data is stored> \\
...
Note:
- The directory provided to --source must contain 3 files:
- featurized_data.pkl: Pickled data for graphs. Generated by src/featurize.py
- meta_split_idx.pkl: Pickled task splits dictionary with schema:
{
"meta_train": {
"train": [train_task_idx1, train_task_idx2,...],
"val": [val_task_idx1, val_task_idx2,...],
"test": [test_task_idx1, test_task_idx2,...]
},
"meta_val": {...},
"meta_test": {...}
}
- dataset_split_idx.pkl: Pickled splits dictionary with schema similar to meta_split_idx.pkl.
In the example below, [task1_train_cmpd_idx1, task1_train_cmpd_idx2 ...] are compound indices
for train_task_idx1 above.
{
"meta_train": {
"train": [
[task1_train_cmpd_idx1, task1_train_cmpd_idx2 ...],
[task2_train_cmpd_idx1, task2_train_cmpd_idx2,...],
...
],
"val": [
[task1_val_idx1, task1_val_idx2 ...],
[task2_val_idx1, task2_val_idx1,...],
...
],
"test": [...]
},
"meta_val": {...},
"meta_test": {...}
}
"""
import copy
import gc
import json
import os
import pickle
import random
import sys
from functools import partial
import learn2learn as l2l
import numpy as np
import torch
from absl import app, flags, logging
from sklearn.metrics import average_precision_score
from sklearn.model_selection import train_test_split
from torch import nn, optim
from torchvision import transforms
from torchvision.datasets import ImageFolder
from src.models.ggnn import GatedGraphNeuralNetwork
from src.models.l2l_maml import MAML
from src.training.meta import meta_training
from src.utils import dataloaders, torch_utils
FLAGS = flags.FLAGS
# Save path
flags.DEFINE_string("save_path", None, "Misc: Folder directory for saving MAML models")
# GGNN architecture hyperparameters
flags.DEFINE_integer("n_conv", 7, "Architecture: Number of gated graph convolution layers")
flags.DEFINE_integer("fc_dims", 1024, "Architecture: Number of fully connected layers")
# Data source for training
flags.DEFINE_string("source", None, "Training: Data folder (see docstrings for requirements)")
# Training hyperparameters
flags.DEFINE_integer("seed", 0, "Training: Random seed")
flags.DEFINE_string("mode", "binary_classification", "Training: [regression, binary_classification]")
flags.DEFINE_float("meta_lr", 0.005, "Training: Meta learning rate")
flags.DEFINE_integer("meta_batch_size", 32, "Training: Meta batch size")
flags.DEFINE_float("inner_lr", 0.1, "Training: Inner loop learning rate")
flags.DEFINE_integer("inner_batch_size", 64, "Training: Inner loop batch size")
flags.DEFINE_integer("inner_steps", 2, "Training: Number of gradient steps to take in inner loop")
flags.DEFINE_bool("first_order", False, "Training: Use first order approximation in MAML")
flags.DEFINE_bool("weight_norm", False, "Training: Apply weight normalization to model weights")
flags.DEFINE_bool("anil", False, "Training: Use the ANIL algorithm from DeepMind")
flags.DEFINE_integer("meta_steps", 60000, "Training: Number of meta gradient steps to take")
flags.DEFINE_string("init_path", None, "Training: Path for model initialization")
flags.DEFINE_integer("ckpt_steps", 100, "Training: Number of iterations between checkpoints")
flags.DEFINE_string("metrics", "accuracy_score", "Training: Metrics to capture during training")
# Additional notes for training run
flags.DEFINE_string("notes", None, "Misc: Notes for training run")
flags.mark_flag_as_required("save_path")
flags.mark_flag_as_required("source")
def main(argv):
logging.info(f"Starting MAML training with {FLAGS.source} dataset.")
ckpt_save_path = os.path.join(FLAGS.save_path, "ckpts")
os.makedirs(ckpt_save_path, exist_ok=True)
logging.info(f"Setting seed...")
torch_utils.set_seed(FLAGS.seed)
metadata = [f.serialize() for f in FLAGS.get_key_flags_for_module(sys.argv[0])]
metadata = [m for m in metadata if m] # remove empty flags
metadata = "\n\t" + "\n\t".join(metadata)
logging.info(f"Current parameters: {metadata}")
flag_file = os.path.join(ckpt_save_path, "flagfile.txt")
FLAGS.flags_into_string()
FLAGS.append_flags_into_file(flag_file)
logging.info(f"Flags are stored to {flag_file}")
logging.info("Loading data...")
loaders = dataloaders.get_loaders(
source_path=FLAGS.source, inner_batch_size=FLAGS.inner_batch_size
)
logging.info("Instantiating model and optimizers...")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = GatedGraphNeuralNetwork(
n_edge=1, in_dim=75, n_conv=FLAGS.n_conv, fc_dims=[FLAGS.fc_dims, 1], p_dropout=0.0
)
if FLAGS.init_path is not None:
logging.info(f"Loading initializations from {FLAGS.init_path}")
model = torch.load(FLAGS.init_path)
model = model.to(device)
meta_learner = MAML(model, lr=FLAGS.inner_lr, first_order=FLAGS.first_order, anil=FLAGS.anil)
optimizer = optim.Adam(meta_learner.parameters(), FLAGS.meta_lr)
if FLAGS.mode == "binary_classification":
pos_weight = torch.tensor(
[l.dataset.y.sum() / len(l.dataset.y) for l in loaders["meta_train"]["train"]]
).mean()
criterion = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
elif FLAGS.mode == "regression":
criterion = nn.MSELoss()
else:
logging.error(f"--mode {FLAGS.mode} is not supported. Choose from ['binary_classification', 'regression'].")
sys.exit(1)
metrics = FLAGS.metrics.split(",") if FLAGS.metrics else []
logging.info(f"Begin training!")
meta_training(
meta_learner=meta_learner,
meta_steps=FLAGS.meta_steps,
meta_batch_size=FLAGS.meta_batch_size,
loaders=loaders,
optimizer=optimizer,
criterion=criterion,
inner_steps=FLAGS.inner_steps,
device=device,
save_path=ckpt_save_path,
ckpt_steps=FLAGS.ckpt_steps,
metrics=metrics,
)
if __name__ == "__main__":
app.run(main)
|
py | b40910aac33520892a5c5b22b09fd26b8dd5da4b | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# File: tower.py
# Author: Yuxin Wu <[email protected]>
import tensorflow as tf
import re
from ..utils.naming import *
__all__ = ['get_current_tower_context', 'TowerContext']
_CurrentTowerContext = None
class TowerContext(object):
def __init__(self, tower_name, is_training=None):
""" tower_name: 'tower0', 'towerp0', or '' """
self._name = tower_name
if is_training is None:
is_training = not self._name.startswith(PREDICT_TOWER)
self._is_training = is_training
@property
def is_main_training_tower(self):
return self.is_training and (self._name == '' or self._name == 'tower0')
@property
def is_main_tower(self):
return self._name == '' or self._name == 'tower0'
@property
def is_training(self):
return self._is_training
@property
def name(self):
return self._name
def get_variable_on_tower(self, *args, **kwargs):
"""
Get a variable for this tower specifically, without reusing.
Tensorflow doesn't allow reuse=False scope under a
reuse=True scope. This method provides a work around.
See https://www.tensorflow.org/versions/master/how_tos/variable_scope/index.html#basics-of-tfvariable-scope
:param args, kwargs: same as tf.get_variable()
"""
with tf.variable_scope(self._name) as scope:
with tf.variable_scope(scope, reuse=False):
scope = tf.get_variable_scope()
assert scope.reuse == False
return tf.get_variable(*args, **kwargs)
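    # Illustrative use (sketch): inside a variable scope that has reuse=True, a
    # tower-local variable can still be created without a reuse error, e.g.
    #   with TowerContext('tower0'):
    #       ctx = get_current_tower_context()
    #       step = ctx.get_variable_on_tower('step', shape=[], dtype=tf.int32,
    #                                        initializer=tf.constant_initializer(0))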
def find_tensor_in_main_tower(self, graph, name):
if self.is_main_tower:
return graph.get_tensor_by_name(name)
if name.startswith(PREDICT_TOWER):
predict_tower_prefix = '{}[0-9]+/'.format(PREDICT_TOWER)
newname = re.sub(predict_tower_prefix, '', name)
try:
return graph.get_tensor_by_name(newname)
except KeyError:
newname = re.sub(predict_tower_prefix, 'tower0/', name)
return graph.get_tensor_by_name(newname)
def __enter__(self):
global _CurrentTowerContext
assert _CurrentTowerContext is None, \
"Nesting TowerContext!"
_CurrentTowerContext = self
if len(self._name):
self._scope = tf.name_scope(self._name)
return self._scope.__enter__()
def __exit__(self, exc_type, exc_val, exc_tb):
global _CurrentTowerContext
_CurrentTowerContext = None
if len(self._name):
self._scope.__exit__(exc_type, exc_val, exc_tb)
return False
def get_current_tower_context():
global _CurrentTowerContext
return _CurrentTowerContext
|
py | b409116515e661919e0812695e65bdcd4e8b14b0 | import binascii
from Crypto.Cipher import AES
from cxc_toolkit import integer
def byte_xor(a: bytes, b: bytes) -> bytes:
'''
:return: xor result of a and b
'''
if len(a) > len(b):
return bytes([x ^ y for x, y in zip(a[:len(b)], b)])
else:
return bytes([x ^ y for x, y in zip(a, b[:len(a)])])
def to_int(byte):
"""
Convert bytes to int
:type byte: bytes
:rtype: int
"""
s = 0
for i, number in enumerate(byte):
s = s * 256 + number
return s
def byte_add(byte, additions):
    """
    Add int to bytes
    :type byte: bytes
    :type additions: int
    :rtype: bytes
    """
    return integer.to_bytes(to_int(byte) + additions, bytes_size=len(byte))
def msg_block_generator(msg, padding=False):
while len(msg) >= 16:
yield msg[:16]
msg = msg[16:]
if len(msg) > 0:
if not padding:
yield msg
return
        remainder = 16 - len(msg)
        msg = msg + bytes([remainder]) * remainder
        yield msg
    elif padding:
        # Message length is an exact multiple of the block size: append a full
        # block of PKCS#7-style padding so decryption can strip it unambiguously.
        yield bytes([16]) * 16
def cipher_block_generator(cipher):
while len(cipher):
yield cipher[:16]
cipher = cipher[16:]
class CBC:
def encrypt(msg, key, iv):
cipher = AES.new(key, AES.MODE_ECB)
cipher_block = iv
ciphertext = iv
for msg_block in msg_block_generator(msg, padding=True):
cipher_block = cipher.encrypt(byte_xor(cipher_block, msg_block))
ciphertext += cipher_block
return ciphertext
def decrypt(cipher_text, key):
cipher = AES.new(key, AES.MODE_ECB)
iv, cipher_text = cipher_text[:16], cipher_text[16:]
msg = b''
for cipher_block in cipher_block_generator(cipher_text):
msg_block = byte_xor(cipher.decrypt(cipher_block), iv)
iv = cipher_block
msg += msg_block
        if msg[-16:] == bytes([16]) * 16:
            return msg[:-16]
        pad_bytes = msg[-1]
        remainder = len(msg) - pad_bytes
        if msg[remainder:] == bytes([pad_bytes]) * pad_bytes:
            return msg[:remainder]
else:
print('Cipher text is invalid')
class CTR:
def encrypt(msg, key, iv):
cipher = AES.new(key, AES.MODE_ECB)
        ciphertext = iv  # prepend the IV so CTR.decrypt, which strips the first block, can recover it
for i, msg_block in enumerate(msg_block_generator(msg, padding=False)):
cipher_block = cipher.encrypt(byte_add(iv, i))
cipher_block = byte_xor(msg_block, cipher_block)
ciphertext += cipher_block
return ciphertext
def decrypt(cipher_text, key):
iv, cipher_text = cipher_text[:16], cipher_text[16:]
cipher = AES.new(key, AES.MODE_ECB)
msg = b''
for i, cipher_block in enumerate(cipher_block_generator(cipher_text)):
iv_encrypted = cipher.encrypt(byte_add(iv, i))
msg_block = byte_xor(cipher_block, iv_encrypted)
msg += msg_block
return msg
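
# ---------------------------------------------------------------------------
# Illustrative round-trip sketch (not part of the library API): the key, IV and
# message below are arbitrary placeholders; AES comes from the pycryptodome
# import at the top of this file.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import os

    key = os.urandom(16)   # AES-128 key
    iv = os.urandom(16)    # one block used as IV / initial counter
    message = b'attack at dawn, bring snacks'

    cbc_ct = CBC.encrypt(message, key, iv)
    print('CBC round trip ok:', CBC.decrypt(cbc_ct, key) == message)

    ctr_ct = CTR.encrypt(message, key, iv)
    print('CTR round trip ok:', CTR.decrypt(ctr_ct, key) == message)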
|
py | b40913941db90fc1db7db26ced4a4e126ad5301e | import torch
import torchaudio.functional as F
import unittest
from parameterized import parameterized
from torchaudio_unittest.common_utils import PytorchTestCase, TorchaudioTestCase, skipIfNoSox, skipIfRocm
from .functional_impl import Functional, FunctionalCPUOnly
class TestFunctionalFloat32(Functional, FunctionalCPUOnly, PytorchTestCase):
dtype = torch.float32
device = torch.device('cpu')
@unittest.expectedFailure
@skipIfRocm
def test_lfilter_9th_order_filter_stability(self):
super().test_lfilter_9th_order_filter_stability()
@skipIfRocm
class TestFunctionalFloat64(Functional, PytorchTestCase):
dtype = torch.float64
device = torch.device('cpu')
@skipIfNoSox
class TestApplyCodec(TorchaudioTestCase):
backend = "sox_io"
def _smoke_test(self, format, compression, check_num_frames):
"""
The purpose of this test suite is to verify that apply_codec functionalities do not exhibit
abnormal behaviors.
"""
torch.random.manual_seed(42)
sample_rate = 8000
num_frames = 3 * sample_rate
num_channels = 2
waveform = torch.rand(num_channels, num_frames)
augmented = F.apply_codec(waveform,
sample_rate,
format,
True,
compression
)
assert augmented.dtype == waveform.dtype
assert augmented.shape[0] == num_channels
if check_num_frames:
assert augmented.shape[1] == num_frames
def test_wave(self):
self._smoke_test("wav", compression=None, check_num_frames=True)
@parameterized.expand([(96,), (128,), (160,), (192,), (224,), (256,), (320,)])
def test_mp3(self, compression):
self._smoke_test("mp3", compression, check_num_frames=False)
@parameterized.expand([(0,), (1,), (2,), (3,), (4,), (5,), (6,), (7,), (8,)])
def test_flac(self, compression):
self._smoke_test("flac", compression, check_num_frames=False)
@parameterized.expand([(-1,), (0,), (1,), (2,), (3,), (3.6,), (5,), (10,)])
def test_vorbis(self, compression):
self._smoke_test("vorbis", compression, check_num_frames=False)
|
py | b40913984e0d9a08276edd74c8a43fc4a6017a70 | import json
import numpy as np
import pdb
import torch
from ray_utils import get_rays, get_ray_directions, get_ndc_rays
BOX_OFFSETS = torch.tensor([[[i,j,k] for i in [0, 1] for j in [0, 1] for k in [0, 1]]],
device='cuda')
SQR_OFFSETS = torch.tensor([[[i,j] for i in [0, 1] for j in [0, 1] ]], device='cuda')
def hash(coords, log2_hashmap_size):
'''
coords: 3D coordinates. B x 3
log2T: logarithm of T w.r.t 2
'''
x, y, z = coords[..., 0], coords[..., 1], coords[..., 2]
return torch.tensor((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663 ^ z*83492791)
#return ((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663 ^ z*83492791)
def hash2d(coords, log2_hashmap_size):
'''
    coords: 2D coordinates. B x 2
log2T: logarithm of T w.r.t 2
'''
x, y = coords[..., 0], coords[..., 1]
return torch.tensor((1<<log2_hashmap_size)-1) & (x*73856093 ^ y*19349663)
def xy2index(xy,resolution):
return xy[...,0]+xy[...,1]*resolution
def get_bbox3d_for_blenderobj(camera_transforms, H, W, near=2.0, far=6.0):
camera_angle_x = float(camera_transforms['camera_angle_x'])
focal = 0.5*W/np.tan(0.5 * camera_angle_x)
# ray directions in camera coordinates
directions = get_ray_directions(H, W, focal)
min_bound = [100, 100, 100]
max_bound = [-100, -100, -100]
points = []
for frame in camera_transforms["frames"]:
c2w = torch.FloatTensor(frame["transform_matrix"])
rays_o, rays_d = get_rays(directions, c2w)
def find_min_max(pt):
for i in range(3):
if(min_bound[i] > pt[i]):
min_bound[i] = pt[i]
if(max_bound[i] < pt[i]):
max_bound[i] = pt[i]
return
for i in [0, W-1, H*W-W, H*W-1]:
min_point = rays_o[i] + near*rays_d[i]
max_point = rays_o[i] + far*rays_d[i]
points += [min_point, max_point]
find_min_max(min_point)
find_min_max(max_point)
return (torch.tensor(min_bound)-torch.tensor([1.0,1.0,1.0]), torch.tensor(max_bound)+torch.tensor([1.0,1.0,1.0]))
def get_bbox3d_for_llff(poses, hwf, near=0.0, far=1.0):
H, W, focal = hwf
H, W = int(H), int(W)
# ray directions in camera coordinates
directions = get_ray_directions(H, W, focal)
min_bound = [100, 100, 100]
max_bound = [-100, -100, -100]
points = []
poses = torch.FloatTensor(poses)
for pose in poses:
rays_o, rays_d = get_rays(directions, pose)
rays_o, rays_d = get_ndc_rays(H, W, focal, 1.0, rays_o, rays_d)
def find_min_max(pt):
for i in range(3):
if(min_bound[i] > pt[i]):
min_bound[i] = pt[i]
if(max_bound[i] < pt[i]):
max_bound[i] = pt[i]
return
for i in [0, W-1, H*W-W, H*W-1]:
min_point = rays_o[i] + near*rays_d[i]
max_point = rays_o[i] + far*rays_d[i]
points += [min_point, max_point]
find_min_max(min_point)
find_min_max(max_point)
return (torch.tensor(min_bound)-torch.tensor([0.1,0.1,0.0001]), torch.tensor(max_bound)+torch.tensor([0.1,0.1,0.0001]))
def get_voxel_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int()
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS
hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size)
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
def get_plane_vertices_old(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
def box2plane(input):
in_xy = input[:,:2]#.unsqueeze(1)
in_xz = input[:,::2]#.unsqueeze(1)
in_yz = input[:,-2:]#.unsqueeze(1)
return [in_xy,in_xz,in_yz]
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int() #(B, 3)
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
#voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS #(B, 8, 3)
#hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size) #(B, 8)
voxel_indices_xy = bottom_left_idx[:,:2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_xz = bottom_left_idx[:,::2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_yz = bottom_left_idx[:,-2:].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
hashed_voxel_indices_xy = hash2d(voxel_indices_xy, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_xz = hash2d(voxel_indices_xz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_yz = hash2d(voxel_indices_yz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices = [hashed_voxel_indices_xy,
hashed_voxel_indices_xz,
hashed_voxel_indices_yz]
voxel_min_vertex = box2plane(voxel_min_vertex)
voxel_max_vertex = box2plane(voxel_max_vertex)
#pdb.set_trace()
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
def get_plane_vertices(xyz, bounding_box, resolution, log2_hashmap_size):
'''
xyz: 3D coordinates of samples. B x 3
bounding_box: min and max x,y,z coordinates of object bbox
resolution: number of voxels per axis
'''
def box2plane(input):
in_xy = input[:,:2]#.unsqueeze(1)
in_xz = input[:,::2]#.unsqueeze(1)
in_yz = input[:,-2:]#.unsqueeze(1)
return [in_xy,in_xz,in_yz]
box_min, box_max = bounding_box
if not torch.all(xyz <= box_max) or not torch.all(xyz >= box_min):
# print("ALERT: some points are outside bounding box. Clipping them!")
pdb.set_trace()
xyz = torch.clamp(xyz, min=box_min, max=box_max)
grid_size = (box_max-box_min)/resolution
bottom_left_idx = torch.floor((xyz-box_min)/grid_size).int() #(B, 3)
voxel_min_vertex = bottom_left_idx*grid_size + box_min
voxel_max_vertex = voxel_min_vertex + torch.tensor([1.0,1.0,1.0])*grid_size
# hashed_voxel_indices = [] # B x 8 ... 000,001,010,011,100,101,110,111
# for i in [0, 1]:
# for j in [0, 1]:
# for k in [0, 1]:
# vertex_idx = bottom_left_idx + torch.tensor([i,j,k])
# # vertex = bottom_left + torch.tensor([i,j,k])*grid_size
# hashed_voxel_indices.append(hash(vertex_idx, log2_hashmap_size))
#voxel_indices = bottom_left_idx.unsqueeze(1) + BOX_OFFSETS #(B, 8, 3)
#hashed_voxel_indices = hash(voxel_indices, log2_hashmap_size) #(B, 8)
voxel_indices_xy = bottom_left_idx[:,:2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_xz = bottom_left_idx[:,::2].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
voxel_indices_yz = bottom_left_idx[:,-2:].unsqueeze(1) + SQR_OFFSETS #(B, 4, 2)
#hashed_voxel_indices_xy = hash2d(voxel_indices_xy, log2_hashmap_size) #(B, 4)
#hashed_voxel_indices_xz = hash2d(voxel_indices_xz, log2_hashmap_size) #(B, 4)
#hashed_voxel_indices_yz = hash2d(voxel_indices_yz, log2_hashmap_size) #(B, 4)
hashed_voxel_indices_xy = xy2index(voxel_indices_xy,resolution) #(B, 4)
hashed_voxel_indices_xz = xy2index(voxel_indices_xz,resolution) #(B, 4)
hashed_voxel_indices_yz = xy2index(voxel_indices_yz,resolution) #(B, 4)
#print(hashed_voxel_indices_yz.shape)
#pdb.set_trace()
hashed_voxel_indices = [hashed_voxel_indices_xy,
hashed_voxel_indices_xz,
hashed_voxel_indices_yz]
voxel_min_vertex = box2plane(voxel_min_vertex)
voxel_max_vertex = box2plane(voxel_max_vertex)
return voxel_min_vertex, voxel_max_vertex, hashed_voxel_indices
if __name__=="__main__":
with open("data/nerf_synthetic/chair/transforms_train.json", "r") as f:
camera_transforms = json.load(f)
bounding_box = get_bbox3d_for_blenderobj(camera_transforms, 800, 800)
|
py | b40914fefbd881492c5c0f1402a213d37dd014c8 | #
# Copyright The NOMAD Authors.
#
# This file is part of NOMAD.
# See https://nomad-lab.eu for further info.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nomad.metainfo import Environment
from . import gamess
m_env = Environment()
m_env.m_add_sub_section(Environment.packages, gamess.m_package)
|
py | b4091558e11240f7572795ddf4d27677c593793f | # coding=utf-8
# Copyright 2018 Google AI, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
""" Tokenization classes for Camembert model."""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import logging
import os
from shutil import copyfile
import sentencepiece as spm
from transformers.tokenization_utils import PreTrainedTokenizer
logger = logging.getLogger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file':
{
'camembert-base': "https://s3.amazonaws.com/models.huggingface.co/bert/camembert-base-sentencepiece.bpe.model",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'camembert-base': None,
}
class CamembertTokenizer(PreTrainedTokenizer):
"""
Adapted from RobertaTokenizer and XLNetTokenizer
SentencePiece based tokenizer. Peculiarities:
- requires `SentencePiece <https://github.com/google/sentencepiece>`_
"""
vocab_files_names = VOCAB_FILES_NAMES
pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", sep_token="</s>",
cls_token="<s>", unk_token="<unk>", pad_token='<pad>', mask_token='<mask>',
additional_special_tokens=['<s>NOTUSED', '</s>NOTUSED'], **kwargs):
super(CamembertTokenizer, self).__init__(max_len=512, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token,
sep_token=sep_token, cls_token=cls_token, pad_token=pad_token,
mask_token=mask_token, additional_special_tokens=additional_special_tokens,
**kwargs)
self.max_len_single_sentence = self.max_len - 2 # take into account special tokens
self.max_len_sentences_pair = self.max_len - 4 # take into account special tokens
self.sp_model = spm.SentencePieceProcessor()
self.sp_model.Load(str(vocab_file))
self.vocab_file = vocab_file
# HACK: These tokens were added by fairseq but don't seem to be actually used when duplicated in the actual
# sentencepiece vocabulary (this is the case for <s> and </s>
self.fairseq_tokens_to_ids = {'<s>NOTUSED': 0, '<pad>': 1, '</s>NOTUSED': 2, '<unk>': 3}
self.fairseq_offset = len(self.fairseq_tokens_to_ids)
self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.fairseq_tokens_to_ids)
self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
"""
Build model inputs from a sequence or a pair of sequence for sequence classification tasks
by concatenating and adding special tokens.
A RoBERTa sequence has the following format:
single sequence: <s> X </s>
pair of sequences: <s> A </s></s> B </s>
"""
if token_ids_1 is None:
return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
cls = [self.cls_token_id]
sep = [self.sep_token_id]
return cls + token_ids_0 + sep + sep + token_ids_1 + sep
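        # Illustrative layout (token ids are arbitrary placeholders):
        #   build_inputs_with_special_tokens([5, 6])       -> [<s>, 5, 6, </s>]
        #   build_inputs_with_special_tokens([5, 6], [7])  -> [<s>, 5, 6, </s>, </s>, 7, </s>]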
def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
"""
Retrieves sequence ids from a token list that has no special tokens added. This method is called when adding
special tokens using the tokenizer ``prepare_for_model`` or ``encode_plus`` methods.
Args:
token_ids_0: list of ids (must not contain special tokens)
token_ids_1: Optional list of ids (must not contain special tokens), necessary when fetching sequence ids
for sequence pairs
            already_has_special_tokens: (default False) Set to True if the token list is already formatted with
special tokens for the model
Returns:
A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
"""
if already_has_special_tokens:
if token_ids_1 is not None:
raise ValueError("You should not supply a second sequence if the provided sequence of "
"ids is already formated with special tokens for the model.")
return list(map(lambda x: 1 if x in [self.sep_token_id, self.cls_token_id] else 0, token_ids_0))
if token_ids_1 is None:
return [1] + ([0] * len(token_ids_0)) + [1]
return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
"""
Creates a mask from the two sequences passed to be used in a sequence-pair classification task.
A RoBERTa sequence pair mask has the following format:
0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1
| first sequence | second sequence
if token_ids_1 is None, only returns the first portion of the mask (0's).
"""
sep = [self.sep_token_id]
cls = [self.cls_token_id]
if token_ids_1 is None:
return len(cls + token_ids_0 + sep) * [0]
return len(cls + token_ids_0 + sep + sep) * [0] + len(token_ids_1 + sep) * [1]
@property
def vocab_size(self):
return len(self.fairseq_tokens_to_ids) + len(self.sp_model)
def _tokenize(self, text):
return self.sp_model.EncodeAsPieces(text)
def _convert_token_to_id(self, token):
""" Converts a token (str/unicode) in an id using the vocab. """
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
elif self.sp_model.PieceToId(token) == 0:
# Convert sentence piece unk token to fairseq unk token index
return self.unk_token_id
return self.fairseq_offset + self.sp_model.PieceToId(token)
def _convert_id_to_token(self, index):
"""Converts an index (integer) in a token (string/unicode) using the vocab."""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset)
def save_vocabulary(self, save_directory):
""" Save the sentencepiece vocabulary (copy original file) and special tokens file
to a directory.
"""
if not os.path.isdir(save_directory):
logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
return
out_vocab_file = os.path.join(save_directory, VOCAB_FILES_NAMES['vocab_file'])
if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
copyfile(self.vocab_file, out_vocab_file)
return (out_vocab_file,)
|
py | b40916c5ccc8b48b2c45ee8f99a221d71ec9ba9b | from tqdm import tqdm
import numpy as np
import argparse
import os
parser = argparse.ArgumentParser(
description='Binarize labels of extreme prediction data sets.')
parser.add_argument('input_dir', help='Path to input directory')
args = parser.parse_args()
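# Note: the *_lbl_mat.txt files are expected in the common extreme-classification
# sparse format: a header line "num_points num_labels" followed by one line per data
# point consisting of space-separated "label:value" pairs. Here every value is 1 and
# only the smallest label index of each point is kept.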
def load_dat(path):
print('Reading in file "%s" ...' % path)
with open(path) as in_file:
num_lines, num_categories = (int(i)
for i in in_file.readline().split(' '))
print('=> %d multi-label predictions over %d categories.' %
(num_lines, num_categories))
dat = np.empty(num_lines, dtype=np.int32)
for i in tqdm(range(num_lines)):
labels_values = np.array(
list(int(j)
for i in in_file.readline().split(' ')
for j in i.split(':')),
dtype=np.int32).reshape((-1, 2))
assert np.all(labels_values[:, 1] == 1)
dat[i] = labels_values[:, 0].min()
# Make sure that we've reached EOF
assert len(in_file.read(1)) == 0
return dat, num_categories
def save_dat(dat, inv_perm, num_nonzero, path):
print('Writing to file "%s" ...' % path)
with open(path, 'wb') as out_file:
np.array([len(dat), num_nonzero], dtype=np.int32).tofile(out_file)
np.fromiter((inv_perm[dat_i]
for dat_i in dat), np.int32).tofile(out_file)
trn_dat, trn_cat = load_dat(os.path.join(args.input_dir, 'trn_lbl_mat.txt'))
counts = np.zeros((trn_cat,), np.int32)
for i in trn_dat:
counts[i] += 1
permutation = (-counts).argsort()
num_nonzero = np.sum(counts != 0)
print('Found %d used and %d unused labels' %
(num_nonzero, len(counts) - num_nonzero))
inv_perm = np.empty_like(permutation)
for i, j in enumerate(permutation):
inv_perm[j] = i
save_dat(trn_dat, inv_perm, num_nonzero, os.path.join(
args.input_dir, 'train-labels-first.np'))
tst_dat, tst_cat = load_dat(os.path.join(args.input_dir, 'tst_lbl_mat.txt'))
assert tst_cat == trn_cat
holdout_idxs = np.fromiter((i for i, dat_i in enumerate(tst_dat)
if inv_perm[dat_i] < num_nonzero), np.int32)
rng = np.random.RandomState(36528475)
holdout_idxs = holdout_idxs[rng.permutation(len(holdout_idxs))]
num_valid = round(0.2 * len(holdout_idxs))
valid_idxs = holdout_idxs[:num_valid]
test_idxs = holdout_idxs[num_valid:]
sel_path = os.path.join(args.input_dir, 'valid-indices.np')
print('Writing indices of %d validation points to "%s" ...' %
(len(valid_idxs), sel_path))
with open(sel_path, 'wb') as f:
np.array([len(valid_idxs)], dtype=np.int32).tofile(f)
valid_idxs.tofile(f)
sel_path = os.path.join(args.input_dir, 'test-indices.np')
print('Writing indices of %d test points to "%s" ...' %
(len(test_idxs), sel_path))
with open(sel_path, 'wb') as f:
np.array([len(test_idxs)], dtype=np.int32).tofile(f)
test_idxs.tofile(f)
save_dat(tst_dat[valid_idxs], inv_perm, num_nonzero, os.path.join(
args.input_dir, 'valid-labels-first.np'))
save_dat(tst_dat[test_idxs], inv_perm, num_nonzero, os.path.join(
args.input_dir, 'test-labels-first.np'))
print('Done.')
|
py | b40918b813a62fa3937073fa676badb7e243c402 | # Editing excel spreadsheets
import openpyxl
print("Now we'll create a new excel spreadsheet")
print('I need to read the current openpyxl module documentation "http://openpyxl.readthedocs.io/en/stable/tutorial.html"')
wb = openpyxl.Workbook()
wb
print(wb.get_sheet_names())
sheet = wb.get_sheet_by_name('Sheet')
print(sheet)
cellA1 = sheet['A1'].value
print(cellA1)
sheet['A1'] = 42   # assign to the cell itself; rebinding the local variable would not change the sheet
print(sheet['A1'].value)
cellA2 = sheet['A2'].value
sheet['A2'] = 'Hello'
print(sheet['A2'].value)
import os
os.chdir('c:\\users\\drew\\downloads')
cwd = os.getcwd()
print(cwd)
wb.save('editexample.xlsx')
sheet2 = wb.create_sheet()
print(wb.get_sheet_names())
print(sheet2.title)
sheet2.title = "NewName"
print(wb.get_sheet_names())
wb.save('editexample2.xlsx')
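# Optional sanity check (illustrative): re-open the workbook just saved above and
# confirm the edited values; the file and sheet names are the ones used earlier.
wb2 = openpyxl.load_workbook('editexample2.xlsx')
print(wb2.sheetnames)
print(wb2['Sheet']['A1'].value)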
|
py | b40919812d9616207f18f79cd100502e858060d8 | '''
This is a general guideline and instruction for
performing the analysis on Horse Racing in HK
Crucial processes are described below:
------------------------------------------------------
1) Web scraping from http://www.hkjc.com/english/racing/SelectHorse.asp
and related data sources (an illustrative scraping sketch follows below)
2) Data munging so as to feed the model
3) Perform analysis using different models
Promising approaches include Regression, Decision Trees,
Monte Carlo, Random Forest, etc.
4) Visualization and evaluation
5) Optimization and fine-tuning so as to get better outcome
6) To be continued...
'''
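
# ---------------------------------------------------------------------------
# Illustrative sketch for step 1 (web scraping). This is only a starting point:
# the layout of the hkjc.com page is not assumed here, and parsing its HTML
# tables with pandas.read_html is an assumption that must be checked against
# the live site (it may need session handling, headers, or another parser).
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import requests
    import pandas as pd

    URL = "http://www.hkjc.com/english/racing/SelectHorse.asp"
    resp = requests.get(URL, timeout=30)
    resp.raise_for_status()
    tables = pd.read_html(resp.text)   # list of DataFrames, one per HTML table found
    print("scraped %d table(s) from %s" % (len(tables), URL))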
|
py | b40919bace218c33f1139a29e3573177409d29af | # copyright: European Organization for Nuclear Research (CERN)
# @author:
# - Vincent Garonne, <[email protected]>, 2011-1013
# @contact: U{[email protected]<mailto:[email protected]>}
# @license: Licensed under the Apache License, Version 2.0 (the "License");
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at U{http://www.apache.org/licenses/LICENSE-2.0}
"""
Installation script Rucio's development virtualenv
"""
import errno
import optparse
import os
import subprocess
import shutil
import sys
ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
VENV = os.path.join(ROOT, '.venv')
PIP_REQUIRES = os.path.join(ROOT, 'tools', 'pip-requires')
PIP_REQUIRES_CLIENT = os.path.join(ROOT, 'tools', 'pip-requires-client')
PIP_REQUIRES_TEST = os.path.join(ROOT, 'tools', 'pip-requires-test')
def die(message, *args):
print >> sys.stderr, message % args
sys.exit(1)
def run_command(cmd, redirect_output=True, check_exit_code=True, shell=False):
"""
Runs a command in an out-of-process shell, returning the
output of that command. Working directory is ROOT.
"""
if redirect_output:
stdout = subprocess.PIPE
else:
stdout = None
proc = subprocess.Popen(cmd, cwd=ROOT, stdout=stdout, shell=shell)
output = proc.communicate()[0]
if check_exit_code and proc.returncode != 0:
die('Command "%s" failed.\n%s', ' '.join(cmd), output)
return output
HAS_EASY_INSTALL = bool(run_command(['which', 'easy_install'], check_exit_code=False).strip())
HAS_VIRTUALENV = bool(run_command(['which', 'virtualenv'], check_exit_code=False).strip())
HAS_PIP = bool(run_command(['which', 'pip'], check_exit_code=False).strip())
HAS_CURL = bool(run_command(['which', 'curl'], check_exit_code=False).strip())
def check_dependencies():
"""Make sure virtualenv is in the path."""
if not HAS_VIRTUALENV:
print 'virtualenv not found.'
# Try installing it via curl/pip/easy_install...
if HAS_PIP:
print 'Installing virtualenv via pip...',
if not run_command(['which', 'pip']):
die('ERROR: virtualenv not found.\n\n'
'Rucio development requires virtualenv, please install'
' it using your favorite package management tool')
else:
if not run_command(['pip', 'install', 'virtualenv']).strip():
die("Failed to install virtualenv.")
print 'done.'
elif HAS_EASY_INSTALL:
print 'Installing virtualenv via easy_install...',
if not run_command(['which', 'easy_install']):
die('ERROR: virtualenv not found.\n\n'
'Rucio development requires virtualenv, please install'
' it using your favorite package management tool')
else:
if not run_command(['easy_install', 'virtualenv']).strip():
die("Failed to install virtualenv.")
print 'done.'
print 'done.'
def create_virtualenv(venv=VENV):
"""
Creates the virtual environment and installs PIP only into the
virtual environment
"""
if HAS_VIRTUALENV:
print 'Creating venv...'
run_command(['virtualenv', '-q', '--no-site-packages', VENV])
elif HAS_CURL:
print 'Creating venv via curl...',
if not run_command("curl -s https://raw.github.com/pypa/virtualenv/master/virtualenv.py | %s - --no-site-packages %s" % (sys.executable, VENV), shell=True):
die('Failed to install virtualenv with curl.')
print 'done.'
print 'Installing pip in virtualenv...',
if not run_command(['tools/with_venv.sh', 'pip', 'install', 'pip>=9.0.1']).strip():
die("Failed to install pip.")
print 'done.'
def install_dependencies(venv=VENV, client=False):
print 'Installing dependencies with pip (this can take a while)...'
run_command(['.venv/bin/pip', 'install', '-r', PIP_REQUIRES_CLIENT], redirect_output=False)
if not client:
run_command(['.venv/bin/pip', 'install', '-r', PIP_REQUIRES], redirect_output=False)
run_command(['.venv/bin/pip', 'install', '-r', PIP_REQUIRES_TEST], redirect_output=False)
# Tell the virtual env how to "import rucio"
py_ver = _detect_python_version(venv)
pthfile = os.path.join(venv, "lib", py_ver, "site-packages", "rucio.pth")
f = open(pthfile, 'w')
f.write("%s/lib/\n" % ROOT)
f.close()
def _detect_python_version(venv):
lib_dir = os.path.join(venv, "lib")
for pathname in os.listdir(lib_dir):
if pathname.startswith('python'):
return pathname
raise Exception('Unable to detect Python version')
def create_symlinks(venv=VENV, atlas_clients=False):
print 'Installing binaries symlinks ...'
bin_dir = os.path.join(ROOT, "bin")
venv_bin_dir = os.path.join(venv, "bin")
binaries = os.listdir(bin_dir)
for binary in binaries:
source = os.path.join(bin_dir, binary)
link_name = os.path.join(venv_bin_dir, binary)
try:
os.path.exists(link_name) and source != os.readlink(link_name)
except OSError, e:
if e.errno == errno.EINVAL:
print 'Delete broken symlink: %(link_name)s -> %(source)s' % locals()
os.remove(link_name)
else:
raise e
if not os.path.exists(link_name):
print 'Create the symlink: %(link_name)s -> %(source)s' % locals()
os.symlink(source, link_name)
if atlas_clients:
source = os.path.join(ROOT, "etc")
link_name = os.path.join(venv, "etc")
try:
os.path.exists(link_name) and source != os.readlink(link_name)
except OSError, e:
if e.errno == errno.EINVAL:
print 'Delete broken symlink: %(link_name)s -> %(source)s' % locals()
os.remove(link_name)
else:
raise e
if not os.path.exists(link_name):
print 'Create the symlink: %(link_name)s -> %(source)s' % locals()
os.symlink(source, link_name)
cfg_name = os.path.join(link_name, 'rucio.cfg')
tpl_cfg_name = os.path.join(link_name, 'rucio.cfg.template')
if not os.path.exists(cfg_name):
print 'Configuring Rucio with etc/rucio.cfg.template'
shutil.copy(src=tpl_cfg_name, dst=cfg_name)
def print_help():
help = """
Rucio development environment setup is complete.
Rucio development uses virtualenv to track and manage Python dependencies
while in development and testing.
To activate the Rucio virtualenv for the extent of your current shell session
you can run:
$ source .venv/bin/activate
Or, if you prefer, you can run commands in the virtualenv on a case by case
basis by running:
$ tools/with_venv.sh <your command>
Also, make test will automatically use the virtualenv.
"""
print help
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option("-a", "--atlas-clients", action="store_true", default=False, dest="atlas_clients", help="Setting up a Rucio development environment for ATLAS clients")
(options, args) = parser.parse_args()
# check_dependencies()
create_virtualenv()
install_dependencies(client=options.atlas_clients)
create_symlinks(atlas_clients=options.atlas_clients)
print_help()
|
py | b4091aba0eed07018dac158ecbe4f8619d51010d | """
Copyright 2016 Alberto Sola
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from ..url.urldb import UrlDB
from ..url.url import Url
from ..url import urlutils
class UrlManager:
'''
'''
def __init__(self):
self.db_name = "JadeReaderDB"
self.db = UrlDB( self.db_name )
# --------------------------------------------------------------------------
# URL
# --------------------------------------------------------------------------
def add_url(self, url):
"""
@param [in] url : Url Object
"""
exists = self.db.check_url(url.get_url())
if not exists:
data = (url.get_name(), url.get_url(), url.get_feed(), url.get_category())
self.db.add_url( data )
exists = False
def get_url(self, url):
"""
@param [in] url : string
"""
data = self.db.get_url(url)
print(data)
return "A"
def update_url(self, new_url, old_url):
data = (new_url.get_name(), new_url.get_url(), new_url.get_feed(), new_url.get_category(), old_url.get_url())
self.db.update_url(data)
def del_url(self, url):
"""
Note: the URL must have the scheme (http://, ...), if not,
it won't be deleted.
@param [in] url : string
"""
self.db.del_url(url)
# --------------------------------------------------------------------------
# CATEGORY
# --------------------------------------------------------------------------
def get_category(self, category):
url_data = self.db.get_url_list(category)
urls = []
for url in url_data:
urls.append( Url(url[0],url[1],url[2],url[3]) )
return urls
def get_categories(self):
return self.db.get_categories()
def rename_category(self, old_name, new_name):
self.db.rename_category(old_name,new_name)
def del_category(self,category):
self.db.del_category(category)
|
py | b4091b14908b482e54135d695f275d3ea51cc1f8 | import numpy as np
import os, argparse, pickle, sys
from os.path import exists, join, isfile, dirname, abspath, split
from pathlib import Path
from glob import glob
import logging
import yaml
from .base_dataset import BaseDataset, BaseDatasetSplit
from ..utils import Config, make_dir, DATASET
from ..vis.boundingbox import BoundingBox3D
logging.basicConfig(
level=logging.INFO,
format='%(levelname)s - %(asctime)s - %(module)s - %(message)s',
)
log = logging.getLogger(__name__)
class KITTI(BaseDataset):
"""
KITTI 3D dataset for Object Detection, used in visualizer, training, or test
"""
def __init__(self,
dataset_path,
name='KITTI',
cache_dir='./logs/cache',
use_cache=False,
val_split=3712,
**kwargs):
"""
Initialize
Args:
dataset_path (str): path to the dataset
kwargs:
"""
super().__init__(dataset_path=dataset_path,
name=name,
cache_dir=cache_dir,
use_cache=use_cache,
val_split=val_split,
**kwargs)
cfg = self.cfg
self.name = cfg.name
self.dataset_path = cfg.dataset_path
self.num_classes = 3
self.label_to_names = self.get_label_to_names()
self.all_files = glob(
join(cfg.dataset_path, 'training', 'velodyne', '*.bin'))
self.train_files = []
self.val_files = []
for f in self.all_files:
idx = int(Path(f).name.replace('.bin', ''))
if idx < cfg.val_split:
self.train_files.append(f)
else:
self.val_files.append(f)
self.test_files = glob(
join(cfg.dataset_path, 'testing', 'velodyne', '*.bin'))
@staticmethod
def get_label_to_names():
label_to_names = {0: 'Car', 1: 'Pedestrian', 2: 'Cyclist', 3: 'Van'}
return label_to_names
@staticmethod
def read_lidar(path):
assert Path(path).exists()
return np.fromfile(path, dtype=np.float32).reshape(-1, 4)
@staticmethod
def read_label(path, calib):
if not Path(path).exists():
return None
with open(path, 'r') as f:
lines = f.readlines()
objects = []
for line in lines:
label = line.strip().split(' ')
center = np.array(
[float(label[11]),
float(label[12]),
float(label[13]), 1.0])
rect = calib['R0_rect']
Trv2c = calib['Tr_velo2cam']
points = center @ np.linalg.inv((rect @ Trv2c).T)
size = [float(label[9]), float(label[8]), float(label[10])] # w,h,l
center = [points[0], points[1], size[1] / 2 + points[2]]
ry = float(label[14])
front = [-1 * np.sin(ry), -1 * np.cos(ry), 0]
up = [0, 0, 1]
left = [-1 * np.cos(ry), np.sin(ry), 0]
objects.append(Object3d(center, front, up, left, size, label))
return objects
@staticmethod
def _extend_matrix(mat):
mat = np.concatenate([mat, np.array([[0., 0., 0., 1.]])], axis=0)
return mat
@staticmethod
def read_calib(path):
assert Path(path).exists()
with open(path, 'r') as f:
lines = f.readlines()
obj = lines[0].strip().split(' ')[1:]
P0 = np.array(obj, dtype=np.float32).reshape(3, 4)
obj = lines[1].strip().split(' ')[1:]
P1 = np.array(obj, dtype=np.float32).reshape(3, 4)
obj = lines[2].strip().split(' ')[1:]
P2 = np.array(obj, dtype=np.float32).reshape(3, 4)
obj = lines[3].strip().split(' ')[1:]
P3 = np.array(obj, dtype=np.float32).reshape(3, 4)
P0 = KITTI._extend_matrix(P0)
P1 = KITTI._extend_matrix(P1)
P2 = KITTI._extend_matrix(P2)
P3 = KITTI._extend_matrix(P3)
obj = lines[4].strip().split(' ')[1:]
R0 = np.array(obj, dtype=np.float32).reshape(3, 3)
rect_4x4 = np.zeros([4, 4], dtype=R0.dtype)
rect_4x4[3, 3] = 1
rect_4x4[:3, :3] = R0
obj = lines[5].strip().split(' ')[1:]
Tr_velo_to_cam = np.array(obj, dtype=np.float32).reshape(3, 4)
Tr_velo_to_cam = KITTI._extend_matrix(Tr_velo_to_cam)
return {
'P0': P0,
'P1': P1,
'P2': P2,
'P3': P3,
'R0_rect': rect_4x4,
'Tr_velo2cam': Tr_velo_to_cam
}
def get_split(self, split):
return KITTISplit(self, split=split)
def get_split_list(self, split):
cfg = self.cfg
dataset_path = cfg.dataset_path
file_list = []
        if split in ['train', 'training']:
            return self.train_files
elif split in ['test', 'testing']:
return self.test_files
elif split in ['val', 'validation']:
            return self.val_files
elif split in ['all']:
return self.train_files + self.val_files + self.test_files
else:
raise ValueError("Invalid split {}".format(split))
    def is_tested(self):
        pass
    def save_test_result(self):
        pass
class KITTISplit():
def __init__(self, dataset, split='train'):
self.cfg = dataset.cfg
path_list = dataset.get_split_list(split)
log.info("Found {} pointclouds for {}".format(len(path_list), split))
self.path_list = path_list
self.split = split
self.dataset = dataset
def __len__(self):
return len(self.path_list)
def get_data(self, idx):
pc_path = self.path_list[idx]
label_path = pc_path.replace('velodyne',
'label_2').replace('.bin', '.txt')
calib_path = label_path.replace('label_2', 'calib')
pc = self.dataset.read_lidar(pc_path)
calib = self.dataset.read_calib(calib_path)
label = self.dataset.read_label(label_path, calib)
data = {
'point': pc,
'feat': None,
'calib': calib,
'bounding_boxes': label,
}
return data
def get_attr(self, idx):
pc_path = self.path_list[idx]
name = Path(pc_path).name.split('.')[0]
attr = {'name': name, 'path': pc_path, 'split': self.split}
return attr
class Object3d(BoundingBox3D):
"""
Stores object specific details like bbox coordinates, occlusion etc.
"""
def __init__(self, center, front, up, left, size, label):
label_class = self.cls_type_to_id(label[0])
confidence = float(label[15]) if label.__len__() == 16 else -1.0
super().__init__(center, front, up, left, size, label_class, confidence)
self.name = label[0]
self.cls_id = self.cls_type_to_id(self.name)
self.truncation = float(label[1])
self.occlusion = float(
label[2]
) # 0:fully visible 1:partly occluded 2:largely occluded 3:unknown
self.alpha = float(label[3])
self.box2d = np.array((float(label[4]), float(label[5]), float(
label[6]), float(label[7])),
dtype=np.float32)
self.dis_to_cam = np.linalg.norm(self.center)
self.ry = float(label[14])
self.score = float(label[15]) if label.__len__() == 16 else -1.0
self.level = self.get_kitti_obj_level()
@staticmethod
def cls_type_to_id(cls_type):
"""
get object id from name.
"""
type_to_id = {
'DontCare': 0,
'Car': 1,
'Pedestrian': 2,
'Cyclist': 3,
'Van': 4
}
if cls_type not in type_to_id.keys():
return 0
return type_to_id[cls_type]
def get_kitti_obj_level(self):
"""
determines the difficulty level of object.
"""
height = float(self.box2d[3]) - float(self.box2d[1]) + 1
if height >= 40 and self.truncation <= 0.15 and self.occlusion <= 0:
self.level_str = 'Easy'
return 0 # Easy
elif height >= 25 and self.truncation <= 0.3 and self.occlusion <= 1:
self.level_str = 'Moderate'
return 1 # Moderate
elif height >= 25 and self.truncation <= 0.5 and self.occlusion <= 2:
self.level_str = 'Hard'
return 2 # Hard
else:
self.level_str = 'UnKnown'
return -1
def generate_corners3d(self):
"""
generate corners3d representation for this object
:return corners_3d: (8, 3) corners of box3d in camera coord
"""
l, h, w = self.size[2::-1]
x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
R = np.array([[np.cos(self.ry), 0, np.sin(self.ry)], [0, 1, 0],
[-np.sin(self.ry), 0,
np.cos(self.ry)]])
corners3d = np.vstack([x_corners, y_corners, z_corners]) # (3, 8)
corners3d = np.dot(R, corners3d).T
corners3d = corners3d + self.center
return corners3d
def to_str(self):
print_str = '%s %.3f %.3f %.3f box2d: %s hwl: [%.3f %.3f %.3f] pos: %s ry: %.3f' \
% (self.name, self.truncation, self.occlusion, self.alpha, self.box2d, self.size[2], self.size[0], self.size[1],
self.center, self.ry)
return print_str
def to_kitti_format(self):
kitti_str = '%s %.2f %d %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f' \
% (self.name, self.truncation, int(self.occlusion), self.alpha, self.box2d[0], self.box2d[1],
self.box2d[2], self.box2d[3], self.size[2], self.size[0], self.size[1], self.center[0], self.center[1], self.center[2],
self.ry)
return kitti_str
DATASET._register_module(KITTI)
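# --- Illustrative usage sketch (editor's addition). The dataset path below is
# hypothetical; the snippet only shows how the splits defined above are consumed.
if __name__ == "__main__":
    kitti = KITTI(dataset_path="/path/to/KITTI")  # hypothetical location
    train_split = kitti.get_split("training")
    print("training frames:", len(train_split))
    if len(train_split) > 0:
        sample = train_split.get_data(0)
        print(sample["point"].shape, len(sample["bounding_boxes"] or []))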
|
py | b4091bb46de4b7f4951d6674269c4c0b281dd6c4 | from utils import *
from intervaltree import IntervalTree
def load(virus):
if virus == 'h1':
escape_fname = ('results/flu/semantics/'
'analyze_semantics_flu_h1_bilstm_512.txt')
region_fname = 'data/influenza/h1_regions.txt'
elif virus == 'h3':
escape_fname = ('results/flu/semantics/'
'analyze_semantics_flu_h3_bilstm_512.txt')
region_fname = 'data/influenza/h3_regions.txt'
elif virus == 'hiv':
escape_fname = ('results/hiv/semantics/'
'analyze_semantics_hiv_bilstm_512.txt')
region_fname = 'data/hiv/bg505_regions.txt'
elif virus == 'sarscov2':
escape_fname = ('results/cov/semantics/'
'analyze_semantics_cov_bilstm_512.txt')
region_fname = 'data/cov/sarscov2_regions.txt'
else:
raise ValueError('Virus {} not supported'.format(virus))
return escape_fname, region_fname
def regional_escape(virus, beta=1., n_permutations=100000):
escape_fname, region_fname = load(virus)
# Parse protein regions, keep track of intervals,
# sizes and scores.
regions = IntervalTree()
name2size, name2escape = {}, {}
with open(region_fname) as f:
f.readline()
for line in f:
[ start, end, name ] = line.rstrip().split()
start, end = int(start) - 1, int(end) - 1
regions[start:(end + 1)] = name
if name not in name2escape:
name2escape[name] = []
name2size[name] = 0
name2size[name] += end - start + 1
# Load semantic data into memory.
data = []
with open(escape_fname) as f:
columns = f.readline().rstrip().split()
for line in f:
if line.rstrip().split()[2] in { 'U', 'B', 'J', 'X', 'Z' }:
continue
data.append(line.rstrip().split('\t'))
df_all = pd.DataFrame(data, columns=columns)
df_all['pos'] = pd.to_numeric(df_all['pos'])
df_all['prob'] = pd.to_numeric(df_all['prob'])
df_all['change'] = pd.to_numeric(df_all['change'])
df_all['acquisition'] = ss.rankdata(df_all.change) + \
(beta * ss.rankdata(df_all.prob))
# Reformat data for easy plotting and P-value computation.
plot_data = []
pos2scores = {}
for i in range(len(df_all)):
pos = df_all['pos'][i]
acquisition = df_all['acquisition'][i]
names = regions[pos]
for name in names:
name2escape[name.data].append(acquisition)
plot_data.append([ name.data, acquisition ])
if pos not in pos2scores:
pos2scores[pos] = []
pos2scores[pos].append(acquisition)
# Compute permutation-based P-value for each region.
seq_start = min(df_all['pos'])
seq_end = max(df_all['pos'])
all_pos = list(range(seq_start, seq_end + 1))
plot_data = []
for name in name2escape:
real_score = np.mean(name2escape[name])
size = name2size[name]
null_distribution = []
for perm in range(n_permutations):
rand_positions = np.random.choice(all_pos, size=size,
replace=False)
null_score = np.concatenate([
np.array(pos2scores[pos]) for pos in rand_positions
]).mean()
null_distribution.append(null_score)
null_distribution = np.array(null_distribution)
tprint('Enriched for escapes:')
p_val = (sum(null_distribution >= real_score)) / \
(n_permutations)
if p_val == 0:
p_val = 1. / n_permutations
tprint('{}, P < {}'.format(name, p_val))
else:
tprint('{}, P = {}'.format(name, p_val))
plot_data.append([ name, -np.log10(p_val), 'enriched' ])
tprint('Depleted for escapes:')
p_val = (sum(null_distribution <= real_score)) / \
(n_permutations)
if p_val == 0:
p_val = 1. / n_permutations
tprint('{}, P < {}'.format(name, p_val))
else:
tprint('{}, P = {}'.format(name, p_val))
plot_data.append([ name, -np.log10(p_val), 'depleted' ])
tprint('')
# Plot each region in bar plot.
plot_data = pd.DataFrame(plot_data,
columns=[ 'region', 'score', 'direction' ])
plt.figure()
sns.barplot(data=plot_data, x='region', y='score', hue='direction',
order=sorted(set(plot_data['region'])))
fdr = 0.05 / len(sorted(set(plot_data['region'])))
plt.axhline(y=-np.log10(fdr), color='gray', linestyle='--')
plt.xticks(rotation=60)
plt.savefig('figures/regional_escape_{}.svg'.format(virus))
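# --- Illustrative sketch (editor's addition): the acquisition score used above is a
# plain rank sum, rank(change) + beta * rank(prob). The change/prob values below
# are made up.
def _acquisition_example(beta=1.):
    change = np.array([0.2, 0.9, 0.5])  # ranks [1, 3, 2]
    prob = np.array([0.1, 0.7, 0.3])    # ranks [1, 3, 2]
    return ss.rankdata(change) + beta * ss.rankdata(prob)  # -> array([2., 6., 4.])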
if __name__ == '__main__':
virus = sys.argv[1]
regional_escape(virus)
|
py | b4091bea05e2b9f2e78f9f40870c9ac7e8a9cac3 | import os
import cv2
import time
import json
import random
import inspect
import argparse
import numpy as np
from tqdm import tqdm
from dataloaders import make_data_loader
from models.sync_batchnorm.replicate import patch_replication_callback
from models.vs_net import *
from utils.loss import loss_dict
from utils.lr_scheduler import LR_Scheduler
from utils.saver import Saver
from utils.summaries import TensorboardSummary
from utils.metrics import Evaluator
from utils import utils
from torch.autograd import Variable
import os.path as osp
from configs import *
import warnings
warnings.filterwarnings("ignore")
class Trainer(object):
def __init__(self, cfg):
self.cfg = cfg
# Define Saver
self.saver = Saver(cfg)
# Define Tensorboard Summary
self.summary = TensorboardSummary(self.cfg["log_tb_dir"])
self.summary.create_summary()
# Define Dataloader
kwargs = {"num_workers": cfg["num_workers"], "pin_memory": True}
self.train_loader, self.val_loader, self.test_loader, dset = make_data_loader(
cfg, **kwargs)
# read landmark centers
self.id2center = np.array(json.load(
open(osp.join(cfg["data_dir"], "id2centers.json")))).astype(np.float64)
self.coding_book = torch.zeros(
(len(self.id2center), cfg["seg_channel"]), dtype=torch.float32).cuda()
torch.nn.init.xavier_uniform(self.coding_book)
print("coding book size = {}".format(self.coding_book.shape))
# generate color map
unique_label = np.arange(len(self.id2center))
unique_label = unique_label.astype(
np.int64) * 6364136223846793005 + 1442695040888963407
color_map = np.zeros((unique_label.shape[0], 3), np.uint8)
color_map[:, 0] = np.bitwise_and(unique_label, 0xff)
color_map[:, 1] = np.bitwise_and(np.right_shift(unique_label, 4), 0xff)
color_map[:, 2] = np.bitwise_and(np.right_shift(unique_label, 8), 0xff)
self.color_map = np.array(color_map)
self.coding_book = Variable(self.coding_book, requires_grad=True)
# Define network
model = VSNet(backbone=cfg["backbone"],
seg_decoder=cfg["seg_decoder"],
vertex_decoder=cfg["vertex_decoder"],
seg_channel=cfg["seg_channel"],
vertex_channel=cfg["vertex_channel"],
output_stride=cfg["out_stride"],
sync_bn=cfg["sync_bn"])
train_params = [{"params": model.get_1x_lr_params(), "lr": cfg["lr"]},
{"params": model.get_10x_lr_params(),
"lr": cfg["lr"] * 10},
{"params": self.coding_book, "lr": cfg["lr"] * 10}
]
# Define Optimizer
if cfg["optimizer"] == "SGD":
optimizer = torch.optim.SGD(train_params, momentum=cfg["momentum"],
weight_decay=cfg["weight_decay"], nesterov=cfg["nesterov"])
elif cfg["optimizer"] == "Adam":
optimizer = torch.optim.Adam(train_params, lr=cfg["lr"],
weight_decay=cfg["weight_decay"], amsgrad=True)
else:
raise NotImplementedError
# Define Criterion
self.seg_criterion = loss_dict[cfg["seg_loss_type"]]
self.vertex_criterion = loss_dict[cfg["vertex_loss_type"]]
self.model, self.optimizer = model, optimizer
# Define Evaluator
self.evaluator = Evaluator(
self.coding_book.shape[0], cfg["vertex_channel"])
# Define lr scheduler
self.scheduler = LR_Scheduler(mode=cfg["lr_scheduler"], base_lr=cfg["lr"],
num_epochs=cfg["epochs"], iters_per_epoch=len(
self.train_loader),
lr_step=cfg["lr_step"])
self.model = torch.nn.DataParallel(self.model)
patch_replication_callback(self.model)
self.model = self.model.cuda()
# Resuming checkpoint
        self.best_pred = {"mIoU": 0.0, "Acc": 0.0, "Acc_class": 0.0,
                          "FWIoU": 0.0, "translation_median": 1000}
if cfg["resume"] is not None and cfg["resume"] == True:
print(os.path.isfile(cfg["resume_checkpoint"]))
if not os.path.isfile(cfg["resume_checkpoint"]):
raise RuntimeError("=> no checkpoint found at {}" .format(
cfg["resume_checkpoint"]))
checkpoint = torch.load(cfg["resume_checkpoint"])
cfg.opt["start_epoch"] = checkpoint["epoch"] - 1
self.model.module.load_state_dict(checkpoint["state_dict"])
if not cfg["ft"]:
self.optimizer.load_state_dict(checkpoint["optimizer"])
self.best_pred = checkpoint["best_pred"]
if "coding_book" in checkpoint.keys():
assert self.coding_book.shape == checkpoint["coding_book"].shape
self.coding_book = checkpoint["coding_book"]
else:
print("Alert! coding book does not exist in the checkpoint")
print("=> loaded checkpoint {} (epoch {})"
.format(cfg["resume"], checkpoint["epoch"]))
def validation(self, epoch):
print("=================================")
print("validation")
print("=================================")
self.model.eval()
self.evaluator.reset()
tbar = tqdm(self.val_loader, desc="\r")
num_iter_val = len(self.val_loader)
test_loss = 0.0
num_images = 0
ten_count = []
five_count = []
three_count = []
one_count = []
translation_list = []
angular_list = []
reproject_list = []
test_seg_loss = 0.0
test_ver_loss = 0.0
for i, data in enumerate(tbar):
image, seg_target, vertex_target = [d.cuda() for d in data[:3]]
valid_mask = data[-1].cuda()
pose_target, camera_k_matrix, ori_img = data[3:]
seg_target = seg_target.long()
valid_mask = (seg_target.detach() > 0).float()
with torch.no_grad():
seg_pred, vertex_pred, seg_pred_x4s = self.model(
image)
loss_seg = 0
if self.cfg["seg_decoder"]:
loss_seg = self.seg_criterion(seg_pred, seg_target, self.coding_book,
margin=self.cfg["seg_loss_margin"],
seg_k=self.cfg["seg_k"],
valid_mask=valid_mask)
test_seg_loss += loss_seg.item()
self.summary.add_scalar(
"val/loss_seg_iter", loss_seg.item(), i + num_iter_val * epoch)
loss_vertex = 0
if self.cfg["vertex_decoder"]:
loss_vertex = self.vertex_criterion(vertex_pred, vertex_target,
valid_mask)
test_ver_loss += loss_vertex.item()
self.summary.add_scalar(
"val/loss_vertex_iter", loss_vertex.item(), i + num_iter_val * epoch)
loss = 0
if self.cfg["seg_decoder"]:
loss += loss_seg
if self.cfg["vertex_decoder"]:
loss += loss_vertex * self.cfg["vertex_loss_ratio"]
test_loss += loss.item()
tbar.set_description("Test loss: %.9f" % (test_loss / (i + 1)))
self.summary.add_scalar(
"val/total_loss_iter", loss.item(), i + num_iter_val * epoch)
global_step = i * \
self.cfg["val_batch_size"] + image.data.shape[0]
# evaluate seg_pred
seg_target = seg_target.detach().squeeze()
if self.cfg["seg_decoder"]:
seg_pred, knn = utils.evaluate_segmentation(seg_pred_x4s,
self.coding_book, seg_target.size(), self.cfg["use_own_nn"])
else:
seg_pred = seg_target
# evaluate vertex
pt3d_filter, pt2d_filter, _ = utils.evaluate_vertex_v2(vertex_pred, seg_pred,
self.id2center, inlier_thresh=0.999,
min_mask_num=self.cfg["val_label_filter_threshsold"])
# pt3d_filter, pt2d_filter = utils.evaluate_vertex(vertex_target, seg_pred, self.id2center)
camera_k_matrix = camera_k_matrix.squeeze().numpy()
translation_distance, angular_distance, error = 1e9, 1e9, 1e9
if pt2d_filter.shape[0] > 6:
# pnp
ret, pose_pred = utils.pnp(
pt3d_filter, pt2d_filter, camera_k_matrix)
error = utils.reproject_error(
pt3d_filter, pt2d_filter, pose_pred, camera_k_matrix)
translation_distance, angular_distance = utils.cm_degree_metric(
pose_pred, pose_target)
print(translation_distance, angular_distance, error, i)
ten_count.append(translation_distance <
10 and angular_distance < 10)
five_count.append(translation_distance <
5 and angular_distance < 5)
three_count.append(translation_distance <
3 and angular_distance < 3)
one_count.append(translation_distance <
1 and angular_distance < 1)
translation_list.append(translation_distance)
angular_list.append(angular_distance)
reproject_list.append(error)
# Add batch sample into evaluator
if self.cfg["seg_decoder"]:
self.evaluator.add_seg_batch(seg_target, seg_pred)
if self.cfg["visualize_segmenation"]:
self.summary.visualize_seg_image(ori_img, seg_pred, seg_target,
epoch, i, global_step, self.color_map)
if self.cfg["vertex_decoder"]:
# evaluate vertex_pred
vertex_target, vertex_pred = vertex_target.squeeze(), vertex_pred.squeeze()
self.evaluator.add_vertex_batch(vertex_target, vertex_pred)
                # compute vertex accuracy
if self.cfg["visualize_voting"]:
if self.cfg["visualize_landmark"] != None and self.cfg["visualize_landmark"]:
self.summary.visualize_vertex_image(ori_img, vertex_pred, vertex_target,
epoch, i, global_step, pt2d_filter, True)
else:
self.summary.visualize_vertex_image(ori_img, vertex_pred, vertex_target,
epoch, i, global_step)
mIoU, Acc, Acc_class, FWIoU = self.summary.visualize_seg_evaluator(
self.evaluator, epoch, "val/seg/")
print("Validation:")
print("[Epoch: %d, numImages: %5d]" % (epoch, num_images))
print("Loss: %.9f" % (test_loss / num_iter_val))
self.summary.add_scalar("val/total_loss_epoch",
test_loss / num_iter_val, epoch)
self.summary.add_scalar("val/total_seg_epoch",
test_seg_loss / num_iter_val, epoch)
self.summary.add_scalar("val/total_ver_epoch",
test_ver_loss / num_iter_val, epoch)
self.summary.add_scalar("val/pnp/10cm_epoch",
np.mean(ten_count), epoch)
self.summary.add_scalar("val/pnp/5cm_epoch",
np.mean(five_count), epoch)
self.summary.add_scalar("val/pnp/3cm_epoch",
np.mean(three_count), epoch)
self.summary.add_scalar("val/pnp/1cm_epoch", np.mean(one_count), epoch)
self.summary.add_scalar(
"val/pnp/translation_median_epoch", np.median(translation_list), epoch)
self.summary.add_scalar(
"val/pnp/angular_median_epoch", np.median(angular_list), epoch)
new_pred = {"mIoU": mIoU.item(), "Acc": Acc.item(), "Acc_class": Acc_class.item(), "FWIoU": FWIoU.item(),
"10cm": np.mean(ten_count),
"5cm": np.mean(five_count), "3cm": np.mean(three_count), "1cm": np.mean(one_count),
"translation_median": np.median(translation_list), "angular_list": np.median(angular_list)}
print(new_pred)
if new_pred["translation_median"] < self.best_pred["translation_median"]:
is_best = True
self.best_pred = new_pred
self.saver.save_checkpoint({
"epoch": epoch + 1,
"state_dict": self.model.module.state_dict(),
"optimizer": self.optimizer.state_dict(),
"best_pred": self.best_pred,
"coding_book": self.coding_book
}, is_best, save_model=self.cfg["save_model"])
def main():
parser = argparse.ArgumentParser(
description="PyTorch Landmark Segmentation Training")
parser.add_argument("--dataset", type=str,
choices=["7scenes_loc", "cambridge_loc"], help="experiment config file")
parser.add_argument("--scene", type=str, default="",
help="experiment scene")
parser.add_argument("--gpu-id", type=str, default="",
help="experiment gpu id")
parser.add_argument("--use-aug", type=str, default="",
choices=["", "true", "false"], help="experiment use aug")
parser.add_argument("--config", type=str, default=None,
help="experiment config file")
parser.add_argument("--debug", type=str, default="",
choices=["", "true", "false"], help="debug")
parser.add_argument("--resume", type=str, default="true",
choices=["", "true", "false"], help="resume")
args = parser.parse_args()
debug = None
if args.debug != "":
debug = (args.debug == "true")
if args.dataset == "7scenes_loc":
cfg = SevenScenesLocConfig(args.config, debug)
elif args.dataset == "cambridge_loc":
cfg = CambridgeLocConfig(args.config, debug)
if args.scene != "":
cfg.opt["scene"] = args.scene
if args.gpu_id != "":
cfg.opt["devices"] = args.gpu_id
if args.use_aug == "true":
cfg.opt["use_aug"] = True
if args.resume == "true":
cfg.opt["resume"] = True
cfg.opt["resume_checkpoint"] = cfg["export_dir"] + \
'/ckpts/checkpoint-backup.pth.tar'
cfg.print_opt()
cfg.set_environmental_variables()
torch.manual_seed(cfg["seed"])
torch.cuda.manual_seed(cfg["seed"])
np.random.seed(cfg["seed"])
random.seed(cfg["seed"])
trainer = Trainer(cfg)
print("Starting Epoch:", trainer.cfg["start_epoch"])
print("Total Epoches:", trainer.cfg["epochs"])
trainer.validation(trainer.cfg["start_epoch"])
trainer.summary.close()
if __name__ == "__main__":
main()
|
py | b4091cf84e24b754fbf084d19e93337c908696ae | from django.db import models
from gcloudc.db.models.fields.related import RelatedSetField
from gcloudc.db.models.fields.json import JSONField
from .document import Document
from .constants import WORD_DOCUMENT_JOIN_STRING
class DocumentRecord(models.Model):
"""
'Document' is intentionally not a model;
it would ruin the abstraction, and we need to
store all kinds of data related to a Document.
So instead, each Document has an instance of DocumentRecord
this is the closest to a database representation of the doc
and indeed, is where the document ID comes from.
DocumentRecord exists to keep a reference to all token indexes
and any stats/settings about the document (e.g. its rank).
"""
index_stats = models.ForeignKey("IndexStats", on_delete=models.CASCADE)
# This allows for up-to 10000 unique terms in a single
# document. We need this data when deleting a document
# from the index
token_field_indexes = RelatedSetField("TokenFieldIndex")
# This is the data at the time the field was indexed so the doc
# can be reconstructed on fetch
data = JSONField()
class TokenFieldIndex(models.Model):
# key should be of the format WWWW|XXXX|YYYY|ZZZZ where:
# WWWW = index ID
# XXXX = normalised token
# YYYY = field_name
# ZZZZ = document id
# Querying for documents or fields containing the token
# will just be a key__startswith query (effectively)
id = models.CharField(primary_key=True, max_length=1500, default=None)
index_stats = models.ForeignKey("IndexStats", on_delete=models.CASCADE)
record = models.ForeignKey("DocumentRecord", on_delete=models.CASCADE)
token = models.CharField(max_length=500)
field_name = models.CharField(max_length=500)
@classmethod
def document_id_from_pk(cls, pk):
"""
Given a PK in the right format, return the document ID
"""
if pk is None:
return None
return int(pk.split(WORD_DOCUMENT_JOIN_STRING)[-1])
@property
def document_id(self):
return self.record_id
@property
def document(self):
return Document.objects.get(pk=self.document_id)
def save(self, *args, **kwargs):
assert(self.token.strip()) # Check we're not indexing whitespace or nothing
assert(WORD_DOCUMENT_JOIN_STRING not in self.token) # Don't index this special symbol
orig_pk = self.pk
self.pk = WORD_DOCUMENT_JOIN_STRING.join(
[str(x) for x in (self.index_stats_id, self.token, self.field_name, self.document_id)]
)
# Just check that we didn't *change* the PK
assert((orig_pk is None) or orig_pk == self.pk)
super().save(*args, **kwargs)
class IndexStats(models.Model):
"""
This is a representation of the index
in the datastore. Its PK is used as
a prefix to documents and token tables
but it's only really used itself to maintain
statistics about the indexed data.
"""
name = models.CharField(max_length=100, unique=True)
document_count = models.PositiveIntegerField(default=0)
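# --- Illustrative sketch (editor's addition, not part of the original models).
# A TokenFieldIndex primary key is the index id, token, field name and document id
# joined with WORD_DOCUMENT_JOIN_STRING; document_id_from_pk() reads the last
# component back. The values below are made up.
def _example_token_pk_roundtrip():
    pk = WORD_DOCUMENT_JOIN_STRING.join(["1", "hello", "title", "42"])
    return TokenFieldIndex.document_id_from_pk(pk)  # -> 42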
|
py | b4091d583bf6a1e4e2281f4e42f5841e68af5d0a | import discord
from discord.ext import commands
import asyncio
import importlib
import inspect
import itertools
import os
import random
import re
import subprocess
import textwrap
import utils
class DPYSource:
__slots__ = ('file', 'obj', 'parent', 'index', 'path', 'match')
def __init__(self, **attrs):
self.file = attrs.get('file')
self.obj = attrs.get('obj')
self.parent = attrs.get('parent')
self.index = attrs.get('index')
self.path = attrs.get('path')
self.match = attrs.get('match')
class Colour(metaclass=utils.MetaCog, category='API', colour=random.randint(0, 16581375),
thumbnail='https://i.imgur.com/g2LpJZb.png'):
"""Somewhere over the rainbow... An Eviee awaits!
These commands should help you feel all nice inside."""
def __init__(self, bot):
self.bot = bot
self.BASE = 'http://www.thecolorapi.com/id?format=json&hex={}'
self.BASE_S = 'http://www.colourlovers.com/api/palettes?hex={}&format=json'
_invalid = ('#', '0x', 'rgb', '(', ')', ',', ' ')
self._invalid = dict((re.escape(_), '' if _ not in (',', ' ') else ' ') for _ in _invalid)
self._pattern = re.compile("|".join(self._invalid.keys()))
def get_hex(self, value):
value = self._pattern.sub(lambda m: self._invalid[re.escape(m.group(0))], value)
try:
value = '%02x%02x%02x' % tuple(map(int, value.split()))
except (ValueError, TypeError):
pass
return value
@commands.command(name='colour', aliases=['color'], cls=utils.AbstractorGroup, abstractors=['show'])
async def _colour(self, ctx, *, value: str):
"""Retrieve information on a colour and get a scheme, from an RGB or HEX value.
[!This command is also a Base Command!]
Aliases
--------
color
Parameters
------------
value: [rgb, HEX]
The colour value to retrieve data for. This can be formatted in multiple ways.
Examples
----------
<prefix>colour <value>
<prefix>show colour <value>
{ctx.prefix}colour rgb(255, 255, 255)
{ctx.prefix}show colour #FFFFFF
{ctx.prefix}colour rgb 255,255,255
{ctx.prefix}colour 255 255 255
"""
await ctx.trigger_typing()
orig = value
value = self.get_hex(value.lower())
efmt = f'I could not find any colour matching value: **`{orig}`**\n' \
f'```css\n[Your colour is either invalid or not supported. Please try again. ' \
f'Supported Formats: RGB or HEX]\n```'
try:
resp, data = await self.bot.aio('get', self.BASE.format(value), return_attr='json')
except Exception as e:
return await ctx.send(f'There was an error processing your request.\n```css\n[{e}]\n```')
if resp.status != 200:
return await ctx.send(efmt)
try:
_hex = data['hex']['clean']
except KeyError:
return await ctx.send(efmt)
if _hex.lower() != value:
return await ctx.send(efmt)
resp_s, data_s = await self.bot.aio('get', self.BASE_S.format(value), return_attr='json')
try:
image = data_s[0]['imageUrl']
colours = data_s[0]['colors']
except (IndexError, KeyError):
image = f'https://dummyimage.com/300/{data["hex"]["clean"]}.png'
colours = None
try:
emcol = int(f"0x{_hex}", 0)
except ValueError:
return await ctx.send(efmt)
embed = discord.Embed(title=f'Colour - {data["name"]["value"]}', colour=emcol)
embed.set_thumbnail(url=f'https://dummyimage.com/150/{data["hex"]["clean"]}.png')
embed.set_image(url=image)
embed.add_field(name='HEX', value=f'{data["hex"]["value"]}')
embed.add_field(name='RGB', value=f'{data["rgb"]["value"]}')
embed.add_field(name='HSL', value=f'{data["hsl"]["value"]}')
embed.add_field(name='HSV', value=f'{data["hsv"]["value"]}')
embed.add_field(name='CMYK', value=f'{data["cmyk"]["value"]}')
embed.add_field(name='XYZ', value=f'{data["XYZ"]["value"]}')
if colours:
embed.add_field(name='Scheme:', value=' | '.join(colours), inline=False)
await ctx.send(embed=embed)
@_colour.command(name='show')
async def show_colour(self, ctx, *, value: str):
"""An aliases and base command to colour."""
await ctx.invoke(self.bot.get_command('colour'), value=value)
class Source(metaclass=utils.MetaCog, category='API', thumbnail='https://i.imgur.com/DF5ZfSh.png'):
"""Commands which allow you to Get that Juicy Sauce Code from various locations."""
def __init__(self, bot):
self.bot = bot
self.rtfs_anchors = None
self.rtfs_revision = None
bot.loop.create_task(self._update_rtfs())
async def get_rtfs_revision(self):
cmd = r'git ls-remote https://github.com/Rapptz/discord.py --tags rewrite HEAD~1..HEAD --format="%s (%cr)"'
if os.name == 'posix':
cmd = cmd.format(r'\`%h\`')
else:
cmd = cmd.format(r'`%h`')
revision = os.popen(cmd).read().strip()
return revision.split()[0]
def rtfs_embed(self, search, matches):
if not matches:
embed = discord.Embed(title=f'RTFS - <{search}>',
description=f'Sorry no results were found for {search}\n\nTry being more specific.',
colour=0x6dc9c9)
embed.add_field(name='Discord.py Source:', value='https://github.com/Rapptz/discord.py/tree/rewrite/')
else:
matches = '\n'.join(matches)
embed = discord.Embed(title=f'RTFS - <{search}>', description=f'{matches}', colour=0x6dc9c9)
return embed
async def _update_rtfs(self):
while not self.bot.is_closed():
try:
revision = await self.get_rtfs_revision()
except Exception:
await asyncio.sleep(600)
continue
if not self.rtfs_revision:
pass
elif self.rtfs_revision == revision:
await asyncio.sleep(3600)
continue
if os.name == 'nt':
await self._rtfs_load()
return
try:
cmd = 'python3.6 -m pip install -U git+https://github.com/Rapptz/discord.py.git@rewrite'
process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output, error = process.communicate()
process.kill()
except Exception:
pass
await self._rtfs_load()
async def _rtfs_load(self):
self.rtfs_revision = await self.get_rtfs_revision()
anchors = []
parent = None
pf = r'def(.*?[a-zA-Z0-9])\(.*\)|async def(.*?[a-zA-Z0-9])\(.*\)'
pc = r'class (.*[a-zA-Z0-9])[\:\(]'
def pred(y):
if inspect.isbuiltin(y):
return
try:
return 'discord' in y.__name__
except AttributeError:
return False
importlib.reload(discord)
mods = inspect.getmembers(discord, pred) + inspect.getmembers(commands, pred)
for x in mods:
file = x[1].__name__.split('.')[-1]
path = '/'.join(x[1].__name__.split('.')[:-1])
try:
src = inspect.getsourcelines(x[1])
except TypeError:
continue
for index, line in enumerate(src[0]):
orig = line
if sum(1 for _ in itertools.takewhile(str.isspace, line)) > 4:
continue
elif line == 0 or '__' in line:
continue
line = line.lstrip(' ')
match = re.match(pf, line)
if match:
if sum(1 for _ in itertools.takewhile(str.isspace, orig)) < 4:
parent = None
elif not match:
match = re.match(pc, line)
if match:
parent = match.group(1)
try:
obj = match.group(1) or match.group(2)
obj = obj.lstrip()
except AttributeError:
continue
attrs = {'file': file, 'obj': obj, 'parent': parent if parent != obj else None, 'index': index,
'path': path,
'match': f'{file}.{parent if parent and parent != obj else ""}'
f'{"." if parent and parent != obj else ""}{obj}'}
anchor = DPYSource(**attrs)
anchors.append(anchor)
self.rtfs_anchors = anchors
@commands.command(name='rtfs', aliases=['dsauce', 'dsource', 'dpysauce', 'dpysource'], cls=utils.EvieeCommand)
async def _rtfs(self, ctx, *, source: str=None):
"""Retrieve source code for discord.py.
Parameters
------------
source: [Optional]
The file, function, class, method or path to retrieve source for. Could be none to display
the base URL.
Examples
----------
<prefix>rtfs <source>
{ctx.prefix}rtfs bot.py
{ctx.prefix}rtfs Guild.members
{ctx.prefix}rtfs Guild
{ctx.prefix}rtfs member
"""
orig = source
surl = 'https://github.com/Rapptz/discord.py/blob/rewrite/'
to_return = []
if source is None:
return await ctx.send('https://github.com/Rapptz/discord.py/tree/rewrite/')
if source.endswith('.py'):
source = source.replace('.py', '').lower()
matches = utils.fuzzyfinder(source, [(a, a.file) for a in self.rtfs_anchors],
key=lambda t: t[1], lazy=False)[:5]
for f in matches:
to_return.append(f'[{f[0].file}.py]({surl}{f[0].path}/{f[0].file}.py)')
elif '.' in source:
matches = utils.fuzzyfinder(source, [(a, a.match) for a in self.rtfs_anchors], key=lambda t: t[1],
lazy=False)[:5]
if not matches:
matches = utils.fuzzyfinder(source, [(a, a.match.split('.', 1)[-1]) for a in self.rtfs_anchors],
key=lambda t: t[1], lazy=False)[:5]
for a in matches:
a = a[0]
to_return.append(f'[{a.match}]({surl}{a.path}/{a.file}.py#L{a.index + 1})')
else:
matches = utils.fuzzyfinder(source, [(a, a.obj) for a in self.rtfs_anchors], key=lambda t: t[1],
lazy=False)[:5]
for a in matches:
a = a[0]
to_return.append(f'[{a.match}]({surl}{a.path}/{a.file}.py#L{a.index + 1})')
to_return = set(to_return)
await ctx.send(embed=self.rtfs_embed(orig, sorted(to_return, key=lambda a: len(a))))
@commands.command(name='source', aliases=['sauce'], cls=utils.EvieeCommand)
async def get_source(self, ctx, *, target: str=None):
"""Retrieve the source code of a bot command or cog.
Aliases
---------
sauce
Parameters
------------
target: [Optional]
The command or cog to retrieve source for. Could be none to display the base URL.
Examples
----------
<prefix>source <target>
<prefix>sauce <target>
{ctx.prefix}source prefix
{ctx.prefix}source Fun
"""
if not target:
return await ctx.send('<https://github.com/EvieePy/EvieeBot>')
cmd = self.bot.get_command(target)
cog = self.bot.get_cog(target)
ext = self.bot.get_ext(target)
if cmd:
code = textwrap.dedent(inspect.getsource(cmd.callback))
elif cog:
code = textwrap.dedent(inspect.getsource(cog.__class__))
elif ext:
code = textwrap.dedent(inspect.getsource(ext))
else:
embed = discord.Embed(title=f'Source - <{target.strip()}>',
description=f'Sorry no results were found for {target.strip()}\n\n'
f'Make sure you specify a valid command or cog.',
colour=0x6dc9c9)
embed.add_field(name='EvieeBot', value='https://github.com/EvieePy/EvieeBot')
return await ctx.send(embed=embed)
bin_ = await self.bot.create_bin(data=code)
embed = discord.Embed(title=f'Source - <{target}>', description=f'{bin_}.py', colour=0x6dc9c9)
return await ctx.send(embed=embed)
|
py | b4091e0cbcb67923860233f92badd6a5a6fb1de8 | # encoding: UTF-8
# modified by 华富资产.李来佳.28888502 in 20190104
__author__ = 'CHENXY'
import os,sys
XTP_API_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(XTP_API_PATH)
from xtp_data_type import *
type_dict = {
'uint64_t': 'uint64_t',
'uint32_t': 'uint32_t',
'int64_t': 'int64_t',
'int32_t': 'int32_t',
'char': 'string',
'double': 'float'
}
typedefDict.update(type_dict)
#----------------------------------------------------------------------
def replaceTabs(f):
    """Replace each tab character with four spaces."""
l = []
import chardet
for line in f:
charset = chardet.detect(line)
line = line.decode(charset.get('encoding', 'ascii'))
line = line.replace('\t', ' ')
l.append(line)
return l
def main():
"""主函数"""
    # C++ data type (struct definition) header file
source_c_struct_common_file = os.path.join(os.getcwd(), '..', 'xtpapi', 'xtp_api_struct_common.h')
target_py_struct_common_file = os.path.join(os.getcwd(), '..', 'xtp_struct_common.py')
fcpp = open(source_c_struct_common_file, 'rb')
fpy = open(target_py_struct_common_file, 'w',encoding='utf-8')
fpy.write('# encoding: UTF-8\n')
fpy.write('\n')
fpy.write('structDict = {}\n')
fpy.write('\n')
    print(u'Start parsing: {}'.format(source_c_struct_common_file))
lcpp = replaceTabs(fcpp)
for n, line in enumerate(lcpp):
#print n
        # comment above a struct declaration
if '///' in line and '\t' not in line:
py_line = '#' + line[3:]
if ' //' in line:
py_line = '#' + line[2:]
        # comment above a struct member
elif ' ///' in line:
py_line = '#' + line[4:]
        # struct declaration
elif 'struct ' in line:
content = line.split(' ')
name = content[2].replace('\n','')
name = name.replace('\r', '')
py_line = '%s = {}\n' % name
        # struct member
elif ' ' == line[0:4] or '\t' == line[0] and '()' not in line and '{' not in line and '}' not in line and '=' not in line:
line = line.replace('\t', ' ')
content = line.split(' ')
content = [k for k in content if k]
typedef = content[0].replace('\t', '')
typedef = typedef.replace('()', '')
typedef = typedef.replace('\r', '')
typedef = typedef.replace('\n', '')
type_ = typedefDict[typedef]
variable = content[1]
variable = variable.replace(';', "")
variable = variable.replace('\n', "")
variable = variable.replace('\r', "")
if '[' in variable:
k = variable.index('[')
variable = variable[0:k]
py_line = '%s["%s"] = "%s"\n' % (name, variable, type_)
        # end of struct
elif '}' in line:
py_line = "structDict['%s'] = %s\n\n" % (name, name)
otherName = line.split(' ')[1]
otherName = otherName.replace(';', '')
otherName = otherName.replace('\n', '')
otherName = otherName.replace('\r', '')
second_line = "structDict['%s'] = %s\n\n" % (otherName, name)
py_line = py_line + second_line
        # start of struct
elif '{' in line:
py_line = ''
        # anything else
else:
py_line = '\n'
fpy.write(py_line)
fcpp.close()
fpy.close()
    print(u'Finished generating {}'.format(target_py_struct_common_file))
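# --- Illustrative sketch (editor's addition): for a hypothetical entry in the C++
# header such as
#
#       ///Depth market data
#       typedef struct XTPMarketDataStruct
#       {
#           char ticker[16];
#           double last_price;
#       } XTPMD;
#
# the loop above emits roughly:
#
#       #Depth market data
#       XTPMarketDataStruct = {}
#       XTPMarketDataStruct["ticker"] = "string"
#       XTPMarketDataStruct["last_price"] = "float"
#       structDict['XTPMarketDataStruct'] = XTPMarketDataStruct
#       structDict['XTPMD'] = XTPMarketDataStruct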
if __name__ == '__main__':
main() |
py | b4091e9473054e3c7d6d480480483d11ef91ff47 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy import Item, Field
class WeiboItem(Item):
table_name = 'weibo'
id = Field()
content = Field()
forward_count = Field()
comment_count = Field()
like_count = Field()
posted_at = Field()
url = Field()
user = Field()
crawled_at = Field()
|
py | b4091f1662f70fb4e35aea20d827bf75795c3009 | import os
import numpy as np
from ..convert.utils import parse_time
from ..convert.ecs_parser import CalibrationParser
from ..convert.evl_parser import LineParser
data_dir = './echoregions/test_data/ek60/'
output_csv = data_dir + 'output_CSV/'
output_json = data_dir + 'output_JSON/'
def test_parse_time():
# Test converting EV datetime string to numpy datetime64
timestamp = 'D20170625T1539223320'
assert parse_time(timestamp) == np.datetime64('2017-06-25T15:39:22.3320')
def test_plotting_points():
# Test converting points in EV format to plottable values (datetime64 and float)
evl_paths = data_dir + 'x1.bottom.evl'
l_parser = LineParser(evl_paths)
l_parser.to_json(output_json)
evl = l_parser.convert_points(l_parser.output_data['points'])
evl_points = np.array(l_parser.points_dict_to_list(evl))
x = np.array(evl_points[:, 0], dtype=np.datetime64)
y = evl_points[:, 1]
assert len(x) == 13764
assert len(y) == 13764
os.remove(l_parser.output_file)
os.rmdir(output_json)
def test_convert_ecs():
# Test converting an EV calibration file (ECS)
ecs_path = data_dir + 'Summer2017_JuneCal_3freq.ecs'
parser = CalibrationParser(ecs_path)
parser.parse_file(ignore_comments=True)
parser.to_csv(output_csv)
parser.to_json(output_json)
for path in parser.output_file:
assert os.path.exists(path)
os.remove(path)
os.rmdir(output_csv)
os.rmdir(output_json)
def test_convert_evl():
# Test converting an EV lines files (EVL)
evl_path = data_dir + 'x1.bottom.evl'
parser = LineParser(evl_path)
parser.parse_file()
parser.to_csv(output_csv)
parser.to_json(output_json)
for path in parser.output_file:
assert os.path.exists(path)
os.remove(path)
os.rmdir(output_csv)
os.rmdir(output_json)
|
py | b4091f8296b7b70afe96926bd1c591aa97d72590 | import logging
import typing as t
import discord
from discord.ext import commands, tasks
from aitchi.aitchi import Aitchi
from aitchi.config import Config, Secrets
from aitchi.persistence import Store
log = logging.getLogger(__name__)
def deep_lookup(mapping: t.Mapping, path: t.Iterable[t.Hashable]) -> t.Any:
"""Fetch value at `path` in `mapping`."""
value = mapping
for item in path:
value = value[item]
return value
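# --- Illustrative sketch (editor's addition): `deep_lookup` simply follows a path of
# keys through nested mappings; TikTokVideo (below) stores such a path in each
# attribute's typing.Annotated metadata. The payload here is a made-up example,
# not a real TikTok API response.
def _deep_lookup_example() -> str:
    payload = {"author": {"uniqueId": "someuser"}, "video": {"id": "123"}}
    return deep_lookup(payload, ("author", "uniqueId"))  # -> "someuser"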
class TikTokVideo:
"""Remote video representation."""
id: t.Annotated[str, "video", "id"]
description: t.Annotated[str, "desc"]
auth_id: t.Annotated[str, "author", "uniqueId"]
auth_avatar: t.Annotated[str, "author", "avatarThumb"]
auth_nickname: t.Annotated[str, "author", "nickname"]
def __init__(self, dictionary: dict[str, t.Any]) -> None:
"""Initialise instance from `dictionary`."""
for attr_name, annotation in self.__annotations__.items():
value = deep_lookup(dictionary, annotation.__metadata__)
setattr(self, attr_name, value)
@property
def auth_url(self) -> str:
"""Construct URL for the author's page."""
return f"https://www.tiktok.com/@{self.auth_id}"
@property
def video_url(self) -> str:
"""Construct URL for the video's page."""
return f"{self.auth_url}/video/{self.id}"
@property
def embed(self) -> discord.Embed:
"""Construct Discord embed representation."""
embed = discord.Embed(
title="New TikTok video!",
description=f"{self.description}"[:2048],
colour=discord.Colour(0xFAFAFA),
url=self.video_url,
)
embed.set_author(name=self.auth_nickname, url=self.auth_url, icon_url=self.auth_avatar)
embed.set_footer(text="TikTok", icon_url=Config.tiktok_logo)
return embed
class TikTok(commands.Cog):
"""
TikTok notifications.
This extension is responsible for polling TikTok for new videos from the configured user. When a new video
is found, a notification is sent to the configured channel.
"""
def __init__(self, bot: Aitchi) -> None:
"""Initialise store & start daemon."""
self.bot = bot
self.store = Store(namespace="tiktok")
self.daemon.start()
async def report_error(self, exception: Exception) -> None:
"""
Send an error report to the configured log channel.
The `exception` message will be included.
"""
log.debug(f"Dispatching error report for exception of type: {type(exception)}")
await self.bot.wait_until_ready()
log_channel: t.Optional[discord.TextChannel] = self.bot.get_channel(Config.log_channel)
if log_channel is None:
log.critical(f"Failed to acquire configured log channel: {Config.log_channel} not found!")
return
await log_channel.send(f"TikTok daemon encountered exception:\n```{exception}```")
async def fetch_videos(self) -> list[TikTokVideo]:
"""
Poll TikTok API for the last 10 videos from the configured user.
Raise exception on non-200 responses, or when the response lacks expected keys.
"""
log.debug("Polling TikTok API for recent videos!")
params = {
"count": 10, # How many videos to get
"cursor": 0, # Starting from 0th video, i.e. last 10
"aid": Secrets.tiktok_id, # Application ID
"secUid": Config.tiktok_target, # Target user
}
async with self.bot.http_session.get("https://m.tiktok.com/api/post/item_list", params=params) as resp:
if resp.status != 200:
raise Exception(f"Failed to get video list due to status: {resp.status}")
log.debug("Fetch successful, parsing JSON response")
payload = await resp.json()
if "itemList" not in payload:
raise Exception(f"Response payload lacks 'itemList' key: {payload}")
return [TikTokVideo(item) for item in payload["itemList"]]
async def daemon_main(self) -> None:
"""
Check for previously unseen videos and notify the configured channel.
Propagate exceptions in unexpected cases. The caller is responsible for error handling.
"""
log.debug("Daemon main: fetching video list")
recent_videos = await self.fetch_videos()
seen_video_ids: list[str] = self.store.get("seen_videos")
if seen_video_ids is None:
log.debug("Daemon main: store is empty, caching recent videos (maiden case)")
self.store.set("seen_videos", [video.id for video in recent_videos])
return
new_videos = [video for video in recent_videos if video.id not in seen_video_ids]
if not new_videos:
log.debug("Daemon main: found no unseen videos")
return
log.debug(f"Found {len(new_videos)} new videos!")
await self.bot.wait_until_ready() # Ensure cache is ready before we grab the channel
notification_channel: t.Optional[discord.TextChannel] = self.bot.get_channel(Config.notification_channel)
if notification_channel is None:
raise Exception(f"Failed to acquire configured notification channel: {Config.notification_channel}")
log.debug(f"Sending notifications to: #{notification_channel.name}")
for new_video in new_videos:
await notification_channel.send(embed=new_video.embed)
log.debug("Caching new videos")
self.store.set("seen_videos", [video.id for video in new_videos] + seen_video_ids)
@tasks.loop(minutes=30)
async def daemon(self) -> None:
"""
Periodically call `daemon_main`.
If an exception propagates out of the main, send an alert to the configured log channel. This will generally
happen if the API returns an unexpected response.
"""
log.info("Daemon: invoking main")
try:
await self.daemon_main()
except Exception as exc:
log.error("Daemon encountered an unhandled exception!", exc_info=exc)
await self.report_error(exc)
else:
log.debug("Daemon pass complete")
def setup(bot: Aitchi) -> None:
"""Load cog."""
bot.add_cog(TikTok(bot))
|
py | b4091f83356f6c86a7695b50c89794eaba393545 |
import os
import sys
import time
import textwrap
from subprocess import Popen
def save(file):
os.chdir(r"C:\Program Files (x86)\BioTek\Liquid Handling Control 2.22")
script = r"""
SetKeyDelay, 10
SetTitleMatchMode, RegEx
WinWait FILE_LHC.*
WinActivate FILE_LHC.*
Click, 25 40
Click, 50 225
WinWait Print
WinActivate Print
Send {Enter}
WinWait Save Print Output As
WinActivate Save Print Output As
Send {Raw}FILE_PDF
Send {Enter}
WinActivate FILE_LHC.*
Send {Alt}{Up}{Up}{Enter}
"""
script = textwrap.dedent(script)
file_lhc = file.split("\\")[-1]
file_pdf = file_lhc.split(".")[0] + ".pdf"
file_pdf = "C:\\pharmbio\\" + file_pdf
script = script.replace("FILE_LHC", file_lhc)
script = script.replace("FILE_PDF", file_pdf)
script = "\nSleep, 50\n".join(script.split("\n"))
ahk_filepath = "C:\\pharmbio\\ahk.ahk"
if os.path.exists(file_pdf):
os.remove(file_pdf)
with open(ahk_filepath, "w") as fp:
fp.write(script)
p = Popen([r"C:\Program Files (x86)\BioTek\Liquid Handling Control 2.22\Liquid Handling Control.exe", file])
time.sleep(5)
print("Running ahk")
p2 = Popen([r"C:\Program Files\AutoHotkey\AutoHotkey.exe", ahk_filepath], stdout=sys.stdout, stderr=sys.stderr)
p2.wait()
print("ahk done")
p.wait()
print("lhc done")
files = []
path = r"C:\ProgramData\BioTek\Liquid Handling Control 2.22\Protocols\automation_v3.1"
for file in os.listdir(path):
if file.lower().endswith('.lhc'):
files += [path + "\\" + file]
for file in files:
print(file)
save(file)
|
py | b4091fa0d44f69d133382e542bedb4503ac1a0e2 | brasileirao = 'CORINTHIAS', 'PALMEIRAS', 'SANTOS', 'GRÊMIO',\
'CRUZEIRO', 'FLAMENGO', 'VASCO', 'CHAPECOENSE',\
'ATLÉTICO', 'BOTAFOGO', 'ATLÉTICO - PR', 'BAHIA', 'SÃO PAULO',\
'FLUMINENSE', 'SPORT RECIFE', 'EC VITÓRIA', 'CORITIBA', 'AVAÍ', \
'PONTE PRETA', 'ATLÉTICO - GO'
print("-="*150)
print(f"The Brasileirão standings are as follows: {brasileirao}")
print("-="*150)
print(f"The top five teams are: {brasileirao[:5]}")
print("-="*150)
print(f"The bottom four teams are: {brasileirao[-4:]}")
print("-="*150)
print(f"In alphabetical order, the teams are: {sorted(brasileirao)}")
print("-="*150)
print(f"Chapecoense is in position {brasileirao.index('CHAPECOENSE') + 1}.")
print("-="*150)
|
py | b4091fda2c9eedd562b368a1ab9f4eba8b0c5943 | # -*- coding: utf-8 -*-
# @Time : 1/16/19 6:40 AM
# @Author : zhoujun
from .straight_text import straight_text_metrics
from .curve_text import curve_text_metrics
def cal_recall_precision_f1(gt_path, result_path, text_type='curve', show_result=False):
if text_type == 'curve':
return curve_text_metrics(gt_path, result_path, show_result)
elif text_type == 'straight':
return straight_text_metrics(gt_path, result_path, show_result)
else:
raise NotImplementedError('invalid text type!')
|
py | b409224de5af694892d21a94c3b8231bcc0aa489 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id$
#
# Project: GDAL/OGR Test Suite
# Purpose: LIBKML Driver testing.
# Author: Even Rouault <even dot rouault at mines dash paris dot org>
#
###############################################################################
# Copyright (c) 2010-2014, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import sys
sys.path.append('../pymod')
import gdaltest
import ogrtest
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
###############################################################################
# Test basic open operation for KML datastore.
#
def ogr_libkml_datastore():
ogrtest.kml_ds = None
ogrtest.have_read_libkml = 0
ogrtest.kml_drv = None
ogrtest.libkml_drv = None
try:
ogrtest.kml_drv = ogr.GetDriverByName('LIBKML')
except:
pass
if ogrtest.kml_drv is None:
return 'skip'
try:
ogrtest.kml_drv = ogr.GetDriverByName('KML')
except:
pass
# Unregister KML driver if present as its behaviour is not identical
# to new LIBKML driver
if ogrtest.kml_drv is not None:
print('Unregister KML driver')
ogrtest.kml_drv.Deregister()
try:
ogrtest.kml_ds = ogr.Open('data/samples.kml')
except:
pass
if ogrtest.kml_ds is not None:
ogrtest.have_read_libkml = 1
if not ogrtest.have_read_libkml:
return 'skip'
if ogrtest.kml_ds.GetLayerCount() != 10:
gdaltest.post_reason('wrong number of layers')
print(ogrtest.kml_ds.GetLayerCount())
return 'fail'
return 'success'
###############################################################################
# Test reading attributes for first layer (point).
#
def ogr_libkml_attributes_1():
if not ogrtest.have_read_libkml:
return 'skip'
if ogrtest.kml_ds is None:
gdaltest.post_reason('libkml_ds is none')
return 'fail'
lyr = ogrtest.kml_ds.GetLayerByName('Placemarks')
feat = lyr.GetNextFeature()
if feat.GetField('Name') != 'Simple placemark':
gdaltest.post_reason('Wrong name field value')
return 'fail'
if feat.GetField('description')[:23] != 'Attached to the ground.':
gdaltest.post_reason('Wrong description field value')
print('got: ', feat.GetField('description')[:23])
return 'fail'
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('expected feature not found.')
return 'fail'
if feat.GetField('Name') != 'Floating placemark':
gdaltest.post_reason('Wrong name field value')
return 'fail'
if feat.GetField('description')[:25] != 'Floats a defined distance':
gdaltest.post_reason('Wrong description field value')
print('got: ', feat.GetField('description')[:25])
return 'fail'
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('expected feature not found.')
return 'fail'
if feat.GetField('Name') != 'Extruded placemark':
gdaltest.post_reason('Wrong name field value')
return 'fail'
if feat.GetField('description') != 'Tethered to the ground by a customizable\n \"tail\"':
gdaltest.post_reason('Wrong description field value')
print('got: ', feat.GetField('description'))
return 'fail'
return 'success'
###############################################################################
# Test reading attributes for another layer (point).
#
def ogr_libkml_attributes_2():
if not ogrtest.have_read_libkml:
return 'skip'
if ogrtest.kml_ds is None:
gdaltest.post_reason('libkml_ds is none')
return 'fail'
lyr = ogrtest.kml_ds.GetLayerByName('Highlighted Icon')
feat = lyr.GetNextFeature()
if feat.GetField('Name') != 'Roll over this icon':
gdaltest.post_reason('Wrong name field value')
return 'fail'
if feat.GetField('description') is not None:
gdaltest.post_reason('Wrong description field value')
print("'%s'" % feat.GetField('description'))
return 'fail'
feat = lyr.GetNextFeature()
if feat is not None:
gdaltest.post_reason('unexpected feature found.')
return 'fail'
return 'success'
###############################################################################
# Test reading attributes for another layer (linestring).
#
def ogr_libkml_attributes_3():
if not ogrtest.have_read_libkml:
return 'skip'
if ogrtest.kml_ds is None:
gdaltest.post_reason('libkml_ds is none')
return 'fail'
lyr = ogrtest.kml_ds.GetLayerByName('Paths')
feat = lyr.GetNextFeature()
if feat.GetField('Name') != 'Tessellated':
gdaltest.post_reason('Wrong name field value')
return 'fail'
if feat.GetField('description') != 'If the <tessellate> tag has a value of 1, the line will contour to the underlying terrain':
gdaltest.post_reason('Wrong description field value')
return 'fail'
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('expected feature not found.')
return 'fail'
if feat.GetField('Name') != 'Untessellated':
gdaltest.post_reason('Wrong name field value')
return 'fail'
if feat.GetField('description') != 'If the <tessellate> tag has a value of 0, the line follow a simple straight-line path from point to point':
gdaltest.post_reason('Wrong description field value')
return 'fail'
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('expected feature not found.')
return 'fail'
return 'success'
###############################################################################
# Test reading attributes for another layer (polygon).
#
def ogr_libkml_attributes_4():
if not ogrtest.have_read_libkml:
return 'skip'
if ogrtest.kml_ds is None:
gdaltest.post_reason('libkml_ds is none')
return 'fail'
lyr = ogrtest.kml_ds.GetLayerByName('Google Campus')
feat = lyr.GetNextFeature()
i = 40
while feat is not None:
name = 'Building %d' % i
if feat.GetField('Name') != name:
gdaltest.post_reason('Wrong name field value')
print('Got: "%s"' % feat.GetField('name'))
return 'fail'
if feat.GetField('description') is not None:
gdaltest.post_reason('Wrong description field value')
return 'fail'
i = i + 1
feat = lyr.GetNextFeature()
return 'success'
###############################################################################
# Test reading of KML point geometry
#
def ogr_libkml_point_read():
if not ogrtest.have_read_libkml:
return 'skip'
if ogrtest.kml_ds is None:
gdaltest.post_reason('libkml_ds is none')
return 'fail'
lyr = ogrtest.kml_ds.GetLayerByName('Placemarks')
lyr.ResetReading()
feat = lyr.GetNextFeature()
wkt = 'POINT(-122.0822035425683 37.42228990140251)'
if ogrtest.check_feature_geometry(feat, wkt):
return 'fail'
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('expected feature not found.')
return 'fail'
wkt = 'POINT(-122.084075 37.4220033612141 50)'
if ogrtest.check_feature_geometry(feat, wkt):
return 'fail'
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('expected feature not found.')
return 'fail'
wkt = 'POINT(-122.0857667006183 37.42156927867553 50)'
if ogrtest.check_feature_geometry(feat, wkt):
return 'fail'
return 'success'
###############################################################################
# Test reading of KML linestring geometry
#
def ogr_libkml_linestring_read():
if not ogrtest.have_read_libkml:
return 'skip'
if ogrtest.kml_ds is None:
gdaltest.post_reason('libkml_ds is none')
return 'fail'
lyr = ogrtest.kml_ds.GetLayerByName('Paths')
lyr.ResetReading()
feat = lyr.GetNextFeature()
wkt = 'LINESTRING (-112.081423783034495 36.106778704771372 0, -112.087026775269294 36.0905099328766 0)'
if ogrtest.check_feature_geometry(feat, wkt):
return 'fail'
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('expected feature not found.')
return 'fail'
wkt = 'LINESTRING (-112.080622229594994 36.106734600079953 0,-112.085242575314993 36.090495986124218 0)'
if ogrtest.check_feature_geometry(feat, wkt):
return 'fail'
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('expected feature not found.')
return 'fail'
wkt = 'LINESTRING (-112.265654928602004 36.094476726025462 2357,-112.266038452823807 36.093426088386707 2357,-112.266813901345301 36.092510587768807 2357,-112.267782683444494 36.091898273579957 2357,-112.268855751095202 36.091313794118697 2357,-112.269481071721899 36.090367720752099 2357,-112.269526855561097 36.089321714872852 2357,-112.269014456727604 36.088509160604723 2357,-112.268152881533894 36.087538135979557 2357,-112.2670588176031 36.086826852625677 2357,-112.265737458732104 36.086463123013033 2357)'
if ogrtest.check_feature_geometry(feat, wkt):
return 'fail'
return 'success'
###############################################################################
# Test reading of KML polygon geometry
#
def ogr_libkml_polygon_read():
if not ogrtest.have_read_libkml:
return 'skip'
if ogrtest.kml_ds is None:
gdaltest.post_reason('libkml_ds is none')
return 'fail'
lyr = ogrtest.kml_ds.GetLayerByName('Google Campus')
lyr.ResetReading()
feat = lyr.GetNextFeature()
wkt = 'POLYGON ((-122.084893845961204 37.422571240447859 17,-122.084958097919795 37.422119226268563 17,-122.084746957304702 37.42207183952619 17,-122.084572538096197 37.422090067296757 17,-122.084595488672306 37.422159327008949 17,-122.0838521118269 37.422272785643713 17,-122.083792243334997 37.422035391120843 17,-122.0835076656616 37.422090069571063 17,-122.083470946415204 37.422009873951609 17,-122.083122108574798 37.422104649494599 17,-122.082924737457205 37.422265039903863 17,-122.082933916938501 37.422312428430942 17,-122.083383735973698 37.422250460876178 17,-122.083360785424802 37.422341592287452 17,-122.083420455164202 37.42237075460644 17,-122.083659133885007 37.422512920110009 17,-122.083975843895203 37.422658730937812 17,-122.084237474333094 37.422651439725207 17,-122.0845036949503 37.422651438643499 17,-122.0848020460801 37.422611339163147 17,-122.084788275051494 37.422563950551208 17,-122.084893845961204 37.422571240447859 17))'
if ogrtest.check_feature_geometry(feat, wkt):
return 'fail'
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('expected feature not found.')
return 'fail'
wkt = 'POLYGON ((-122.085741277148301 37.422270331552568 17,-122.085816976848093 37.422314088323461 17,-122.085852582875006 37.422303374697442 17,-122.085879994563896 37.422256861387893 17,-122.085886010140896 37.422231107613797 17,-122.085806915728796 37.422202501738553 17,-122.085837954265301 37.42214027058678 17,-122.085673264051906 37.422086902144081 17,-122.085602292640701 37.42214885429042 17,-122.085590277843593 37.422128290487002 17,-122.085584167223701 37.422081719672462 17,-122.085485206574106 37.42210455874995 17,-122.085506726435199 37.422142679498243 17,-122.085443071291493 37.422127838461719 17,-122.085099071490404 37.42251282407603 17,-122.085676981863202 37.422818153236513 17,-122.086016227378295 37.422449188587223 17,-122.085726032700407 37.422292396042529 17,-122.085741277148301 37.422270331552568 17))'
if ogrtest.check_feature_geometry(feat, wkt):
return 'fail'
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('expected feature not found.')
return 'fail'
wkt = 'POLYGON ((-122.085786228724203 37.421362088869692 25,-122.085731299060299 37.421369359894811 25,-122.085731299291794 37.421409349109027 25,-122.085607707367899 37.421383901665649 25,-122.085580242651602 37.42137299550869 25,-122.085218622197104 37.421372995043157 25,-122.085227776563897 37.421616565082651 25,-122.085259818934702 37.421605658944031 25,-122.085259818549901 37.421682001560001 25,-122.085236931147804 37.421700178603459 25,-122.085264395782801 37.421761979825753 25,-122.085323903274599 37.421761980139067 25,-122.085355945432397 37.421852864451999 25,-122.085410875246296 37.421889218237339 25,-122.085479537935697 37.42189285337048 25,-122.085543622981902 37.421889217975462 25,-122.085626017804202 37.421860134999257 25,-122.085937287963006 37.421860134536047 25,-122.085942871866607 37.42160898590042 25,-122.085965546986102 37.421579927591438 25,-122.085864046234093 37.421471150029568 25,-122.0858548911215 37.421405713261841 25,-122.085809116276806 37.4214057134039 25,-122.085786228724203 37.421362088869692 25))'
if ogrtest.check_feature_geometry(feat, wkt):
return 'fail'
feat = lyr.GetNextFeature()
if feat is None:
gdaltest.post_reason('expected feature not found.')
return 'fail'
wkt = 'POLYGON ((-122.084437112828397 37.421772530030907 19,-122.084511885574599 37.421911115428962 19,-122.0850470999805 37.421787551215353 19,-122.085071991339106 37.421436630231611 19,-122.084916406231997 37.421372378221157 19,-122.084219386816699 37.421372378016258 19,-122.084219386589993 37.421476171614962 19,-122.083808641999099 37.4214613409357 19,-122.083789972856394 37.421313064107963 19,-122.083279653469802 37.421293288405927 19,-122.083260981920702 37.421392139442979 19,-122.082937362173695 37.421372363998763 19,-122.082906242566693 37.421515697788713 19,-122.082850226966499 37.421762825764652 19,-122.082943578863507 37.421767769696352 19,-122.083217411188002 37.421792485526858 19,-122.0835970430103 37.421748007445601 19,-122.083945555677104 37.421693642376027 19,-122.084007789463698 37.421762838158529 19,-122.084113587521003 37.421748011043917 19,-122.084076247378405 37.421713412923751 19,-122.084144704773905 37.421678815345693 19,-122.084144704222993 37.421817206601972 19,-122.084250333307395 37.421817070044597 19,-122.084437112828397 37.421772530030907 19))'
if ogrtest.check_feature_geometry(feat, wkt):
return 'fail'
return 'success'
###############################################################################
# Generic write test, shared by the KML / KMZ / directory variants further below
def ogr_libkml_write(filename):
if ogrtest.kml_drv is None:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource(filename)
if filename != '/vsimem/libkml_use_doc_off.kmz':
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS('WGS72')
lyr = ds.CreateLayer('test_wgs72', srs=srs)
dst_feat = ogr.Feature(lyr.GetLayerDefn())
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49)'))
if lyr.CreateFeature(dst_feat) != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
if dst_feat.GetGeometryRef().ExportToWkt() != 'POINT (2 49)':
print(dst_feat.GetGeometryRef().ExportToWkt())
gdaltest.post_reason('CreateFeature changed the geometry.')
return 'fail'
lyr = ds.CreateLayer('test_wgs84')
fielddefn = ogr.FieldDefn('name', ogr.OFTString)
lyr.CreateField(fielddefn)
fielddefn = ogr.FieldDefn('description', ogr.OFTString)
lyr.CreateField(fielddefn)
fielddefn = ogr.FieldDefn('foo', ogr.OFTString)
lyr.CreateField(fielddefn)
dst_feat = ogr.Feature(lyr.GetLayerDefn())
dst_feat.SetField('name', 'my_name')
dst_feat.SetField('description', 'my_description')
dst_feat.SetField('foo', 'bar')
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49)'))
if lyr.CreateFeature(dst_feat) != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
dst_feat = ogr.Feature(lyr.GetLayerDefn())
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49 1)'))
if lyr.CreateFeature(dst_feat) != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
dst_feat = ogr.Feature(lyr.GetLayerDefn())
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('LINESTRING (0 1,2 3)'))
if lyr.CreateFeature(dst_feat) != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
dst_feat = ogr.Feature(lyr.GetLayerDefn())
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON ((0 0 0,0 1 0,1 1 0,1 0 0,0 0 0),(0.25 0.25 0,0.25 0.75 0,0.75 0.75 0,0.75 0.25 0,0.25 0.25 0))'))
if lyr.CreateFeature(dst_feat) != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
dst_feat = ogr.Feature(lyr.GetLayerDefn())
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('MULTIPOINT (2 49,2 49)'))
if lyr.CreateFeature(dst_feat) != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
dst_feat = ogr.Feature(lyr.GetLayerDefn())
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('MULTILINESTRING ((0 1,2 3),(0 1,2 3))'))
if lyr.CreateFeature(dst_feat) != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
dst_feat = ogr.Feature(lyr.GetLayerDefn())
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('MULTIPOLYGON (((0 0 0,0 1 0,1 1 0,1 0 0,0 0 0),(0.25 0.25 0,0.25 0.75 0,0.75 0.75 0,0.75 0.25 0,0.25 0.25 0)),((-0.25 0.25 0,-0.25 0.75 0,-0.75 0.75 0,-0.75 0.25 0,-0.25 0.25 0)))'))
if lyr.CreateFeature(dst_feat) != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
dst_feat = ogr.Feature(lyr.GetLayerDefn())
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('GEOMETRYCOLLECTION (POINT (2 49 1),LINESTRING (0 1,2 3))'))
if lyr.CreateFeature(dst_feat) != 0:
gdaltest.post_reason('CreateFeature failed.')
return 'fail'
ds = None
return 'success'
###############################################################################
# Check previous test
def ogr_libkml_check_write(filename):
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open(filename)
if filename != '/vsimem/libkml_use_doc_off.kmz':
lyr = ds.GetLayerByName('test_wgs84')
else:
lyr = ds.GetLayer(0)
if lyr.GetFeatureCount() != 8:
gdaltest.post_reason('Bad feature count.')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField('name') != 'my_name':
print(feat.GetField('name'))
gdaltest.post_reason('Unexpected name.')
return 'fail'
if feat.GetField('description') != 'my_description':
print(feat.GetField('description'))
gdaltest.post_reason('Unexpected description.')
return 'fail'
if feat.GetField('foo') != 'bar':
print(feat.GetField('foo'))
gdaltest.post_reason('Unexpected foo.')
return 'fail'
if feat.GetGeometryRef().ExportToWkt() != 'POINT (2 49 0)':
print(feat.GetGeometryRef().ExportToWkt())
gdaltest.post_reason('Unexpected geometry.')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'POINT (2 49 1)':
print(feat.GetGeometryRef().ExportToWkt())
gdaltest.post_reason('Unexpected geometry.')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'LINESTRING (0 1 0,2 3 0)':
print(feat.GetGeometryRef().ExportToWkt())
gdaltest.post_reason('Unexpected geometry.')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'POLYGON ((0 0 0,0 1 0,1 1 0,1 0 0,0 0 0),(0.25 0.25 0,0.25 0.75 0,0.75 0.75 0,0.75 0.25 0,0.25 0.25 0))':
print(feat.GetGeometryRef().ExportToWkt())
gdaltest.post_reason('Unexpected geometry.')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTIPOINT (2 49 0,2 49 0)':
print(feat.GetGeometryRef().ExportToWkt())
gdaltest.post_reason('Unexpected geometry.')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTILINESTRING ((0 1 0,2 3 0),(0 1 0,2 3 0))':
print(feat.GetGeometryRef().ExportToWkt())
gdaltest.post_reason('Unexpected geometry.')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'MULTIPOLYGON (((0 0 0,0 1 0,1 1 0,1 0 0,0 0 0),(0.25 0.25 0,0.25 0.75 0,0.75 0.75 0,0.75 0.25 0,0.25 0.25 0)),((-0.25 0.25 0,-0.25 0.75 0,-0.75 0.75 0,-0.75 0.25 0,-0.25 0.25 0)))':
print(feat.GetGeometryRef().ExportToWkt())
gdaltest.post_reason('Unexpected geometry.')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'GEOMETRYCOLLECTION (POINT (2 49 1),LINESTRING (0 1 0,2 3 0))':
print(feat.GetGeometryRef().ExportToWkt())
gdaltest.post_reason('Unexpected geometry.')
return 'fail'
ds = None
return 'success'
###############################################################################
def ogr_libkml_write_kml():
return ogr_libkml_write('/vsimem/libkml.kml')
def ogr_libkml_check_write_kml():
return ogr_libkml_check_write('/vsimem/libkml.kml')
def ogr_libkml_write_kmz():
return ogr_libkml_write('/vsimem/libkml.kmz')
def ogr_libkml_check_write_kmz():
return ogr_libkml_check_write('/vsimem/libkml.kmz')
def ogr_libkml_write_kmz_use_doc_off():
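    # LIBKML_USE_DOC.KML=NO disables writing the top-level doc.kml inside the KMZ;
    # the companion check therefore opens the first layer by index rather than by name.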
gdal.SetConfigOption("LIBKML_USE_DOC.KML", "NO")
ret = ogr_libkml_write('/vsimem/libkml_use_doc_off.kmz')
gdal.SetConfigOption("LIBKML_USE_DOC.KML", None)
return ret
def ogr_libkml_check_write_kmz_use_doc_off():
return ogr_libkml_check_write('/vsimem/libkml_use_doc_off.kmz')
def ogr_libkml_write_dir():
return ogr_libkml_write('/vsimem/libkmldir')
def ogr_libkml_check_write_dir():
if not ogrtest.have_read_libkml:
return 'skip'
ret = ogr_libkml_check_write('/vsimem/libkmldir')
files = gdal.ReadDir('/vsimem/libkmldir')
for filename in files:
gdal.Unlink('/vsimem/libkmldir/' + filename)
gdal.Rmdir('/vsimem/libkmldir')
return ret
###############################################################################
# Test reading attributes with XML content in them
#
def ogr_libkml_xml_attributes():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/description_with_xml.kml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetField('description').find('Description<br></br><i attr="val">Interesting</i><br></br>') != 0:
gdaltest.post_reason('Wrong description field value')
print('got: %s ' % feat.GetField('description'))
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading all geometry types (#3558)
#
def ogr_libkml_read_geometries():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/geometries.kml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
while feat is not None:
feat = lyr.GetNextFeature()
ds = None
return 'success'
###############################################################################
# Run test_ogrsf
def ogr_libkml_test_ogrsf():
if not ogrtest.have_read_libkml:
return 'skip'
import test_cli_utilities
if test_cli_utilities.get_test_ogrsf_path() is None:
return 'skip'
ret = gdaltest.runexternal(test_cli_utilities.get_test_ogrsf_path() + ' --config OGR_SKIP KML -ro data/samples.kml')
if ret.find("using driver `LIBKML'") == -1 or ret.find('INFO') == -1 or ret.find('ERROR') != -1:
print(ret)
return 'fail'
return 'success'
###############################################################################
# Test reading KML with only Placemark
def ogr_libkml_read_placemark():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/placemark.kml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat is None:
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading KML without any layer
def ogr_libkml_read_empty():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/empty.kml')
if ds.GetLayerCount() != 0:
gdaltest.post_reason('failed')
print(ds.GetLayerCount())
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading KML with empty layers
def ogr_libkml_read_emptylayers():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/emptylayers.kml')
if ds.GetLayerCount() != 2:
gdaltest.post_reason('failed')
print(ds.GetLayerCount())
return 'fail'
# --> One difference with the old KML driver
if ds.GetLayer(0).GetFeatureCount() != 1:
gdaltest.post_reason('failed')
print(ds.GetLayer(0).GetFeatureCount())
return 'fail'
if ds.GetLayer(1).GetFeatureCount() != 0:
gdaltest.post_reason('failed')
print(ds.GetLayer(1).GetFeatureCount())
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading KML with empty layers without folder
def ogr_libkml_read_emptylayers_without_folder():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/emptylayers_without_folder.kml')
if ds.GetLayerCount() != 1:
gdaltest.post_reason('failed')
print(ds.GetLayerCount())
return 'fail'
# --> One difference with the old KML driver
if ds.GetLayer(0).GetName() != 'Test':
gdaltest.post_reason('failed')
print("Layer name must be '" + ds.GetLayer(0).GetName() + "'.")
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading KML with Schema
def ogr_libkml_read_schema():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/test_schema.kml')
if ds.GetLayerCount() != 4:
gdaltest.post_reason('failed')
print(ds.GetLayerCount())
return 'fail'
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetField('foo') != 'bar':
gdaltest.post_reason('failed')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayer(1)
feat = lyr.GetNextFeature()
if feat.GetField('foo') != 'baz':
gdaltest.post_reason('failed')
feat.DumpReadable()
return 'fail'
lyr = ds.GetLayer(2)
if lyr.GetLayerDefn().GetFieldIndex('foo') != -1:
gdaltest.post_reason('failed')
return 'fail'
lyr = ds.GetLayer(3)
if lyr.GetLayerDefn().GetFieldIndex('foo') != -1:
gdaltest.post_reason('failed')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading KML with <Data> elements of <ExtendedData> in case
# <ExtendedData> doesn't use a <SchemaData> (test changeset r22127)
def ogr_libkml_extended_data_without_schema_data():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/extended_data_without_schema_data.kml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetField('field1') != '1_1':
gdaltest.post_reason('failed')
feat.DumpReadable()
return 'fail'
if feat.GetField('field2') != '1_2':
gdaltest.post_reason('failed')
feat.DumpReadable()
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetField('field1') != '2_1':
gdaltest.post_reason('failed')
feat.DumpReadable()
return 'fail'
if feat.IsFieldSet('field2'):
gdaltest.post_reason('failed')
feat.DumpReadable()
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading KML with <gx:Track> element (#5095)
def ogr_libkml_gxtrack():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/gxtrack.kml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetField('begin') != '2013/05/28 12:00:00' or \
feat.GetField('end') != '2013/05/28 13:00:00' or \
feat.GetGeometryRef().ExportToWkt() != 'LINESTRING (2 49,3 50)':
feat.DumpReadable()
gdaltest.post_reason('failure')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading KML with <gx:MultiTrack> element
def ogr_libkml_gxmultitrack():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/gxmultitrack.kml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetField('begin') != '2013/05/28 12:00:00' or \
feat.GetField('end') != '2013/05/28 13:00:00' or \
feat.GetGeometryRef().ExportToWkt() != 'MULTILINESTRING ((2 49,3 50))':
feat.DumpReadable()
gdaltest.post_reason('failure')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test generating and reading KML with <Camera> element
def ogr_libkml_camera():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_camera.kml")
lyr = ds.CreateLayer('test')
lyr.CreateField(ogr.FieldDefn("heading", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("tilt", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("roll", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("altitudeMode", ogr.OFTString))
dst_feat = ogr.Feature(lyr.GetLayerDefn())
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49)'))
dst_feat.SetField("heading", 70)
dst_feat.SetField("tilt", 75)
dst_feat.SetField("roll", 10)
with gdaltest.error_handler():
lyr.CreateFeature(dst_feat)
dst_feat = ogr.Feature(lyr.GetLayerDefn())
dst_feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (3 50 1)'))
dst_feat.SetField("heading", -70)
dst_feat.SetField("altitudeMode", "relativeToGround")
lyr.CreateFeature(dst_feat)
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_camera.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<Camera>') == -1 or \
data.find('<longitude>2</longitude>') == -1 or \
data.find('<latitude>49</latitude>') == -1 or \
data.find('<heading>70</heading>') == -1 or \
data.find('<tilt>75</tilt>') == -1 or \
data.find('<roll>10</roll>') == -1 or \
data.find('<altitudeMode>relativeToGround</altitudeMode>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
ds = ogr.Open('/vsimem/ogr_libkml_camera.kml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if (feat.GetGeometryRef().ExportToWkt() != 'POINT (2 49 0)' or
feat.GetField("heading") != 70.0 or
feat.GetField("tilt") != 75.0 or
feat.GetField("roll") != 10.0):
feat.DumpReadable()
gdaltest.post_reason('failure')
return 'fail'
feat = lyr.GetNextFeature()
if (feat.GetGeometryRef().ExportToWkt() != 'POINT (3 50 1)' or
feat.GetField("heading") != -70.0 or
feat.IsFieldSet("tilt") or
feat.IsFieldSet("roll") or
feat.GetField("altitudeMode") != 'relativeToGround'):
feat.DumpReadable()
gdaltest.post_reason('failure')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test generating a LookAt element at Document level
def ogr_libkml_write_layer_lookat():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_write_layer_lookat.kml")
options = ['LOOKAT_LONGITUDE=2', 'LOOKAT_LATITUDE=49', 'LOOKAT_RANGE=150']
ds.CreateLayer('test', options=options)
options = ['LOOKAT_LONGITUDE=3', 'LOOKAT_LATITUDE=50', 'LOOKAT_RANGE=250',
'LOOKAT_ALTITUDE=100', 'LOOKAT_HEADING=70', 'LOOKAT_TILT=50', 'LOOKAT_ALTITUDEMODE=relativeToGround']
ds.CreateLayer('test2', options=options)
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_layer_lookat.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<LookAt>') == -1 or \
data.find('<longitude>2</longitude>') == -1 or \
data.find('<latitude>49</latitude>') == -1 or \
data.find('<range>150</range>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
if data.find('<LookAt>') == -1 or \
data.find('<longitude>3</longitude>') == -1 or \
data.find('<latitude>50</latitude>') == -1 or \
data.find('<altitude>100</altitude>') == -1 or \
data.find('<heading>70</heading>') == -1 or \
data.find('<tilt>50</tilt>') == -1 or \
       data.find('<range>250</range>') == -1 or \
data.find('<altitudeMode>relativeToGround</altitudeMode>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test generating a Camera element at Document level
def ogr_libkml_write_layer_camera():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_write_layer_camera.kml")
options = ['CAMERA_LONGITUDE=3', 'CAMERA_LATITUDE=50', 'CAMERA_ALTITUDE=100',
'CAMERA_HEADING=70', 'CAMERA_TILT=50', 'CAMERA_ROLL=10', 'CAMERA_ALTITUDEMODE=relativeToGround']
ds.CreateLayer('test', options=options)
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_layer_camera.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<Camera>') == -1 or \
data.find('<longitude>3</longitude>') == -1 or \
data.find('<latitude>50</latitude>') == -1 or \
data.find('<altitude>100</altitude>') == -1 or \
data.find('<heading>70</heading>') == -1 or \
data.find('<tilt>50</tilt>') == -1 or \
data.find('<roll>10</roll>') == -1 or \
data.find('<altitudeMode>relativeToGround</altitudeMode>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing MultiGeometry
def ogr_libkml_write_multigeometry():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_write_multigeometry.kml")
lyr = ds.CreateLayer('test')
feat = ogr.Feature(lyr.GetLayerDefn())
# Transformed into POINT per ATC 66
feat.SetGeometry(ogr.CreateGeometryFromWkt('MULTIPOINT(0 1)'))
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
# Warning emitted per ATC 66
feat.SetGeometry(ogr.CreateGeometryFromWkt('MULTIPOINT EMPTY'))
with gdaltest.error_handler():
lyr.CreateFeature(feat)
ds = None
ds = ogr.Open("/vsimem/ogr_libkml_write_multigeometry.kml")
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'POINT (0 1 0)':
feat.DumpReadable()
gdaltest.post_reason('failure')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetGeometryRef().ExportToWkt() != 'GEOMETRYCOLLECTION EMPTY':
feat.DumpReadable()
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing <snippet>
def ogr_libkml_write_snippet():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_write_snippet.kml")
lyr = ds.CreateLayer('test')
lyr.CreateField(ogr.FieldDefn("snippet", ogr.OFTString))
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField('snippet', 'test_snippet')
feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT(0 1)'))
lyr.CreateFeature(feat)
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_snippet.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<snippet>test_snippet</snippet>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
ds = ogr.Open("/vsimem/ogr_libkml_write_snippet.kml")
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetField('snippet') != 'test_snippet':
feat.DumpReadable()
gdaltest.post_reason('failure')
return 'fail'
if feat.GetGeometryRef().ExportToWkt() != 'POINT (0 1 0)':
feat.DumpReadable()
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing <atom:author>
def ogr_libkml_write_atom_author():
if not ogrtest.have_read_libkml:
return 'skip'
filepath = '/vsimem/ogr_libkml_write_atom_author.kml'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource(filepath,
options=['author_name=name', 'author_uri=http://foo', '[email protected]'])
if ds is None:
gdaltest.post_reason('Unable to create %s.' % filepath)
return 'fail'
ds = None
f = gdal.VSIFOpenL(filepath, 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<kml xmlns="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom">') == -1 or \
data.find('<atom:name>name</atom:name>') == -1 or \
data.find('<atom:uri>http://foo</atom:uri>') == -1 or \
data.find('<atom:email>[email protected]</atom:email>') == -1:
print(data)
gdaltest.post_reason('failure to find an atom string')
return 'fail'
return 'success'
###############################################################################
# Test writing <atom:link>
def ogr_libkml_write_atom_link():
if not ogrtest.have_read_libkml:
return 'skip'
filepath = '/vsimem/ogr_libkml_write_atom_link.kml'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource(filepath,
options=['link=http://foo'])
if ds is None:
gdaltest.post_reason('Unable to create %s.' % filepath)
return 'fail'
ds = None
f = gdal.VSIFOpenL(filepath, 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<kml xmlns="http://www.opengis.net/kml/2.2" xmlns:atom="http://www.w3.org/2005/Atom">') == -1 or \
data.find('<atom:link href="http://foo" rel="related"/>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing <phoneNumber>
def ogr_libkml_write_phonenumber():
if not ogrtest.have_read_libkml:
return 'skip'
filepath = '/vsimem/ogr_libkml_write_phonenumber.kml'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource(filepath,
options=['phonenumber=tel:911'])
if ds is None:
gdaltest.post_reason('Unable to create %s.' % filepath)
return 'fail'
ds = None
f = gdal.VSIFOpenL(filepath, 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<phoneNumber>tel:911</phoneNumber>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing Region
def ogr_libkml_write_region():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_write_region.kml")
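    # 'auto' layer: with only ADD_REGION=YES, the Region box is derived from the layer
    # extent and the default LOD values are used (checked further below).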
lyr = ds.CreateLayer('auto', options=['ADD_REGION=YES'])
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt('POLYGON((2 48,2 49,3 49,3 48,2 48))'))
lyr.CreateFeature(feat)
lyr = ds.CreateLayer('manual', options=['ADD_REGION=YES', 'REGION_XMIN=-180',
'REGION_XMAX=180', 'REGION_YMIN=-90', 'REGION_YMAX=90',
'REGION_MIN_LOD_PIXELS=128', 'REGION_MAX_LOD_PIXELS=10000000',
'REGION_MIN_FADE_EXTENT=1', 'REGION_MAX_FADE_EXTENT=2'])
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_region.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<north>49</north>') == -1 or \
data.find('<south>48</south>') == -1 or \
data.find('<east>3</east>') == -1 or \
data.find('<west>2</west>') == -1 or \
data.find('<minLodPixels>256</minLodPixels>') == -1 or \
data.find('<maxLodPixels>-1</maxLodPixels>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
if data.find('<north>90</north>') == -1 or \
data.find('<south>-90</south>') == -1 or \
data.find('<east>180</east>') == -1 or \
data.find('<west>-180</west>') == -1 or \
data.find('<minLodPixels>128</minLodPixels>') == -1 or \
data.find('<maxLodPixels>10000000</maxLodPixels>') == -1 or \
data.find('<minFadeExtent>1</minFadeExtent>') == -1 or \
data.find('<maxFadeExtent>2</maxFadeExtent>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing ScreenOverlay
def ogr_libkml_write_screenoverlay():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_write_screenoverlay.kml")
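    # 'auto' layer: only SO_HREF is given, so the driver's default overlay/screen
    # coordinates apply; the 'manual' layer below sets every option explicitly.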
ds.CreateLayer('auto', options=['SO_HREF=http://foo'])
ds.CreateLayer('manual', options=['SO_HREF=http://bar',
'SO_NAME=name',
'SO_DESCRIPTION=description',
'SO_OVERLAY_X=10',
'SO_OVERLAY_Y=20',
'SO_OVERLAY_XUNITS=pixels',
'SO_OVERLAY_YUNITS=pixels',
'SO_SCREEN_X=0.4',
'SO_SCREEN_Y=0.5',
'SO_SCREEN_XUNITS=fraction',
'SO_SCREEN_YUNITS=fraction',
'SO_SIZE_X=1.1',
'SO_SIZE_Y=1.2',
'SO_SIZE_XUNITS=fraction',
'SO_SIZE_YUNITS=fraction'])
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_screenoverlay.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<href>http://foo</href>') == -1 or \
data.find('<screenXY x="0.05" xunits="fraction" y="0.05" yunits="fraction"/>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
if data.find('<overlayXY x="10" xunits="pixels" y="20" yunits="pixels"/>') == -1 or \
data.find('<screenXY x="0.4" xunits="fraction" y="0.5" yunits="fraction"/>') == -1 or \
data.find('<size x="1.1" xunits="fraction" y="1.2" yunits="fraction"/>') == -1 or \
data.find('<name>name</name>') == -1 or \
data.find('<description>description</description>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing Model
def ogr_libkml_write_model():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_write_model.kml")
lyr = ds.CreateLayer('test')
lyr.CreateField(ogr.FieldDefn("model", ogr.OFTString))
lyr.CreateField(ogr.FieldDefn("heading", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("tilt", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("roll", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("altitudeMode", ogr.OFTString))
lyr.CreateField(ogr.FieldDefn("scale_x", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("scale_y", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("scale_z", ogr.OFTReal))
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49 10)'))
feat.SetField("tilt", 75)
feat.SetField("roll", 10)
feat.SetField("heading", -70)
feat.SetField("scale_x", 2)
feat.SetField("scale_y", 3)
feat.SetField("scale_z", 4)
feat.SetField("altitudeMode", "relativeToGround")
feat.SetField("model", "http://makc.googlecode.com/svn/trunk/flash/sandy_flar2/cube.dae")
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT (2 49)'))
feat.SetField("model", "http://foo")
lyr.CreateFeature(feat)
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_model.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<longitude>2</longitude>') == -1 or \
data.find('<latitude>49</latitude>') == -1 or \
data.find('<altitude>10</altitude>') == -1 or \
data.find('<altitudeMode>relativeToGround</altitudeMode>') == -1 or \
data.find('<heading>-70</heading>') == -1 or \
data.find('<tilt>75</tilt>') == -1 or \
data.find('<roll>10</roll>') == -1 or \
data.find('<x>2</x>') == -1 or \
data.find('<y>3</y>') == -1 or \
data.find('<z>4</z>') == -1 or \
data.find('<x>1</x>') == -1 or \
data.find('<y>1</y>') == -1 or \
data.find('<z>1</z>') == -1 or \
data.find('<href>http://makc.googlecode.com/svn/trunk/flash/sandy_flar2/cube.dae</href>') == -1 or \
data.find('<href>http://foo</href>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
    # This can only appear if the HTTP resource is available and GDAL is built with curl/http support
if gdal.GetDriverByName('HTTP') is not None and \
(data.find('<targetHref>http://makc.googlecode.com/svn/trunk/flash/sandy_flar2/cube.gif</targetHref>') == -1 or
data.find('<sourceHref>cube.gif</sourceHref>') == -1):
if gdaltest.gdalurlopen('http://makc.googlecode.com/svn/trunk/flash/sandy_flar2/cube.dae') is not None:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test read / write of style
def ogr_libkml_read_write_style():
if not ogrtest.have_read_libkml:
return 'skip'
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_read_write_style_read.kml', 'wb')
styles = """<Style id="style1">
<IconStyle>
<color>01234567</color>
<scale>1.1</scale>
<heading>50</heading>
<Icon>
<href>http://style1</href>
</Icon>
<hotSpot x="15" y="20"/>
</IconStyle>
<LabelStyle>
<color>01234567</color>
<scale>1.1</scale>
</LabelStyle>
<BalloonStyle>
<bgColor>ff00ffff</bgColor>
<text><![CDATA[This is $[name], whose description is:<br/>$[description]]]></text>
</BalloonStyle>
</Style>
<Style id="style2">
<LineStyle>
<color>01234567</color>
<width>1</width>
</LineStyle>
<PolyStyle>
<color>01234567</color>
</PolyStyle>
</Style>"""
content = """<kml xmlns="http://www.opengis.net/kml/2.2">
<Document>
%s
<StyleMap id="styleMapExample">
<Pair>
<key>normal</key>
<Style id="inline_style">
<IconStyle>
<Icon>
<href>http://inline_style</href>
</Icon>
</IconStyle>
</Style>
</Pair>
<Pair>
<key>highlight</key>
<styleUrl>#style2</styleUrl>
</Pair>
</StyleMap>
</Document>
</kml>""" % styles
resolved_stylemap = """<Style id="styleMapExample">
<IconStyle>
<Icon>
<href>http://inline_style</href>
</Icon>
</IconStyle>
</Style>"""
resolved_stylemap_highlight = """<Style id="styleMapExample">
<LineStyle>
<color>01234567</color>
<width>1</width>
</LineStyle>
<PolyStyle>
<color>01234567</color>
</PolyStyle>
</Style>"""
gdal.VSIFWriteL(content, 1, len(content), f)
gdal.VSIFCloseL(f)
src_ds = ogr.Open('/vsimem/ogr_libkml_read_write_style_read.kml')
style_table = src_ds.GetStyleTable()
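    # BalloonStyle is not carried through the OGR style table, so it is re-injected on
    # write through the per-style datasource creation options below.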
options = ['style1_balloonstyle_bgcolor=#FFFF00',
'style1_balloonstyle_text=This is $[name], whose description is:<br/>$[description]']
ds = ogr.GetDriverByName('LIBKML').CreateDataSource('/vsimem/ogr_libkml_read_write_style_write.kml', options=options)
ds.SetStyleTable(style_table)
ds = None
src_ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_read_write_style_write.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
lines = [l.strip() for l in data.split('\n')]
lines_got = lines[lines.index('<Style id="style1">'):lines.index('<Style id="styleMapExample">')]
lines_ref = [l.strip() for l in styles.split('\n')]
if lines_got != lines_ref:
print(data)
print(styles)
gdaltest.post_reason('failure')
return 'fail'
lines_got = lines[lines.index('<Style id="styleMapExample">'):lines.index('</Document>')]
lines_ref = [l.strip() for l in resolved_stylemap.split('\n')]
if lines_got != lines_ref:
print(data)
print(resolved_stylemap)
gdaltest.post_reason('failure')
return 'fail'
# Test reading highlight style in StyleMap
gdal.SetConfigOption('LIBKML_STYLEMAP_KEY', 'HIGHLIGHT')
src_ds = ogr.Open('/vsimem/ogr_libkml_read_write_style_read.kml')
style_table = src_ds.GetStyleTable()
gdal.SetConfigOption('LIBKML_STYLEMAP_KEY', None)
ds = ogr.GetDriverByName('LIBKML').CreateDataSource('/vsimem/ogr_libkml_read_write_style_write.kml')
ds.SetStyleTable(style_table)
ds = None
src_ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_read_write_style_write.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
lines = [l.strip() for l in data.split('\n')]
lines_got = lines[lines.index('<Style id="styleMapExample">'):lines.index('</Document>')]
lines_ref = [l.strip() for l in resolved_stylemap_highlight.split('\n')]
if lines_got != lines_ref:
print(data)
print(resolved_stylemap_highlight)
gdaltest.post_reason('failure')
return 'fail'
# Test writing feature style
ds = ogr.GetDriverByName('LIBKML').CreateDataSource('/vsimem/ogr_libkml_read_write_style_write.kml')
lyr = ds.CreateLayer('test')
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetStyleString('@unknown_style')
lyr.CreateFeature(feat)
feat = None
feat = ogr.Feature(lyr.GetLayerDefn())
style_string = 'PEN(c:#01234567,w:5.000000px);BRUSH(fc:#01234567);SYMBOL(id:"http://foo",a:50.000000,c:#01234567,s:1.100000);LABEL(c:#01234567,w:150.000000)'
feat.SetStyleString(style_string)
lyr.CreateFeature(feat)
feat = None
ds = None
ds = ogr.Open('/vsimem/ogr_libkml_read_write_style_write.kml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetStyleString() != '@unknown_style':
print(feat.GetStyleString())
gdaltest.post_reason('failure')
return 'fail'
feat = lyr.GetNextFeature()
if feat.GetStyleString() != style_string:
print(feat.GetStyleString())
gdaltest.post_reason('failure')
return 'fail'
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_read_write_style_write.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
expected_style = """<Style>
<IconStyle>
<color>67452301</color>
<scale>1.1</scale>
<heading>50</heading>
<Icon>
<href>http://foo</href>
</Icon>
</IconStyle>
<LabelStyle>
<color>67452301</color>
<scale>1.5</scale>
</LabelStyle>
<LineStyle>
<color>67452301</color>
<width>5</width>
</LineStyle>
<PolyStyle>
<color>67452301</color>
</PolyStyle>
</Style>"""
lines = [l.strip() for l in data.split('\n')]
lines_got = lines[lines.index('<Style>'):lines.index('</Style>') + 1]
lines_ref = [l.strip() for l in expected_style.split('\n')]
if lines_got != lines_ref:
print(data)
        print(expected_style)
gdaltest.post_reason('failure')
return 'fail'
# Automatic StyleMap creation testing
ds = ogr.GetDriverByName('LIBKML').CreateDataSource('/vsimem/ogr_libkml_read_write_style_write.kml')
style_table = ogr.StyleTable()
style_table.AddStyle('style1_normal', 'SYMBOL(id:"http://style1_normal",c:#67452301)')
style_table.AddStyle('style1_highlight', 'SYMBOL(id:"http://style1_highlight",c:#10325476)')
ds.SetStyleTable(style_table)
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_read_write_style_write.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
lines = [l.strip() for l in data.split('\n')]
expected_styles = """<Style id="style1_normal">
<IconStyle>
<color>01234567</color>
<Icon>
<href>http://style1_normal</href>
</Icon>
</IconStyle>
</Style>
<Style id="style1_highlight">
<IconStyle>
<color>76543210</color>
<Icon>
<href>http://style1_highlight</href>
</Icon>
</IconStyle>
</Style>
<StyleMap id="style1">
<Pair>
<key>normal</key>
<styleUrl>#style1_normal</styleUrl>
</Pair>
<Pair>
<key>highlight</key>
<styleUrl>#style1_highlight</styleUrl>
</Pair>
</StyleMap>"""
lines_got = lines[lines.index('<Style id="style1_normal">'):lines.index('</StyleMap>') + 1]
lines_ref = [l.strip() for l in expected_styles.split('\n')]
if lines_got != lines_ref:
print(data)
        print(expected_styles)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing Update
def ogr_libkml_write_update():
if not ogrtest.have_read_libkml:
return 'skip'
for i in range(3):
if i == 0:
name = "/vsimem/ogr_libkml_write_update.kml"
elif i == 1:
name = "/vsimem/ogr_libkml_write_update.kmz"
else:
name = "/vsimem/ogr_libkml_write_update_dir"
ds = ogr.GetDriverByName('LIBKML').CreateDataSource(name,
options=['UPDATE_TARGETHREF=http://foo'])
lyr = ds.CreateLayer('layer_to_edit')
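        # In update mode, CreateFeature maps to <Create>, SetFeature to <Change> and
        # DeleteFeature to <Delete> inside the NetworkLinkControl <Update> element.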
feat = ogr.Feature(lyr.GetLayerDefn())
with gdaltest.error_handler():
lyr.CreateFeature(feat)
feat.SetFID(10)
lyr.CreateFeature(feat)
feat.SetFID(2)
lyr.SetFeature(feat)
lyr.DeleteFeature(3)
ds = None
if i == 0:
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_update.kml', 'rb')
elif i == 1:
f = gdal.VSIFOpenL('/vsizip//vsimem/ogr_libkml_write_update.kmz', 'rb')
else:
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_update_dir/doc.kml', 'rb')
if f is None:
gdaltest.post_reason('Unable to open the write_update file.')
return 'fail'
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<NetworkLinkControl>') == -1 or \
data.find('<Update>') == -1 or \
data.find('<targetHref>http://foo</targetHref>') == -1 or \
data.find('<Placemark/>') == -1 or \
data.find('<Placemark id="layer_to_edit.10"/>') == -1 or \
data.find('<Create>') == -1 or \
data.find('<Document targetId="layer_to_edit">') == -1 or \
data.find('<Change>') == -1 or \
data.find('<Placemark targetId="layer_to_edit.2"/>') == -1 or \
data.find('<Delete>') == -1 or \
data.find('<Placemark targetId="layer_to_edit.3"/>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing NetworkLinkControl
def ogr_libkml_write_networklinkcontrol():
if not ogrtest.have_read_libkml:
return 'skip'
options = ['NLC_MINREFRESHPERIOD=3600',
'NLC_MAXSESSIONLENGTH=-1',
'NLC_COOKIE=cookie',
'NLC_MESSAGE=message',
'NLC_LINKNAME=linkname',
'NLC_LINKDESCRIPTION=linkdescription',
'NLC_LINKSNIPPET=linksnippet',
'NLC_EXPIRES=2014-12-31T23:59:59Z']
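    # Write the same NetworkLinkControl options into the three container types:
    # plain .kml, zipped .kmz, and a directory datasource.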
for i in range(3):
if i == 0:
name = "/vsimem/ogr_libkml_write_networklinkcontrol.kml"
elif i == 1:
name = "/vsimem/ogr_libkml_write_networklinkcontrol.kmz"
else:
name = "/vsimem/ogr_libkml_write_networklinkcontrol_dir"
ds = ogr.GetDriverByName('LIBKML').CreateDataSource(name, options=options)
if ds is None:
gdaltest.post_reason('Unable to create %s.' % name)
return 'fail'
ds = None
if i == 0:
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_networklinkcontrol.kml', 'rb')
elif i == 1:
f = gdal.VSIFOpenL('/vsizip//vsimem/ogr_libkml_write_networklinkcontrol.kmz', 'rb')
else:
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_networklinkcontrol_dir/doc.kml', 'rb')
if f is None:
gdaltest.post_reason('failure')
return 'fail'
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<minRefreshPeriod>3600</minRefreshPeriod>') == -1 or \
data.find('<maxSessionLength>-1</maxSessionLength>') == -1 or \
data.find('<cookie>cookie</cookie>') == -1 or \
data.find('<message>message</message>') == -1 or \
data.find('<linkName>linkname</linkName>') == -1 or \
data.find('<linkDescription>linkdescription</linkDescription>') == -1 or \
data.find('<linkSnippet>linksnippet</linkSnippet>') == -1 or \
data.find('<expires>2014-12-31T23:59:59Z</expires>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing ListStyle
def ogr_libkml_write_liststyle():
if not ogrtest.have_read_libkml:
return 'skip'
options = ['LISTSTYLE_ICON_HREF=http://www.gdal.org/gdalicon.png']
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_write_liststyle.kml", options=options)
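    # The datasource-level icon href goes into the 'root_doc_liststyle' Style, while the
    # per-layer options below each produce their own layer ListStyle (checked further down).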
ds.CreateLayer('test', options=['LISTSTYLE_ICON_HREF=http://foo'])
ds.CreateLayer('test_check', options=['LISTSTYLE_TYPE=check'])
ds.CreateLayer('test_radioFolder', options=['LISTSTYLE_TYPE=radioFolder'])
ds.CreateLayer('test_checkOffOnly', options=['LISTSTYLE_TYPE=checkOffOnly'])
ds.CreateLayer('test_checkHideChildren', options=['LISTSTYLE_TYPE=checkHideChildren'])
with gdaltest.error_handler():
ds.CreateLayer('test_error', options=['LISTSTYLE_TYPE=error'])
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_liststyle.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<styleUrl>#root_doc_liststyle</styleUrl>') == -1 or \
data.find('<Style id="root_doc_liststyle">') == -1 or \
data.find('<href>http://www.gdal.org/gdalicon.png</href>') == -1 or \
data.find('<styleUrl>#test_liststyle</styleUrl>') == -1 or \
data.find('<Style id="test_liststyle">') == -1 or \
data.find('<href>http://foo</href>') == -1 or \
data.find('<listItemType>check</listItemType>') == -1 or \
data.find('<listItemType>radioFolder</listItemType>') == -1 or \
data.find('<listItemType>checkOffOnly</listItemType>') == -1 or \
data.find('<listItemType>checkHideChildren</listItemType>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing NetworkLink
def ogr_libkml_write_networklink():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_write_networklink.kml")
lyr = ds.CreateLayer('test')
lyr.CreateField(ogr.FieldDefn('name', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn("networklink", ogr.OFTString))
lyr.CreateField(ogr.FieldDefn("networklink_refreshvisibility", ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn("networklink_flytoview", ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn("networklink_refreshMode", ogr.OFTString))
lyr.CreateField(ogr.FieldDefn("networklink_refreshInterval", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("networklink_viewRefreshMode", ogr.OFTString))
lyr.CreateField(ogr.FieldDefn("networklink_viewRefreshTime", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("networklink_viewBoundScale", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("networklink_viewFormat", ogr.OFTString))
lyr.CreateField(ogr.FieldDefn("networklink_httpQuery", ogr.OFTString))
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField("name", "a network link")
feat.SetField("networklink", "http://developers.google.com/kml/documentation/Point.kml")
feat.SetField("networklink_refreshVisibility", 1)
feat.SetField("networklink_flyToView", 1)
feat.SetField("networklink_refreshInterval", 60)
feat.SetField("networklink_httpQuery", "[clientVersion]")
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField("networklink", "http://developers.google.com/kml/documentation/Point.kml")
feat.SetField("networklink_viewRefreshTime", 30)
lyr.CreateFeature(feat)
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField("networklink", "http://developers.google.com/kml/documentation/Point.kml")
feat.SetField("networklink_refreshMode", 'onExpire')
feat.SetField("networklink_viewRefreshMode", 'onRegion')
feat.SetField("networklink_viewBoundScale", 0.5)
feat.SetField("networklink_viewFormat", 'BBOX=[bboxWest],[bboxSouth],[bboxEast],[bboxNorth]')
lyr.CreateFeature(feat)
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_networklink.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<name>a network link</name>') == -1 or \
data.find('<refreshVisibility>1</refreshVisibility>') == -1 or \
data.find('<flyToView>1</flyToView>') == -1 or \
data.find('<href>http://developers.google.com/kml/documentation/Point.kml</href>') == -1 or \
data.find('<refreshMode>onInterval</refreshMode>') == -1 or \
data.find('<refreshInterval>60</refreshInterval>') == -1 or \
data.find('<httpQuery>[clientVersion]</httpQuery>') == -1 or \
data.find('<viewRefreshMode>onStop</viewRefreshMode>') == -1 or \
data.find('<viewRefreshTime>30</viewRefreshTime>') == -1 or \
data.find('<refreshMode>onExpire</refreshMode>') == -1 or \
data.find('<viewRefreshMode>onRegion</viewRefreshMode>') == -1 or \
data.find('<viewBoundScale>0.5</viewBoundScale>') == -1 or \
data.find('<viewFormat>BBOX=[bboxWest],[bboxSouth],[bboxEast],[bboxNorth]</viewFormat>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing PhotoOverlay
def ogr_libkml_write_photooverlay():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_write_photooverlay.kml")
lyr = ds.CreateLayer('test')
lyr.CreateField(ogr.FieldDefn('name', ogr.OFTString))
lyr.CreateField(ogr.FieldDefn("heading", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("tilt", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("roll", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("camera_longitude", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("camera_latitude", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("camera_altitude", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("camera_altitudemode", ogr.OFTString))
lyr.CreateField(ogr.FieldDefn("photooverlay", ogr.OFTString))
lyr.CreateField(ogr.FieldDefn("leftfov", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("rightfov", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("bottomfov", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("topfov", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("near", ogr.OFTReal))
lyr.CreateField(ogr.FieldDefn("photooverlay_shape", ogr.OFTString))
lyr.CreateField(ogr.FieldDefn("imagepyramid_tilesize", ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn("imagepyramid_maxwidth", ogr.OFTInteger))
lyr.CreateField(ogr.FieldDefn("imagepyramid_maxheight", ogr.OFTInteger))
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField("name", "a photo overlay")
feat.SetField("photooverlay", "http://www.gdal.org/gdalicon.png")
feat.SetField("camera_longitude", 2.2946)
feat.SetField("camera_latitude", 48.8583)
feat.SetField("camera_altitude", 20)
feat.SetField("camera_altitudemode", "relativeToGround")
feat.SetField("leftfov", -60)
feat.SetField("rightfov", 59)
feat.SetField("bottomfov", -58)
feat.SetField("topfov", 57)
feat.SetField("near", 100)
feat.SetField("heading", 0)
feat.SetField("tilt", 90)
feat.SetField("roll", 0)
feat.SetField("photooverlay_shape", "rectangle")
feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT(2.2945 48.85825)'))
lyr.CreateFeature(feat)
feat.SetField("photooverlay", "http://tile.openstreetmap.org/$[level]/$[x]/$[y].png")
feat.SetField("imagepyramid_tilesize", 256)
feat.SetField("imagepyramid_maxwidth", 512)
feat.SetField("imagepyramid_maxheight", 512)
lyr.CreateFeature(feat)
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_photooverlay.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<Camera>') == -1 or \
data.find('<longitude>2.2946</longitude>') == -1 or \
data.find('<latitude>48.8583</latitude>') == -1 or \
data.find('<altitude>20</altitude>') == -1 or \
data.find('<heading>0</heading>') == -1 or \
data.find('<tilt>90</tilt>') == -1 or \
data.find('<roll>0</roll>') == -1 or \
data.find('<altitudeMode>relativeToGround</altitudeMode>') == -1 or \
data.find('<href>http://www.gdal.org/gdalicon.png</href>') == -1 or \
data.find('<leftFov>-60</leftFov>') == -1 or \
data.find('<rightFov>59</rightFov>') == -1 or \
data.find('<bottomFov>-58</bottomFov>') == -1 or \
data.find('<topFov>57</topFov>') == -1 or \
data.find('<near>100</near>') == -1 or \
data.find('2.2945,48.85825,0') == -1 or \
data.find('<shape>rectangle</shape>') == -1 or \
data.find('<href>http://tile.openstreetmap.org/$[level]/$[x]/$[y].png</href>') == -1 or \
data.find('<tileSize>256</tileSize>') == -1 or \
data.find('<maxWidth>512</maxWidth>') == -1 or \
data.find('<maxHeight>512</maxHeight>') == -1 or \
data.find('<gridOrigin>upperLeft</gridOrigin>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing and reading Data element
def ogr_libkml_read_write_data():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_read_write_data.kml")
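    # With LIBKML_USE_SIMPLEFIELD=NO the attributes are written as <Data>/<value> pairs
    # inside <ExtendedData> rather than as <Schema>/<SimpleData> elements.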
gdal.SetConfigOption('LIBKML_USE_SIMPLEFIELD', 'NO')
lyr = ds.CreateLayer('test')
gdal.SetConfigOption('LIBKML_USE_SIMPLEFIELD', None)
lyr.CreateField(ogr.FieldDefn('foo', ogr.OFTString))
feat = ogr.Feature(lyr.GetLayerDefn())
feat.SetField("foo", "bar")
feat.SetGeometry(ogr.CreateGeometryFromWkt('POINT(2.2945 48.85825)'))
lyr.CreateFeature(feat)
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_read_write_data.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<Data name="foo">') == -1 or \
data.find('<value>bar</value>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
ds = ogr.Open("/vsimem/ogr_libkml_read_write_data.kml")
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat.GetField('foo') != 'bar':
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing layer as Folder
def ogr_libkml_write_folder():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_write_folder.kml")
ds.CreateLayer('test', options=['LISTSTYLE_ICON_HREF=http://foo', 'FOLDER=YES'])
ds.CreateLayer('test2', options=['FOLDER=YES'])
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_folder.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<Style id="test_liststyle">') == -1 or \
data.find('<href>http://foo</href>') == -1 or \
data.find('<Folder id="test">') == -1 or \
data.find('<styleUrl>#test_liststyle</styleUrl>') == -1 or \
data.find('<Folder id="test2">') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test writing datasource and layer container properties
def ogr_libkml_write_container_properties():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.GetDriverByName('LIBKML').CreateDataSource("/vsimem/ogr_libkml_write_container_properties.kml",
options=['NAME=ds_name', 'DESCRIPTION=ds_description', 'OPEN=1', 'VISIBILITY=1', 'SNIPPET=ds_snippet'])
ds.CreateLayer('test', options=['NAME=lyr_name', 'DESCRIPTION=lyr_description', 'OPEN=0', 'VISIBILITY=0', 'SNIPPET=lyr_snippet'])
ds = None
f = gdal.VSIFOpenL('/vsimem/ogr_libkml_write_container_properties.kml', 'rb')
data = gdal.VSIFReadL(1, 2048, f)
data = data.decode('ascii')
gdal.VSIFCloseL(f)
if data.find('<name>ds_name</name>') == -1 or \
data.find('<visibility>1</visibility>') == -1 or \
data.find('<open>1</open>') == -1 or \
data.find('<snippet>ds_snippet</snippet>') == -1 or \
data.find('<description>ds_description</description>') == -1 or \
data.find('<name>lyr_name</name>') == -1 or \
data.find('<visibility>0</visibility>') == -1 or \
data.find('<open>0</open>') == -1 or \
data.find('<snippet>lyr_snippet</snippet>') == -1 or \
data.find('<description>lyr_description</description>') == -1:
print(data)
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test reading gx:TimeStamp and gx:TimeSpan
def ogr_libkml_read_gx_timestamp():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/gxtimestamp.kml')
lyr = ds.GetLayer(0)
f = lyr.GetNextFeature()
if f['timestamp'] != '2016/02/13 12:34:56+00':
gdaltest.post_reason('failure')
f.DumpReadable()
return 'fail'
f = lyr.GetNextFeature()
if f['begin'] != '2016/02/13 00:00:00+00' or f['end'] != '2016/02/14 00:00:00+00':
gdaltest.post_reason('failure')
f.DumpReadable()
return 'fail'
return 'success'
###############################################################################
# Test reading KML with kml: prefix
def ogr_libkml_read_placemark_with_kml_prefix():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/placemark_with_kml_prefix.kml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
if feat is None:
return 'fail'
return 'success'
###############################################################################
# Test reading KML with duplicated folder name
def ogr_libkml_read_duplicate_folder_name():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/duplicate_folder_name.kml')
lyr = ds.GetLayer(0)
if lyr.GetName() != 'layer':
gdaltest.post_reason('failure')
print(lyr.GetName())
return 'fail'
lyr = ds.GetLayer(1)
if lyr.GetName() != 'layer (#2)':
gdaltest.post_reason('failure')
print(lyr.GetName())
return 'fail'
return 'success'
###############################################################################
# Test reading KML with a placemark in root document, and a subfolder (#7221)
def ogr_libkml_read_placemark_in_root_and_subfolder():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/placemark_in_root_and_subfolder.kml')
lyr = ds.GetLayerByName('TopLevel')
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('failure')
return 'fail'
lyr = ds.GetLayerByName('SubFolder1')
if lyr is None:
gdaltest.post_reason('failure')
return 'fail'
if lyr.GetFeatureCount() != 1:
gdaltest.post_reason('failure')
return 'fail'
return 'success'
###############################################################################
# Test reading KML with coordinate tuples separated by tabulations (#7231)
def ogr_libkml_read_tab_separated_coord_triplet():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/tab_separated_coord_triplet.kml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
wkt = 'LINESTRING Z (1 2 3,4 5 6)'
if ogrtest.check_feature_geometry(feat, wkt):
return 'fail'
return 'success'
###############################################################################
# Test reading KML with coordinates that have space-only content (#7232)
def ogr_libkml_read_kml_with_space_content_in_coordinates():
if not ogrtest.have_read_libkml:
return 'skip'
ds = ogr.Open('data/kml_with_space_content_in_coordinates.kml')
lyr = ds.GetLayer(0)
feat = lyr.GetNextFeature()
wkt = 'LINESTRING EMPTY'
if ogrtest.check_feature_geometry(feat, wkt):
return 'fail'
return 'success'
###############################################################################
# Cleanup
def ogr_libkml_cleanup():
if not ogrtest.have_read_libkml:
return 'skip'
if ogrtest.kml_ds is not None:
ogrtest.kml_ds = None
gdal.Unlink('/vsimem/libkml.kml')
gdal.Unlink('/vsimem/libkml.kmz')
gdal.Unlink('/vsimem/libkml_use_doc_off.kmz')
gdal.Unlink("/vsimem/ogr_libkml_camera.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_layer_lookat.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_layer_camera.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_multigeometry.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_snippet.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_atom_author.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_atom_link.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_phonenumber.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_region.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_screenoverlay.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_model.kml")
gdal.Unlink("/vsimem/ogr_libkml_read_write_style_read.kml")
gdal.Unlink("/vsimem/ogr_libkml_read_write_style_write.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_update.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_update.kmz")
gdal.Unlink("/vsimem/ogr_libkml_write_update_dir/doc.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_update_dir")
gdal.Unlink("/vsimem/ogr_libkml_write_networklinkcontrol.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_networklinkcontrol.kmz")
gdal.Unlink("/vsimem/ogr_libkml_write_networklinkcontrol_dir/doc.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_networklinkcontrol_dir")
gdal.Unlink("/vsimem/ogr_libkml_write_liststyle.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_networklink.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_photooverlay.kml")
gdal.Unlink("/vsimem/ogr_libkml_read_write_data.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_folder.kml")
gdal.Unlink("/vsimem/ogr_libkml_write_container_properties.kml")
# Re-register KML driver if necessary
if ogrtest.kml_drv is not None:
print('Re-register KML driver')
ogrtest.kml_drv.Register()
return 'success'
###############################################################################
# Build tests runner
gdaltest_list = [
ogr_libkml_datastore,
ogr_libkml_attributes_1,
ogr_libkml_attributes_2,
ogr_libkml_attributes_3,
ogr_libkml_attributes_4,
ogr_libkml_point_read,
ogr_libkml_linestring_read,
ogr_libkml_polygon_read,
ogr_libkml_write_kml,
ogr_libkml_check_write_kml,
ogr_libkml_write_kmz,
ogr_libkml_check_write_kmz,
ogr_libkml_write_kmz_use_doc_off,
ogr_libkml_check_write_kmz_use_doc_off,
ogr_libkml_write_dir,
ogr_libkml_check_write_dir,
ogr_libkml_xml_attributes,
ogr_libkml_read_geometries,
ogr_libkml_test_ogrsf,
ogr_libkml_read_placemark,
ogr_libkml_read_empty,
ogr_libkml_read_emptylayers,
ogr_libkml_read_emptylayers_without_folder,
ogr_libkml_read_schema,
ogr_libkml_extended_data_without_schema_data,
ogr_libkml_gxtrack,
ogr_libkml_gxmultitrack,
ogr_libkml_camera,
ogr_libkml_write_layer_lookat,
ogr_libkml_write_layer_camera,
ogr_libkml_write_multigeometry,
ogr_libkml_write_snippet,
ogr_libkml_write_atom_author,
ogr_libkml_write_atom_link,
ogr_libkml_write_phonenumber,
ogr_libkml_write_region,
ogr_libkml_write_screenoverlay,
ogr_libkml_write_model,
ogr_libkml_read_write_style,
ogr_libkml_write_update,
ogr_libkml_write_networklinkcontrol,
ogr_libkml_write_liststyle,
ogr_libkml_write_networklink,
ogr_libkml_write_photooverlay,
ogr_libkml_read_write_data,
ogr_libkml_write_folder,
ogr_libkml_write_container_properties,
ogr_libkml_read_gx_timestamp,
ogr_libkml_read_placemark_with_kml_prefix,
ogr_libkml_read_duplicate_folder_name,
ogr_libkml_read_placemark_in_root_and_subfolder,
ogr_libkml_read_tab_separated_coord_triplet,
ogr_libkml_read_kml_with_space_content_in_coordinates,
ogr_libkml_cleanup]
if __name__ == '__main__':
gdaltest.setup_run('ogr_libkml')
gdaltest.run_tests(gdaltest_list)
gdaltest.summarize()
|
py | b409236fc05e8661e136c76d4b780becbf0ee19d | #!/usr/bin/env python3
"""Manifest file to key-value files."""
import argparse
import functools
from pathlib import Path
from utils.utility import add_arguments
from utils.utility import print_arguments
from utils.utility import read_manifest
def main(args):
print_arguments(args, globals())
count = 0
outdir = Path(args.output_path)
wav_scp = outdir / 'wav.scp'
dur_scp = outdir / 'duration'
text_scp = outdir / 'text'
manifest_jsons = read_manifest(args.manifest_path)
with wav_scp.open('w') as fwav, dur_scp.open('w') as fdur, text_scp.open(
'w') as ftxt:
for line_json in manifest_jsons:
utt = line_json['utt']
feat = line_json['feat']
file_ext = Path(feat).suffix # .wav
text = line_json['text']
feat_shape = line_json['feat_shape']
dur = feat_shape[0]
feat_dim = feat_shape[1]
if 'token' in line_json:
tokens = line_json['token']
tokenids = line_json['token_id']
token_shape = line_json['token_shape']
token_len = token_shape[0]
vocab_dim = token_shape[1]
if file_ext == '.wav':
fwav.write(f"{utt} {feat}\n")
fdur.write(f"{utt} {dur}\n")
ftxt.write(f"{utt} {text}\n")
count += 1
print(f"Examples number: {count}")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('manifest_path', str,
'data/librispeech/manifest.train',
"Filepath of manifest to compute normalizer's mean and stddev.")
add_arg('output_path', str,
'data/train',
"dir path to dump wav.scp/duaration/text files.")
# yapf: disable
args = parser.parse_args()
main(args)
|
py | b4092445e8399a2a235cf269e6a952d7797ff8be | import behave
import os
import random
@behave.when(u'I set dataset readonly mode to "{state}"')
def step_impl(context, state):
state = state == 'True'
context.project.datasets.set_readonly(dataset=context.dataset, state=state)
def item_uploaded_successfully(context):
try:
item_local_path = os.path.join(os.environ["DATALOOP_TEST_ASSETS"], '0000000162.png')
item = context.dataset.items.upload(
local_path=item_local_path,
remote_name='name-{}'.format(random.randrange(100, 10000))
)
item_uploaded = isinstance(item, context.dl.Item)
except Exception:
item_uploaded = False
return item_uploaded
def dataset_updated_successfully(context):
try:
context.dataset.name = 'name-{}'.format(random.randrange(100, 10000))
context.dataset.update()
dataset_updated = True
except Exception:
dataset_updated = False
return dataset_updated
@behave.then(u'Dataset is in readonly mode')
def step_impl(context):
assert context.dataset.readonly
assert not item_uploaded_successfully(context=context)
assert not dataset_updated_successfully(context=context)
@behave.then(u'Dataset is not in readonly mode')
def step_impl(context):
assert not context.dataset.readonly
assert item_uploaded_successfully(context=context)
assert dataset_updated_successfully(context=context)
|
py | b40924b1c3c06abce130742c35ff85982577fc28 | #
# Uncomplicated VM Builder
# Copyright (C) 2007-2009 Canonical Ltd.
#
# See AUTHORS for list of contributors
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Hypervisor super class
import logging
import os
import VMBuilder.distro
import VMBuilder.disk
from VMBuilder.util import run_cmd, tmpdir
STORAGE_DISK_IMAGE = 0
STORAGE_FS_IMAGE = 1
class Hypervisor(VMBuilder.distro.Context):
preferred_storage = STORAGE_DISK_IMAGE
def __init__(self, distro):
self.plugin_classes = VMBuilder._hypervisor_plugins
super(Hypervisor, self).__init__()
self.plugins += [distro]
self.distro = distro
self.filesystems = []
self.disks = []
self.nics = []
def add_filesystem(self, *args, **kwargs):
"""Adds a filesystem to the virtual machine"""
from VMBuilder.disk import Filesystem
fs = Filesystem(self, *args, **kwargs)
self.filesystems.append(fs)
return fs
def add_disk(self, *args, **kwargs):
"""Adds a disk image to the virtual machine"""
from VMBuilder.disk import Disk
disk = Disk(self, *args, **kwargs)
self.disks.append(disk)
return disk
def install_os(self):
self.nics = [self.NIC()]
self.call_hooks('preflight_check')
self.call_hooks('configure_networking', self.nics)
self.call_hooks('configure_mounting', self.disks, self.filesystems)
self.chroot_dir = tmpdir()
self.call_hooks('mount_partitions', self.chroot_dir)
run_cmd('rsync', '-aHA', '%s/' % self.distro.chroot_dir, self.chroot_dir)
self.distro.set_chroot_dir(self.chroot_dir)
if self.needs_bootloader:
self.call_hooks('install_bootloader', self.chroot_dir, self.disks)
self.call_hooks('install_kernel', self.chroot_dir)
self.distro.call_hooks('post_install')
self.call_hooks('unmount_partitions')
os.rmdir(self.chroot_dir)
def finalise(self, destdir):
self.call_hooks('convert',
self.preferred_storage == STORAGE_DISK_IMAGE and self.disks or self.filesystems,
destdir)
self.call_hooks('deploy', destdir)
def mount_partitions(self, mntdir):
"""Mounts all the vm's partitions and filesystems below .rootmnt"""
logging.info('Mounting target filesystems')
for fs in self.filesystems:
fs.create()
fs.mkfs()
for disk in self.disks:
disk.create()
disk.partition()
disk.map_partitions()
disk.mkfs()
fss = VMBuilder.disk.get_ordered_filesystems(self)
for fs in fss:
fs.mount(mntdir)
self.distro.post_mount(fs)
def unmount_partitions(self):
"""Unmounts all the vm's partitions and filesystems"""
logging.info('Unmounting target filesystem')
fss = VMBuilder.disk.get_ordered_filesystems(self)
fss.reverse()
for fs in fss:
fs.umount()
for disk in self.disks:
disk.unmap()
def convert_disks(self, disks, destdir):
for disk in disks:
disk.convert(destdir, self.filetype)
class NIC(object):
def __init__(self, type='dhcp', ip=None, network=None, netmask=None,
broadcast=None, dns=None, gateway=None):
self.type = type
self.ip = ip
self.network = network
self.netmask = netmask
self.broadcast = broadcast
self.dns = dns
self.gateway = gateway
|
py | b409250e937c373211cea3a72363f51482b6a59c | from meddlr.modeling.layers import build, conv, gauss
from meddlr.modeling.layers.build import ( # noqa: F401
CUSTOM_LAYERS_REGISTRY,
get_layer_kind,
get_layer_type,
)
from meddlr.modeling.layers.conv import ConvWS2d, ConvWS3d # noqa: F401
from meddlr.modeling.layers.gauss import GaussianBlur # noqa: F401
__all__ = []
__all__.extend(build.__all__)
__all__.extend(conv.__all__)
__all__.extend(gauss.__all__)
|
py | b40925b51d1cdceb7a09a1a54bc99a8f0035bc6d | import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), 'utils'))
|
py | b40926c0cdf60497574ee60a637c6b163f22849f | import weakref, sys
from rpython.rlib import jit, objectmodel, debug, rerased
from rpython.rlib.rarithmetic import intmask, r_uint
from pypy.interpreter.baseobjspace import W_Root
from pypy.objspace.std.dictmultiobject import (
W_DictMultiObject, DictStrategy, ObjectDictStrategy, BaseKeyIterator,
BaseValueIterator, BaseItemIterator, _never_equal_to_string,
W_DictObject,
)
from pypy.objspace.std.typeobject import MutableCell
erase_item, unerase_item = rerased.new_erasing_pair("mapdict storage item")
erase_map, unerase_map = rerased.new_erasing_pair("map")
erase_list, unerase_list = rerased.new_erasing_pair("mapdict storage list")
# ____________________________________________________________
# attribute shapes
NUM_DIGITS = 4
NUM_DIGITS_POW2 = 1 << NUM_DIGITS
# note: we use "x * NUM_DIGITS_POW2" instead of "x << NUM_DIGITS" because
# we want to propagate knowledge that the result cannot be negative
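# illustrative note (not part of the original source): with NUM_DIGITS = 4, a size
# estimate of 3 attributes is stored in _size_estimate as 3 * 16 = 48, and
# AbstractAttribute.size_estimate() shifts it back down to 3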
class AbstractAttribute(object):
_immutable_fields_ = ['terminator']
cache_attrs = None
_size_estimate = 0
def __init__(self, space, terminator):
self.space = space
assert isinstance(terminator, Terminator)
self.terminator = terminator
def read(self, obj, name, index):
attr = self.find_map_attr(name, index)
if attr is None:
return self.terminator._read_terminator(obj, name, index)
if (
jit.isconstant(attr.storageindex) and
jit.isconstant(obj) and
not attr.ever_mutated
):
return self._pure_mapdict_read_storage(obj, attr.storageindex)
else:
return obj._mapdict_read_storage(attr.storageindex)
@jit.elidable
def _pure_mapdict_read_storage(self, obj, storageindex):
return obj._mapdict_read_storage(storageindex)
def write(self, obj, name, index, w_value):
attr = self.find_map_attr(name, index)
if attr is None:
return self.terminator._write_terminator(obj, name, index, w_value)
if not attr.ever_mutated:
attr.ever_mutated = True
obj._mapdict_write_storage(attr.storageindex, w_value)
return True
def delete(self, obj, name, index):
pass
@jit.elidable
def find_map_attr(self, name, index):
if (self.space.config.objspace.std.withmethodcache):
return self._find_map_attr_cache(name, index)
return self._find_map_attr(name, index)
@jit.dont_look_inside
def _find_map_attr_cache(self, name, index):
space = self.space
cache = space.fromcache(MapAttrCache)
SHIFT2 = r_uint.BITS - space.config.objspace.std.methodcachesizeexp
SHIFT1 = SHIFT2 - 5
attrs_as_int = objectmodel.current_object_addr_as_int(self)
# ^^^Note: see comment in typeobject.py for
# _pure_lookup_where_with_method_cache()
# unrolled hash computation for 2-tuple
c1 = 0x345678
c2 = 1000003
hash_name = objectmodel.compute_hash(name)
hash_selector = intmask((c2 * ((c2 * c1) ^ hash_name)) ^ index)
product = intmask(attrs_as_int * hash_selector)
attr_hash = (r_uint(product) ^ (r_uint(product) << SHIFT1)) >> SHIFT2
# ^^^Note2: same comment too
cached_attr = cache.attrs[attr_hash]
if cached_attr is self:
cached_name = cache.names[attr_hash]
cached_index = cache.indexes[attr_hash]
if cached_name == name and cached_index == index:
attr = cache.cached_attrs[attr_hash]
if space.config.objspace.std.withmethodcachecounter:
cache.hits[name] = cache.hits.get(name, 0) + 1
return attr
attr = self._find_map_attr(name, index)
cache.attrs[attr_hash] = self
cache.names[attr_hash] = name
cache.indexes[attr_hash] = index
cache.cached_attrs[attr_hash] = attr
if space.config.objspace.std.withmethodcachecounter:
cache.misses[name] = cache.misses.get(name, 0) + 1
return attr
def _find_map_attr(self, name, index):
while isinstance(self, PlainAttribute):
if index == self.index and name == self.name:
return self
self = self.back
return None
def copy(self, obj):
raise NotImplementedError("abstract base class")
def length(self):
raise NotImplementedError("abstract base class")
def get_terminator(self):
return self.terminator
def set_terminator(self, obj, terminator):
raise NotImplementedError("abstract base class")
@jit.elidable
def size_estimate(self):
return self._size_estimate >> NUM_DIGITS
def search(self, attrtype):
return None
@jit.elidable
def _get_new_attr(self, name, index):
cache = self.cache_attrs
if cache is None:
cache = self.cache_attrs = {}
attr = cache.get((name, index), None)
if attr is None:
attr = PlainAttribute(name, index, self)
cache[name, index] = attr
return attr
def add_attr(self, obj, name, index, w_value):
self._reorder_and_add(obj, name, index, w_value)
if not jit.we_are_jitted():
oldattr = self
attr = obj._get_mapdict_map()
size_est = (oldattr._size_estimate + attr.size_estimate()
- oldattr.size_estimate())
assert size_est >= (oldattr.length() * NUM_DIGITS_POW2)
oldattr._size_estimate = size_est
def _add_attr_without_reordering(self, obj, name, index, w_value):
attr = self._get_new_attr(name, index)
attr._switch_map_and_write_storage(obj, w_value)
@jit.unroll_safe
def _switch_map_and_write_storage(self, obj, w_value):
if self.length() > obj._mapdict_storage_length():
# note that self.size_estimate() is always at least self.length()
new_storage = [None] * self.size_estimate()
for i in range(obj._mapdict_storage_length()):
new_storage[i] = obj._mapdict_read_storage(i)
obj._set_mapdict_storage_and_map(new_storage, self)
# the order is important here: first change the map, then the storage,
# for the benefit of the special subclasses
obj._set_mapdict_map(self)
obj._mapdict_write_storage(self.storageindex, w_value)
@jit.elidable
def _find_branch_to_move_into(self, name, index):
# walk up the map chain to find an ancestor with lower order that
# already has the current name as a child inserted
current_order = sys.maxint
number_to_readd = 0
current = self
key = (name, index)
while True:
attr = None
if current.cache_attrs is not None:
attr = current.cache_attrs.get(key, None)
if attr is None or attr.order > current_order:
# we reached the top, so we didn't find it anywhere,
# just add it to the top attribute
                if not isinstance(current, PlainAttribute):
                    return 0, self._get_new_attr(name, index)
                # if not found try parent
                number_to_readd += 1
                current_order = current.order
                current = current.back
            else:
                return number_to_readd, attr
@jit.look_inside_iff(lambda self, obj, name, index, w_value:
jit.isconstant(self) and
jit.isconstant(name) and
jit.isconstant(index))
def _reorder_and_add(self, obj, name, index, w_value):
# the idea is as follows: the subtrees of any map are ordered by
# insertion. the invariant is that subtrees that are inserted later
# must not contain the name of the attribute of any earlier inserted
# attribute anywhere
# m______
# inserted first / \ ... \ further attributes
# attrname a 0/ 1\ n\
# m a must not appear here anywhere
#
# when inserting a new attribute in an object we check whether any
# parent of lower order has seen that attribute yet. if yes, we follow
# that branch. if not, we normally append that attribute. When we
# follow a prior branch, we necessarily remove some attributes to be
# able to do that. They need to be re-added, which has to follow the
# reordering procedure recusively.
# we store the to-be-readded attribute in the stack, with the map and
# the value paired up those are lazily initialized to a list large
# enough to store all current attributes
stack = None
stack_index = 0
while True:
current = self
number_to_readd = 0
number_to_readd, attr = self._find_branch_to_move_into(name, index)
# we found the attributes further up, need to save the
# previous values of the attributes we passed
if number_to_readd:
if stack is None:
stack = [erase_map(None)] * (self.length() * 2)
current = self
for i in range(number_to_readd):
assert isinstance(current, PlainAttribute)
w_self_value = obj._mapdict_read_storage(
current.storageindex)
stack[stack_index] = erase_map(current)
stack[stack_index + 1] = erase_item(w_self_value)
stack_index += 2
current = current.back
attr._switch_map_and_write_storage(obj, w_value)
if not stack_index:
return
# readd the current top of the stack
stack_index -= 2
next_map = unerase_map(stack[stack_index])
w_value = unerase_item(stack[stack_index + 1])
name = next_map.name
index = next_map.index
self = obj._get_mapdict_map()
def materialize_r_dict(self, space, obj, dict_w):
raise NotImplementedError("abstract base class")
def remove_dict_entries(self, obj):
raise NotImplementedError("abstract base class")
def __repr__(self):
return "<%s>" % (self.__class__.__name__,)
class Terminator(AbstractAttribute):
_immutable_fields_ = ['w_cls']
def __init__(self, space, w_cls):
AbstractAttribute.__init__(self, space, self)
self.w_cls = w_cls
def _read_terminator(self, obj, name, index):
return None
def _write_terminator(self, obj, name, index, w_value):
obj._get_mapdict_map().add_attr(obj, name, index, w_value)
return True
def copy(self, obj):
result = Object()
result.space = self.space
result._init_empty(self)
return result
def length(self):
return 0
def set_terminator(self, obj, terminator):
result = Object()
result.space = self.space
result._init_empty(terminator)
return result
def remove_dict_entries(self, obj):
return self.copy(obj)
def __repr__(self):
return "<%s w_cls=%s>" % (self.__class__.__name__, self.w_cls)
class DictTerminator(Terminator):
_immutable_fields_ = ['devolved_dict_terminator']
def __init__(self, space, w_cls):
Terminator.__init__(self, space, w_cls)
self.devolved_dict_terminator = DevolvedDictTerminator(space, w_cls)
def materialize_r_dict(self, space, obj, dict_w):
result = Object()
result.space = space
result._init_empty(self.devolved_dict_terminator)
return result
class NoDictTerminator(Terminator):
def _write_terminator(self, obj, name, index, w_value):
if index == DICT:
return False
return Terminator._write_terminator(self, obj, name, index, w_value)
class DevolvedDictTerminator(Terminator):
def _read_terminator(self, obj, name, index):
if index == DICT:
space = self.space
w_dict = obj.getdict(space)
return space.finditem_str(w_dict, name)
return Terminator._read_terminator(self, obj, name, index)
def _write_terminator(self, obj, name, index, w_value):
if index == DICT:
space = self.space
w_dict = obj.getdict(space)
space.setitem_str(w_dict, name, w_value)
return True
return Terminator._write_terminator(self, obj, name, index, w_value)
def delete(self, obj, name, index):
from pypy.interpreter.error import OperationError
if index == DICT:
space = self.space
w_dict = obj.getdict(space)
try:
space.delitem(w_dict, space.wrap(name))
except OperationError, ex:
if not ex.match(space, space.w_KeyError):
raise
return Terminator.copy(self, obj)
return Terminator.delete(self, obj, name, index)
def remove_dict_entries(self, obj):
assert 0, "should be unreachable"
def set_terminator(self, obj, terminator):
if not isinstance(terminator, DevolvedDictTerminator):
assert isinstance(terminator, DictTerminator)
terminator = terminator.devolved_dict_terminator
return Terminator.set_terminator(self, obj, terminator)
class PlainAttribute(AbstractAttribute):
_immutable_fields_ = ['name', 'index', 'storageindex', 'back', 'ever_mutated?', 'order']
def __init__(self, name, index, back):
AbstractAttribute.__init__(self, back.space, back.terminator)
self.name = name
self.index = index
self.storageindex = back.length()
self.back = back
self._size_estimate = self.length() * NUM_DIGITS_POW2
self.ever_mutated = False
self.order = len(back.cache_attrs) if back.cache_attrs else 0
def _copy_attr(self, obj, new_obj):
w_value = self.read(obj, self.name, self.index)
new_obj._get_mapdict_map().add_attr(new_obj, self.name, self.index, w_value)
def delete(self, obj, name, index):
if index == self.index and name == self.name:
# ok, attribute is deleted
if not self.ever_mutated:
self.ever_mutated = True
return self.back.copy(obj)
new_obj = self.back.delete(obj, name, index)
if new_obj is not None:
self._copy_attr(obj, new_obj)
return new_obj
def copy(self, obj):
new_obj = self.back.copy(obj)
self._copy_attr(obj, new_obj)
return new_obj
def length(self):
return self.storageindex + 1
def set_terminator(self, obj, terminator):
new_obj = self.back.set_terminator(obj, terminator)
self._copy_attr(obj, new_obj)
return new_obj
def search(self, attrtype):
if self.index == attrtype:
return self
return self.back.search(attrtype)
def materialize_r_dict(self, space, obj, dict_w):
new_obj = self.back.materialize_r_dict(space, obj, dict_w)
if self.index == DICT:
w_attr = space.wrap(self.name)
dict_w[w_attr] = obj._mapdict_read_storage(self.storageindex)
else:
self._copy_attr(obj, new_obj)
return new_obj
def remove_dict_entries(self, obj):
new_obj = self.back.remove_dict_entries(obj)
if self.index != DICT:
self._copy_attr(obj, new_obj)
return new_obj
def __repr__(self):
return "<PlainAttribute %s %s %s %r>" % (self.name, self.index, self.storageindex, self.back)
def _become(w_obj, new_obj):
# this is like the _become method, really, but we cannot use that due to
# RPython reasons
w_obj._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
class MapAttrCache(object):
def __init__(self, space):
assert space.config.objspace.std.withmethodcache
SIZE = 1 << space.config.objspace.std.methodcachesizeexp
self.attrs = [None] * SIZE
self.names = [None] * SIZE
self.indexes = [INVALID] * SIZE
self.cached_attrs = [None] * SIZE
if space.config.objspace.std.withmethodcachecounter:
self.hits = {}
self.misses = {}
def clear(self):
for i in range(len(self.attrs)):
self.attrs[i] = None
for i in range(len(self.names)):
self.names[i] = None
self.indexes[i] = INVALID
for i in range(len(self.cached_attrs)):
self.cached_attrs[i] = None
# ____________________________________________________________
# object implementation
DICT = 0
SPECIAL = 1
INVALID = 2
SLOTS_STARTING_FROM = 3
class BaseMapdictObject:
_mixin_ = True
def _init_empty(self, map):
raise NotImplementedError("abstract base class")
def _become(self, new_obj):
self._set_mapdict_storage_and_map(new_obj.storage, new_obj.map)
def _get_mapdict_map(self):
return jit.promote(self.map)
def _set_mapdict_map(self, map):
self.map = map
# _____________________________________________
# objspace interface
def getdictvalue(self, space, attrname):
return self._get_mapdict_map().read(self, attrname, DICT)
def setdictvalue(self, space, attrname, w_value):
return self._get_mapdict_map().write(self, attrname, DICT, w_value)
def deldictvalue(self, space, attrname):
new_obj = self._get_mapdict_map().delete(self, attrname, DICT)
if new_obj is None:
return False
self._become(new_obj)
return True
def getdict(self, space):
w_dict = self._get_mapdict_map().read(self, "dict", SPECIAL)
if w_dict is not None:
assert isinstance(w_dict, W_DictMultiObject)
return w_dict
strategy = space.fromcache(MapDictStrategy)
storage = strategy.erase(self)
w_dict = W_DictObject(space, strategy, storage)
flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict)
assert flag
return w_dict
def setdict(self, space, w_dict):
from pypy.interpreter.typedef import check_new_dictionary
w_dict = check_new_dictionary(space, w_dict)
w_olddict = self.getdict(space)
assert isinstance(w_dict, W_DictMultiObject)
# The old dict has got 'self' as dstorage, but we are about to
# change self's ("dict", SPECIAL) attribute to point to the
# new dict. If the old dict was using the MapDictStrategy, we
# have to force it now: otherwise it would remain an empty
# shell that continues to delegate to 'self'.
if type(w_olddict.get_strategy()) is MapDictStrategy:
w_olddict.get_strategy().switch_to_object_strategy(w_olddict)
flag = self._get_mapdict_map().write(self, "dict", SPECIAL, w_dict)
assert flag
def getclass(self, space):
return self._get_mapdict_map().terminator.w_cls
def setclass(self, space, w_cls):
new_obj = self._get_mapdict_map().set_terminator(self, w_cls.terminator)
self._become(new_obj)
def user_setup(self, space, w_subtype):
from pypy.module.__builtin__.interp_classobj import W_InstanceObject
self.space = space
assert (not self.typedef.hasdict or
self.typedef is W_InstanceObject.typedef)
self._init_empty(w_subtype.terminator)
def getslotvalue(self, slotindex):
index = SLOTS_STARTING_FROM + slotindex
return self._get_mapdict_map().read(self, "slot", index)
def setslotvalue(self, slotindex, w_value):
index = SLOTS_STARTING_FROM + slotindex
self._get_mapdict_map().write(self, "slot", index, w_value)
def delslotvalue(self, slotindex):
index = SLOTS_STARTING_FROM + slotindex
new_obj = self._get_mapdict_map().delete(self, "slot", index)
if new_obj is None:
return False
self._become(new_obj)
return True
    # used by _weakref implementation
def getweakref(self):
from pypy.module._weakref.interp__weakref import WeakrefLifeline
lifeline = self._get_mapdict_map().read(self, "weakref", SPECIAL)
if lifeline is None:
return None
assert isinstance(lifeline, WeakrefLifeline)
return lifeline
getweakref._cannot_really_call_random_things_ = True
def setweakref(self, space, weakreflifeline):
from pypy.module._weakref.interp__weakref import WeakrefLifeline
assert isinstance(weakreflifeline, WeakrefLifeline)
self._get_mapdict_map().write(self, "weakref", SPECIAL, weakreflifeline)
setweakref._cannot_really_call_random_things_ = True
def delweakref(self):
self._get_mapdict_map().write(self, "weakref", SPECIAL, None)
delweakref._cannot_really_call_random_things_ = True
class ObjectMixin(object):
_mixin_ = True
def _init_empty(self, map):
from rpython.rlib.debug import make_sure_not_resized
self.map = map
self.storage = make_sure_not_resized([None] * map.size_estimate())
def _mapdict_read_storage(self, storageindex):
assert storageindex >= 0
return self.storage[storageindex]
def _mapdict_write_storage(self, storageindex, value):
self.storage[storageindex] = value
def _mapdict_storage_length(self):
return len(self.storage)
def _set_mapdict_storage_and_map(self, storage, map):
self.storage = storage
self.map = map
class Object(ObjectMixin, BaseMapdictObject, W_Root):
pass # mainly for tests
def get_subclass_of_correct_size(space, cls, w_type):
assert space.config.objspace.std.withmapdict
map = w_type.terminator
classes = memo_get_subclass_of_correct_size(space, cls)
if SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS:
return classes[0]
size = map.size_estimate()
debug.check_nonneg(size)
if size < len(classes):
return classes[size]
else:
return classes[len(classes)-1]
get_subclass_of_correct_size._annspecialcase_ = "specialize:arg(1)"
SUBCLASSES_MIN_FIELDS = 5 # XXX tweak these numbers
SUBCLASSES_MAX_FIELDS = 5
def memo_get_subclass_of_correct_size(space, supercls):
key = space, supercls
try:
return _subclass_cache[key]
except KeyError:
assert not hasattr(supercls, "__del__")
result = []
for i in range(SUBCLASSES_MIN_FIELDS, SUBCLASSES_MAX_FIELDS+1):
result.append(_make_subclass_size_n(supercls, i))
for i in range(SUBCLASSES_MIN_FIELDS):
result.insert(0, result[0])
if SUBCLASSES_MIN_FIELDS == SUBCLASSES_MAX_FIELDS:
assert len(set(result)) == 1
_subclass_cache[key] = result
return result
memo_get_subclass_of_correct_size._annspecialcase_ = "specialize:memo"
_subclass_cache = {}
def _make_subclass_size_n(supercls, n):
from rpython.rlib import unroll
rangen = unroll.unrolling_iterable(range(n))
nmin1 = n - 1
rangenmin1 = unroll.unrolling_iterable(range(nmin1))
valnmin1 = "_value%s" % nmin1
class subcls(BaseMapdictObject, supercls):
def _init_empty(self, map):
for i in rangenmin1:
setattr(self, "_value%s" % i, None)
setattr(self, valnmin1, erase_item(None))
self.map = map
def _has_storage_list(self):
return self.map.length() > n
def _mapdict_get_storage_list(self):
erased = getattr(self, valnmin1)
return unerase_list(erased)
def _mapdict_read_storage(self, storageindex):
assert storageindex >= 0
if storageindex < nmin1:
for i in rangenmin1:
if storageindex == i:
return getattr(self, "_value%s" % i)
if self._has_storage_list():
return self._mapdict_get_storage_list()[storageindex - nmin1]
erased = getattr(self, "_value%s" % nmin1)
return unerase_item(erased)
def _mapdict_write_storage(self, storageindex, value):
for i in rangenmin1:
if storageindex == i:
setattr(self, "_value%s" % i, value)
return
if self._has_storage_list():
self._mapdict_get_storage_list()[storageindex - nmin1] = value
return
setattr(self, "_value%s" % nmin1, erase_item(value))
def _mapdict_storage_length(self):
if self._has_storage_list():
return len(self._mapdict_get_storage_list()) + (n - 1)
return n
def _set_mapdict_storage_and_map(self, storage, map):
self.map = map
len_storage = len(storage)
for i in rangenmin1:
if i < len_storage:
erased = storage[i]
else:
erased = None
setattr(self, "_value%s" % i, erased)
has_storage_list = self._has_storage_list()
if len_storage < n:
assert not has_storage_list
erased = erase_item(None)
elif len_storage == n:
assert not has_storage_list
erased = erase_item(storage[nmin1])
elif not has_storage_list:
# storage is longer than self.map.length() only due to
# overallocation
erased = erase_item(storage[nmin1])
# in theory, we should be ultra-paranoid and check all entries,
# but checking just one should catch most problems anyway:
assert storage[n] is None
else:
storage_list = storage[nmin1:]
erased = erase_list(storage_list)
setattr(self, "_value%s" % nmin1, erased)
subcls.__name__ = supercls.__name__ + "Size%s" % n
return subcls
# ____________________________________________________________
# dict implementation
def get_terminator_for_dicts(space):
return DictTerminator(space, None)
class MapDictStrategy(DictStrategy):
erase, unerase = rerased.new_erasing_pair("map")
erase = staticmethod(erase)
unerase = staticmethod(unerase)
def __init__(self, space):
self.space = space
def get_empty_storage(self):
w_result = Object()
terminator = self.space.fromcache(get_terminator_for_dicts)
w_result._init_empty(terminator)
return self.erase(w_result)
def switch_to_object_strategy(self, w_dict):
w_obj = self.unerase(w_dict.dstorage)
strategy = self.space.fromcache(ObjectDictStrategy)
dict_w = strategy.unerase(strategy.get_empty_storage())
w_dict.set_strategy(strategy)
w_dict.dstorage = strategy.erase(dict_w)
assert w_obj.getdict(self.space) is w_dict or w_obj._get_mapdict_map().terminator.w_cls is None
materialize_r_dict(self.space, w_obj, dict_w)
def getitem(self, w_dict, w_key):
space = self.space
w_lookup_type = space.type(w_key)
if space.is_w(w_lookup_type, space.w_str):
return self.getitem_str(w_dict, space.str_w(w_key))
elif _never_equal_to_string(space, w_lookup_type):
return None
else:
self.switch_to_object_strategy(w_dict)
return w_dict.getitem(w_key)
def getitem_str(self, w_dict, key):
w_obj = self.unerase(w_dict.dstorage)
return w_obj.getdictvalue(self.space, key)
def setitem_str(self, w_dict, key, w_value):
w_obj = self.unerase(w_dict.dstorage)
flag = w_obj.setdictvalue(self.space, key, w_value)
assert flag
def setitem(self, w_dict, w_key, w_value):
space = self.space
if space.is_w(space.type(w_key), space.w_str):
self.setitem_str(w_dict, self.space.str_w(w_key), w_value)
else:
self.switch_to_object_strategy(w_dict)
w_dict.setitem(w_key, w_value)
def setdefault(self, w_dict, w_key, w_default):
space = self.space
if space.is_w(space.type(w_key), space.w_str):
key = space.str_w(w_key)
w_result = self.getitem_str(w_dict, key)
if w_result is not None:
return w_result
self.setitem_str(w_dict, key, w_default)
return w_default
else:
self.switch_to_object_strategy(w_dict)
return w_dict.setdefault(w_key, w_default)
def delitem(self, w_dict, w_key):
space = self.space
w_key_type = space.type(w_key)
w_obj = self.unerase(w_dict.dstorage)
if space.is_w(w_key_type, space.w_str):
key = self.space.str_w(w_key)
flag = w_obj.deldictvalue(space, key)
if not flag:
raise KeyError
elif _never_equal_to_string(space, w_key_type):
raise KeyError
else:
self.switch_to_object_strategy(w_dict)
w_dict.delitem(w_key)
def length(self, w_dict):
res = 0
curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT)
while curr is not None:
curr = curr.back
curr = curr.search(DICT)
res += 1
return res
def clear(self, w_dict):
w_obj = self.unerase(w_dict.dstorage)
new_obj = w_obj._get_mapdict_map().remove_dict_entries(w_obj)
_become(w_obj, new_obj)
def popitem(self, w_dict):
curr = self.unerase(w_dict.dstorage)._get_mapdict_map().search(DICT)
if curr is None:
raise KeyError
key = curr.name
w_value = self.getitem_str(w_dict, key)
w_key = self.space.wrap(key)
self.delitem(w_dict, w_key)
return (w_key, w_value)
# XXX could implement a more efficient w_keys based on space.newlist_bytes
def iterkeys(self, w_dict):
return MapDictIteratorKeys(self.space, self, w_dict)
def itervalues(self, w_dict):
return MapDictIteratorValues(self.space, self, w_dict)
def iteritems(self, w_dict):
return MapDictIteratorItems(self.space, self, w_dict)
def materialize_r_dict(space, obj, dict_w):
map = obj._get_mapdict_map()
new_obj = map.materialize_r_dict(space, obj, dict_w)
_become(obj, new_obj)
class MapDictIteratorKeys(BaseKeyIterator):
def __init__(self, space, strategy, dictimplementation):
BaseKeyIterator.__init__(self, space, strategy, dictimplementation)
w_obj = strategy.unerase(dictimplementation.dstorage)
self.w_obj = w_obj
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_key_entry(self):
implementation = self.dictimplementation
assert isinstance(implementation.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None
if self.curr_map:
curr_map = self.curr_map.search(DICT)
if curr_map:
self.curr_map = curr_map.back
attr = curr_map.name
w_attr = self.space.wrap(attr)
return w_attr
return None
class MapDictIteratorValues(BaseValueIterator):
def __init__(self, space, strategy, dictimplementation):
BaseValueIterator.__init__(self, space, strategy, dictimplementation)
w_obj = strategy.unerase(dictimplementation.dstorage)
self.w_obj = w_obj
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_value_entry(self):
implementation = self.dictimplementation
assert isinstance(implementation.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None
if self.curr_map:
curr_map = self.curr_map.search(DICT)
if curr_map:
self.curr_map = curr_map.back
attr = curr_map.name
return self.w_obj.getdictvalue(self.space, attr)
return None
class MapDictIteratorItems(BaseItemIterator):
def __init__(self, space, strategy, dictimplementation):
BaseItemIterator.__init__(self, space, strategy, dictimplementation)
w_obj = strategy.unerase(dictimplementation.dstorage)
self.w_obj = w_obj
self.orig_map = self.curr_map = w_obj._get_mapdict_map()
def next_item_entry(self):
implementation = self.dictimplementation
assert isinstance(implementation.get_strategy(), MapDictStrategy)
if self.orig_map is not self.w_obj._get_mapdict_map():
return None, None
if self.curr_map:
curr_map = self.curr_map.search(DICT)
if curr_map:
self.curr_map = curr_map.back
attr = curr_map.name
w_attr = self.space.wrap(attr)
return w_attr, self.w_obj.getdictvalue(self.space, attr)
return None, None
# ____________________________________________________________
# Magic caching
class CacheEntry(object):
version_tag = None
storageindex = 0
w_method = None # for callmethod
success_counter = 0
failure_counter = 0
def is_valid_for_obj(self, w_obj):
map = w_obj._get_mapdict_map()
return self.is_valid_for_map(map)
@jit.dont_look_inside
def is_valid_for_map(self, map):
# note that 'map' can be None here
mymap = self.map_wref()
if mymap is not None and mymap is map:
version_tag = map.terminator.w_cls.version_tag()
if version_tag is self.version_tag:
# everything matches, it's incredibly fast
if map.space.config.objspace.std.withmethodcachecounter:
self.success_counter += 1
return True
return False
_invalid_cache_entry_map = objectmodel.instantiate(AbstractAttribute)
_invalid_cache_entry_map.terminator = None
INVALID_CACHE_ENTRY = CacheEntry()
INVALID_CACHE_ENTRY.map_wref = weakref.ref(_invalid_cache_entry_map)
# different from any real map ^^^
def init_mapdict_cache(pycode):
num_entries = len(pycode.co_names_w)
pycode._mapdict_caches = [INVALID_CACHE_ENTRY] * num_entries
@jit.dont_look_inside
def _fill_cache(pycode, nameindex, map, version_tag, storageindex, w_method=None):
entry = pycode._mapdict_caches[nameindex]
if entry is INVALID_CACHE_ENTRY:
entry = CacheEntry()
pycode._mapdict_caches[nameindex] = entry
entry.map_wref = weakref.ref(map)
entry.version_tag = version_tag
entry.storageindex = storageindex
entry.w_method = w_method
if pycode.space.config.objspace.std.withmethodcachecounter:
entry.failure_counter += 1
def LOAD_ATTR_caching(pycode, w_obj, nameindex):
# this whole mess is to make the interpreter quite a bit faster; it's not
# used if we_are_jitted().
entry = pycode._mapdict_caches[nameindex]
map = w_obj._get_mapdict_map()
if entry.is_valid_for_map(map) and entry.w_method is None:
# everything matches, it's incredibly fast
return w_obj._mapdict_read_storage(entry.storageindex)
return LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map)
LOAD_ATTR_caching._always_inline_ = True
def LOAD_ATTR_slowpath(pycode, w_obj, nameindex, map):
space = pycode.space
w_name = pycode.co_names_w[nameindex]
if map is not None:
w_type = map.terminator.w_cls
w_descr = w_type.getattribute_if_not_from_object()
if w_descr is not None:
return space._handle_getattribute(w_descr, w_obj, w_name)
version_tag = w_type.version_tag()
if version_tag is not None:
name = space.str_w(w_name)
# We need to care for obscure cases in which the w_descr is
# a MutableCell, which may change without changing the version_tag
_, w_descr = w_type._pure_lookup_where_possibly_with_method_cache(
name, version_tag)
#
attrname, index = ("", INVALID)
if w_descr is None:
attrname, index = (name, DICT) # common case: no such attr in the class
elif isinstance(w_descr, MutableCell):
pass # we have a MutableCell in the class: give up
elif space.is_data_descr(w_descr):
# we have a data descriptor, which means the dictionary value
# (if any) has no relevance.
from pypy.interpreter.typedef import Member
if isinstance(w_descr, Member): # it is a slot -- easy case
attrname, index = ("slot", SLOTS_STARTING_FROM + w_descr.index)
else:
# There is a non-data descriptor in the class. If there is
# also a dict attribute, use the latter, caching its storageindex.
                # If not, we lose. We could do better in this case too,
# but we don't care too much; the common case of a method
# invocation is handled by LOOKUP_METHOD_xxx below.
attrname = name
index = DICT
#
if index != INVALID:
attr = map.find_map_attr(attrname, index)
if attr is not None:
# Note that if map.terminator is a DevolvedDictTerminator,
# map.find_map_attr will always return None if index==DICT.
_fill_cache(pycode, nameindex, map, version_tag, attr.storageindex)
return w_obj._mapdict_read_storage(attr.storageindex)
if space.config.objspace.std.withmethodcachecounter:
INVALID_CACHE_ENTRY.failure_counter += 1
return space.getattr(w_obj, w_name)
LOAD_ATTR_slowpath._dont_inline_ = True
def LOOKUP_METHOD_mapdict(f, nameindex, w_obj):
pycode = f.getcode()
entry = pycode._mapdict_caches[nameindex]
if entry.is_valid_for_obj(w_obj):
w_method = entry.w_method
if w_method is not None:
f.pushvalue(w_method)
f.pushvalue(w_obj)
return True
return False
def LOOKUP_METHOD_mapdict_fill_cache_method(space, pycode, name, nameindex,
w_obj, w_type):
version_tag = w_type.version_tag()
if version_tag is None:
return
map = w_obj._get_mapdict_map()
if map is None or isinstance(map.terminator, DevolvedDictTerminator):
return
# We know here that w_obj.getdictvalue(space, name) just returned None,
# so the 'name' is not in the instance. We repeat the lookup to find it
# in the class, this time taking care of the result: it can be either a
# quasi-constant class attribute, or actually a MutableCell --- which we
# must not cache. (It should not be None here, but you never know...)
_, w_method = w_type._pure_lookup_where_possibly_with_method_cache(
name, version_tag)
if w_method is None or isinstance(w_method, MutableCell):
return
_fill_cache(pycode, nameindex, map, version_tag, -1, w_method)
# XXX fix me: if a function contains a loop with both LOAD_ATTR and
# XXX LOOKUP_METHOD on the same attribute name, it keeps trashing and
# XXX rebuilding the cache
|
py | b40927b8f9d39168c153e30b1fd3f0a883af1fca | import torch
from torch.autograd import Function
from tsd.utils.ext import get_func
from tsd.utils.gen import gen
class ScatterMin(Function):
@staticmethod
def forward(ctx, out, src, index, dim):
arg = index.new_full(out.size(), -1)
func = get_func('scatter_min', src)
func(src, index, out, arg, dim)
ctx.mark_dirty(out)
ctx.dim = dim
ctx.save_for_backward(index, arg)
return out, arg
@staticmethod
def backward(ctx, grad_out, grad_arg):
index, arg = ctx.saved_tensors
grad_src = None
if ctx.needs_input_grad[1]:
size = list(index.size())
size[ctx.dim] += 1
grad_src = grad_out.new_zeros(size)
grad_src.scatter_(ctx.dim, arg.detach() + 1, grad_out)
grad_src = grad_src.narrow(ctx.dim, 1, index.size(ctx.dim))
return None, grad_src, None, None
def scatter_min(src, index, dim=-1, out=None, dim_size=None, fill_value=None):
r"""
|
.. image:: https://raw.githubusercontent.com/rusty1s/pytsd/
master/docs/source/_figures/min.svg?sanitize=true
:align: center
:width: 400px
|
Minimizes all values from the :attr:`src` tensor into :attr:`out` at the
indices specified in the :attr:`index` tensor along a given axis
:attr:`dim`.If multiple indices reference the same location, their
**contributions minimize** (`cf.` :meth:`~tsd.scatter_add`).
The second return tensor contains index location in :attr:`src` of each
minimum value (known as argmin).
For one-dimensional tensors, the operation computes
.. math::
\mathrm{out}_i = \min(\mathrm{out}_i, \min_j(\mathrm{src}_j))
where :math:`\min_j` is over :math:`j` such that
:math:`\mathrm{index}_j = i`.
Args:
src (Tensor): The source tensor.
index (LongTensor): The indices of elements to scatter.
dim (int, optional): The axis along which to index.
(default: :obj:`-1`)
out (Tensor, optional): The destination tensor. (default: :obj:`None`)
dim_size (int, optional): If :attr:`out` is not given, automatically
create output with size :attr:`dim_size` at dimension :attr:`dim`.
If :attr:`dim_size` is not given, a minimal sized output tensor is
returned. (default: :obj:`None`)
        fill_value (int, optional): If :attr:`out` is not given, automatically
            fill output tensor with :attr:`fill_value`. If set to :obj:`None`,
            the output tensor is filled with the greatest possible value of
            :obj:`src.dtype`. (default: :obj:`None`)
:rtype: (:class:`Tensor`, :class:`LongTensor`)
.. testsetup::
import torch
.. testcode::
from tsd import scatter_min
src = torch.Tensor([[-2, 0, -1, -4, -3], [0, -2, -1, -3, -4]])
index = torch.tensor([[ 4, 5, 4, 2, 3], [0, 0, 2, 2, 1]])
out = src.new_zeros((2, 6))
out, argmin = scatter_min(src, index, out=out)
print(out)
print(argmin)
.. testoutput::
tensor([[ 0., 0., -4., -3., -2., 0.],
[-2., -4., -3., 0., 0., 0.]])
tensor([[-1, -1, 3, 4, 0, 1],
[ 1, 4, 3, -1, -1, -1]])
"""
if fill_value is None:
op = torch.finfo if torch.is_floating_point(src) else torch.iinfo
fill_value = op(src.dtype).max
src, out, index, dim = gen(src, index, dim, out, dim_size, fill_value)
if src.size(dim) == 0: # pragma: no cover
return out, index.new_full(out.size(), -1)
return ScatterMin.apply(out, src, index, dim)
|
py | b4092834d1913184a94280282e4b548fc2864660 | _base_ = [
'../_base_/models/hv_pointpillars_residual_attention_second_ran_ori_kitti.py',
'../_base_/datasets/kitti-3d-3class.py',
'../_base_/schedules/cyclic_40e.py', '../_base_/default_runtime.py'
]
point_cloud_range = [0, -39.68, -3, 69.12, 39.68, 1]
# dataset settings
data_root = 'data/kitti/'
class_names = ['Pedestrian', 'Cyclist', 'Car']
# PointPillars adopts different sampling strategies among classes
db_sampler = dict(
data_root=data_root,
info_path=data_root + 'kitti_dbinfos_train.pkl',
rate=1.0,
prepare=dict(
filter_by_difficulty=[-1],
filter_by_min_points=dict(Car=5, Pedestrian=10, Cyclist=10)),
classes=class_names,
sample_groups=dict(Car=15, Pedestrian=10, Cyclist=10))
# PointPillars uses different augmentation hyper parameters
train_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
dict(type='ObjectSample', db_sampler=db_sampler),
dict(
type='ObjectNoise',
num_try=100,
translation_std=[0.25, 0.25, 0.25],
global_rot_range=[0.0, 0.0],
rot_range=[-0.15707963267, 0.15707963267]),
dict(type='RandomFlip3D', flip_ratio_bev_horizontal=0.5),
dict(
type='GlobalRotScaleTrans',
rot_range=[-0.78539816, 0.78539816],
scale_ratio_range=[0.95, 1.05]),
dict(type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(type='ObjectRangeFilter', point_cloud_range=point_cloud_range),
dict(type='PointShuffle'),
dict(type='DefaultFormatBundle3D', class_names=class_names),
dict(type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
test_pipeline = [
dict(type='LoadPointsFromFile', coord_type='LIDAR', load_dim=4, use_dim=4),
dict(
type='MultiScaleFlipAug3D',
img_scale=(1333, 800),
pts_scale_ratio=1,
flip=False,
transforms=[
dict(
type='GlobalRotScaleTrans',
rot_range=[0, 0],
scale_ratio_range=[1., 1.],
translation_std=[0, 0, 0]),
dict(type='RandomFlip3D'),
dict(
type='PointsRangeFilter', point_cloud_range=point_cloud_range),
dict(
type='DefaultFormatBundle3D',
class_names=class_names,
with_label=False),
dict(type='Collect3D', keys=['points'])
])
]
data = dict(
train=dict(dataset=dict(pipeline=train_pipeline, classes=class_names)),
val=dict(pipeline=test_pipeline, classes=class_names),
test=dict(pipeline=test_pipeline, classes=class_names),
samples_per_gpu=12)
# In practice PointPillars also uses a different schedule
# optimizer
lr = 0.00025
optimizer = dict(lr=lr)
# max_norm=35 is slightly better than 10 for PointPillars in the earlier
# development of the codebase, so we keep the setting, but we did not
# specifically tune this parameter.
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# Use evaluation interval=2 to reduce the number of evaluations
evaluation = dict(interval=2)
# PointPillars usually needs a longer schedule than SECOND, so we simply double
# the training schedule. Keep in mind that since we use RepeatDataset with a
# repeat factor of 2, we actually train for 160 epochs.
runner = dict(max_epochs=80)
|
py | b409296a4ead906f2938297ac592065b4fa54ed3 | """
Provide a solution to the problem of implementing a function that converts a spreadsheet
column ID (i.e., “A”, “B”, “C”, …, “Z”, “AA”, etc.) to the corresponding integer.
For example, “A” equals 1 because it represents the first column, while “AA” equals 27 because
it represents the 27th column.
"""
def spreadsheet_encoding(val_str):
count = len(val_str) - 1
value = 0
for i in val_str:
value += 26 ** count * (ord(i) - ord('A') + 1)
count -= 1
return value
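# Illustrative extra checks (added here as an assumption, not in the original file):
# "A" -> 26**0 * 1 = 1; "AA" -> 26**1 * 1 + 26**0 * 1 = 27, matching the docstring.
print(spreadsheet_encoding('A'))   # 1
print(spreadsheet_encoding('AA'))  # 27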
print(spreadsheet_encoding('ABC')) |
py | b40929b7f254db4443cac6c35cbef36f4f7d6251 | #Modules Required
import cv2
#Database Declaration
recognizer = cv2.face.LBPHFaceRecognizer_create()
recognizer.read('resources/trained_model.yml')
cascade = "resources/haarcascade_frontalface_default.xml"
detector = cv2.CascadeClassifier(cascade);
#Font Declaration
font = cv2.FONT_HERSHEY_DUPLEX
#Retrieving Users
id = 0
pad = open('resources/user.txt','r')
names = pad.read().splitlines()
pad.close()
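#user.txt is assumed to hold one user name per line, ordered by the numeric label id
#used when resources/trained_model.yml was trained (names[id] lookup below)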
#Video Dimensions
camera = cv2.VideoCapture(0)
camera.set(3, 1280)
camera.set(4, 1024)
#Minimum Window Dimensions
min_width = 0.1*camera.get(3)
min_height = 0.1*camera.get(4)
#Initiating Recognition
while True:
ret, img = camera.read()
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
faces = detector.detectMultiScale(
gray,
scaleFactor = 1.2,
minNeighbors = 5,
minSize = (int(min_width), int(min_height)),
)
for(x,y,w,h) in faces:
cv2.rectangle(img, (x,y), (x+w,y+h), (0,255,0), 2)
id, confidence = recognizer.predict(gray[y:y+h,x:x+w])
        #LBPH confidence: lower is better (0 would be a perfect match); below 100 is treated as a recognized face
if (confidence < 100):
id = names[id]
confidence = " {0}%".format(round(100 - confidence))
else:
id = "unknown"
confidence = " {0}%".format(round(100 - confidence))
cv2.putText(img, str(id), (x+5,y-5), font, 1, (255,255,255), 2)
cv2.putText(img, str(confidence), (x+5,y+h-5), font, 1, (255,255,0), 1)
cv2.imshow('camera',img)
#Escape key Protocol
k = cv2.waitKey(10) & 0xff
if k == 27:
break
#Exiting
camera.release()
cv2.destroyAllWindows()
|
py | b40929cdcd19d92d8477c3fc88cceb5f493e41fa | import argparse
import subprocess
import logging
import sys
from ipwhois import IPWhois
import json
# Colours for console output
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
# Single whois lookup
def whois_single(domain):
obj = IPWhois(domain)
res = obj.lookup_whois(inc_nir=True)
output = json.dumps(res)
result = []
for line in output.split('\n', 1):
result.append(line)
return result
# Multiple inputs
def whois_multiple(domains):
result = []
for domain in domains:
result += whois_single(domain)
return result
# Read external file with list
def read_filename(filename):
result = []
with open(filename) as f:
for line in f:
line = line.strip()
if line.startswith("#") or line == "":
continue
result.append(line)
return result
def main():
# Argument Parser
parser = argparse.ArgumentParser(description="Run WhoisCheck for IP addresses", usage="python LinuxIPWhois.py", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-s", dest="single", help="Single IP to check, example = -s 216.58.213.14")
parser.add_argument("-f", dest="filename",
help="File to be checked with one IP per line")
args = parser.parse_args()
# Configure logging
logging.basicConfig(filename='WhoisIPLookup.log', level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(name)s %(message)s')
logger = logging.getLogger(__name__)
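    # Log a sample ZeroDivisionError (the division below only produces a log entry)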
try:
1 / 0
except ZeroDivisionError as err:
logger.error(err)
# Perform whois lookups and output results
if args.single is not None:
results = whois_single(args.single)
# print(bcolors.OKBLUE + "WhoIS:" + "\n" + "\n" + str(results) + "\n" + bcolors.ENDC)
        with open('Results.txt', 'a') as outfile:
            json.dump(results, outfile)
        print(results)
elif args.filename is not None:
domains = whois_multiple(read_filename(args.filename))
# print(bcolors.OKBLUE + "WhoIS" + "\n" + "\n" + str(domains) + "\n" + bcolors.ENDC)
with open('Results.txt', 'a') as outfile:
outfile.write(repr(domains))
if __name__ == "__main__":
main()
|
py | b4092a4f8ebbedb76592f9ef20521d4c73576d90 | from pyramid.view import view_config
@view_config(route_name='logout')
def logout(request):
from pyramid.security import forget
from pyramid.httpexceptions import HTTPFound
if 'id' in request.session: del request.session['id']
if 'group' in request.session: del request.session['group']
    request.session.flash('您已登出', 'info')  # flash message: "You have been logged out"
headers = forget(request)
raise HTTPFound(location=request.route_path('home'), headers=headers)
|
py | b4092b2512843634bcb8a148c64d87f880cecc64 | import json
import os
import requests
from graphql import GraphQLError
from frux_app_server.services.logger import logger
class SmartContractException(GraphQLError):
pass
class SmartContractClient:
def __init__(self):
self.url = os.environ.get('FRUX_SC_URL', 'http://localhost:3000')
self.api_key = os.environ.get('FRUX_SC_API_KEY', '')
def _request(
self,
path,
expected_code=None,
message='make request',
func=requests.get,
body=None,
):
if not body:
body = {}
try:
r = func(
f'{self.url}{path}', json=body, headers={'x-api-key': self.api_key}
)
except requests.ConnectionError as e:
logger.error('Unable to %s! Payments service is down!', message)
raise SmartContractException(
f'Unable to {message}! Payments service is down!'
) from e
if r.status_code == 401:
logger.error('Unable to %s! Invalid API key!', message)
raise SmartContractException(f'Unable to {message}! Invalid API key!')
if r.status_code == 503:
logger.error('Unable to %s! Payments service is down!', message)
raise SmartContractException(
f'Unable to {message}! Payments service is down!'
)
if expected_code and r.status_code != expected_code:
logger.error('Unable to %s! %s - %s', message, str(r.status_code), r.text)
raise SmartContractException(
f'Unable to {message}! {r.status_code} - {r.text}'
)
res = json.loads(r.content.decode())
res['_status_code'] = r.status_code
res['_text'] = r.text
return res
def _validate_enough_funds(self, res):
if res['_status_code'] != 200:
if 'code' in res and res['code'] == 'INSUFFICIENT_FUNDS':
raise SmartContractException(
'Unable to fund project! Insufficient funds!'
)
raise SmartContractException(
f'Unable to fund project! {res["_status_code"]} - {res["_text"]}'
)
def wei_to_eth(self, wei_hex):
return float.fromhex(wei_hex) / (10 ** 18)
def create_user_wallet(self):
'''
Requests a new user wallet, returns a dictionary with the address and internal ID
'''
return self._request(
'/wallet', expected_code=200, message='request wallet', func=requests.post,
)
def get_wallet_balance(self, wallet_id):
'''
Returns the balance for a given wallet
'''
path = f'/wallet/{wallet_id}/balance'
return self._request(path, expected_code=200, message='request balance')[
'balance'
]
def get_private_key(self, wallet_id):
'''
Returns the private key for a given wallet
'''
path = f'/wallet/{wallet_id}'
return self._request(path, expected_code=200, message='request private key')[
'privateKey'
]
def invest_project(self, project_hash, funder_id, amount):
'''
Invests in the project with the given hash the amount of ETH for the funder with the given ID,
returns the total invested amount
'''
body = {
"funderId": funder_id,
"amountToFund": amount,
}
path = f"/project/{project_hash}"
res = self._request(path, message='fund project', func=requests.post, body=body)
self._validate_enough_funds(res)
invested_amount = self.wei_to_eth(res['value']['hex'])
return invested_amount
def withdraw_funds(self, project_hash, funder_id, amount):
'''
Withdraws funds invested in the project with the given hash the amount
of ETH for the funder with the given ID.
Returns the withdrawn fund amount.
'''
body = {'funderId': funder_id}
if amount:
body['fundsToWithdraw'] = amount
self._request(
f"/project/{project_hash}/withdraw",
200,
'withdraw funds',
requests.post,
body,
)
return amount
def complete_stage(self, seer_id, project_hash, stage_index):
'''
Completes the stage with the given index in the project with the given hash.
Returns the stage_index
'''
body = {"reviewerId": seer_id}
res = self._request(
f"/project/{project_hash}/stageId/{stage_index}",
message='complete stage',
func=requests.post,
body=body,
)
self._validate_enough_funds(res)
return stage_index
def create_project_smart_contract(self, owner_id, reviewer_id, stages_cost):
'''
Creates a new smart contract for the owner, reviewer and related stages costs.
Returns the transaction hash.
'''
body = {
"ownerId": owner_id,
"reviewerId": reviewer_id,
"stagesCost": stages_cost,
}
res = self._request(
"/project", message='create project', func=requests.post, body=body
)
return res['txHash']
smart_contract_client = SmartContractClient()
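# Minimal usage sketch (illustration only, not part of the service module).
# It assumes a frux-sc instance is reachable at FRUX_SC_URL with a valid
# FRUX_SC_API_KEY; the wallet response keys are whatever the service returns
# (the docstring above only promises an address and an internal ID).
if __name__ == '__main__':
    wallet = smart_contract_client.create_user_wallet()
    print('created wallet:', wallet)
    # Assuming the internal ID is exposed under a key such as 'id':
    # print('balance (ETH):', smart_contract_client.get_wallet_balance(wallet['id']))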
|
py | b4092b97a431fad1262d7d5e0ee8ec76a9f3c740 | """
Copyright (c) 2020, 2021 Russ Hughes
This file incorporates work covered by the following copyright and
permission notice and is licensed under the same terms:
The MIT License (MIT)
Copyright (c) 2019 Ivan Belokobylskiy
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
GC9A01 Display driver in MicroPython based on devbis st7789py_mpy module from
https://github.com/devbis/st7789py_mpy modified to drive 240x240 pixel GC9A01
displays.
The driver supports display rotation, mirroring, scrolling and drawing text
using 8 and 16 bit wide bitmap fonts with heights that are multiples of 8.
Included are 12 bitmap fonts derived from classic pc text mode fonts and a
couple of example programs.
This is a work in progress. Documentation can be found at
https://penfold.owt.com/gc9a01py/.
If you are looking for a faster driver with additional features, check out the
C version of the driver at https://github.com/russhughes/gc9a01_mpy
"""
# pylint: disable=invalid-name,import-error
import time
from micropython import const
import ustruct as struct
# commands
GC9A01_SWRESET = const(0x01)
GC9A01_SLPIN = const(0x10)
GC9A01_SLPOUT = const(0x11)
GC9A01_INVOFF = const(0x20)
GC9A01_INVON = const(0x21)
GC9A01_DISPOFF = const(0x28)
GC9A01_DISPON = const(0x29)
GC9A01_CASET = const(0x2A)
GC9A01_RASET = const(0x2B)
GC9A01_RAMWR = const(0x2C)
GC9A01_VSCRDEF = const(0x33)
GC9A01_COLMOD = const(0x3A)
GC9A01_MADCTL = const(0x36)
GC9A01_VSCSAD = const(0x37)
# Color definitions
BLACK = const(0x0000)
BLUE = const(0x001F)
RED = const(0xF800)
GREEN = const(0x07E0)
CYAN = const(0x07FF)
MAGENTA = const(0xF81F)
YELLOW = const(0xFFE0)
WHITE = const(0xFFFF)
_ENCODE_PIXEL = ">H"
_ENCODE_POS = ">HH"
_DECODE_PIXEL = ">BBB"
_BUFFER_SIZE = const(256)
_BIT7 = const(0x80)
_BIT6 = const(0x40)
_BIT5 = const(0x20)
_BIT4 = const(0x10)
_BIT3 = const(0x08)
_BIT2 = const(0x04)
_BIT1 = const(0x02)
_BIT0 = const(0x01)
ROTATIONS = [
0x48, # 0 - PORTRAIT
0x28, # 1 - LANDSCAPE
0x88, # 2 - INVERTED_PORTRAIT
0xe8, # 3 - INVERTED_LANDSCAPE
0x08, # 4 - PORTRAIT_MIRRORED
0x68, # 5 - LANDSCAPE_MIRRORED
0xc8, # 6 - INVERTED_PORTRAIT_MIRRORED
0xa8] # 7 - INVERTED_LANDSCAPE_MIRRORED]
def color565(red, green=0, blue=0):
"""
Convert red, green and blue values (0-255) into a 16-bit 565 encoded color.
"""
try:
red, green, blue = red # see if the first var is a tuple/list
except TypeError:
pass
return (red & 0xf8) << 8 | (green & 0xfc) << 3 | blue >> 3
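# Worked example: color565(255, 0, 0) keeps the top 5 red bits -> 0xF800 (RED);
# color565(255, 255, 255) packs the 5+6+5 set bits -> 0xFFFF (WHITE).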
def _encode_pos(x, y):
"""Encode a postion into bytes."""
return struct.pack(_ENCODE_POS, x, y)
def _encode_pixel(color):
"""Encode a pixel color into bytes."""
return struct.pack(_ENCODE_PIXEL, color)
class GC9A01():
"""
GC9A01 driver class
Args:
spi (spi): spi object (Required)
dc (pin): dc pin (Required)
        cs (pin): cs pin (optional)
reset (pin): reset pin
backlight(pin): backlight pin
rotation (int): display rotation
"""
def __init__(
self,
spi=None,
dc=None,
cs=None,
reset=None,
backlight=None,
rotation=0):
"""
Initialize display.
"""
if spi is None:
raise ValueError("SPI object is required.")
if dc is None:
raise ValueError("dc pin is required.")
self.width = 240
self.height = 240
self.spi = spi
self.reset = reset
self.dc = dc
self.cs = cs
self.backlight = backlight
self._rotation = rotation % 8
self.hard_reset()
time.sleep_ms(100)
self._write(0xEF)
self._write(0xEB, b'\x14')
self._write(0xFE)
self._write(0xEF)
self._write(0xEB, b'\x14')
self._write(0x84, b'\x40')
self._write(0x85, b'\xFF')
self._write(0x86, b'\xFF')
self._write(0x87, b'\xFF')
self._write(0x88, b'\x0A')
self._write(0x89, b'\x21')
self._write(0x8A, b'\x00')
self._write(0x8B, b'\x80')
self._write(0x8C, b'\x01')
self._write(0x8D, b'\x01')
self._write(0x8E, b'\xFF')
self._write(0x8F, b'\xFF')
self._write(0xB6, b'\x00\x00')
self._write(0x3A, b'\x55')
self._write(0x90, b'\x08\x08\x08\x08')
self._write(0xBD, b'\x06')
self._write(0xBC, b'\x00')
self._write(0xFF, b'\x60\x01\x04')
self._write(0xC3, b'\x13')
self._write(0xC4, b'\x13')
self._write(0xC9, b'\x22')
self._write(0xBE, b'\x11')
self._write(0xE1, b'\x10\x0E')
self._write(0xDF, b'\x21\x0c\x02')
self._write(0xF0, b'\x45\x09\x08\x08\x26\x2A')
self._write(0xF1, b'\x43\x70\x72\x36\x37\x6F')
self._write(0xF2, b'\x45\x09\x08\x08\x26\x2A')
self._write(0xF3, b'\x43\x70\x72\x36\x37\x6F')
self._write(0xED, b'\x1B\x0B')
self._write(0xAE, b'\x77')
self._write(0xCD, b'\x63')
self._write(0x70, b'\x07\x07\x04\x0E\x0F\x09\x07\x08\x03')
self._write(0xE8, b'\x34')
self._write(
0x62,
b'\x18\x0D\x71\xED\x70\x70\x18\x0F\x71\xEF\x70\x70')
self._write(
0x63,
b'\x18\x11\x71\xF1\x70\x70\x18\x13\x71\xF3\x70\x70')
self._write(0x64, b'\x28\x29\xF1\x01\xF1\x00\x07')
self._write(
0x66,
b'\x3C\x00\xCD\x67\x45\x45\x10\x00\x00\x00')
self._write(
0x67,
b'\x00\x3C\x00\x00\x00\x01\x54\x10\x32\x98')
self._write(0x74, b'\x10\x85\x80\x00\x00\x4E\x00')
self._write(0x98, b'\x3e\x07')
self._write(0x35)
self._write(0x21)
self._write(0x11)
time.sleep_ms(120)
self._write(0x29)
time.sleep_ms(20)
self.rotation(self._rotation)
if backlight is not None:
backlight.value(1)
def _write(self, command=None, data=None):
"""SPI write to the device: commands and data."""
if self.cs:
self.cs.off()
if command is not None:
self.dc.off()
self.spi.write(bytes([command]))
if data is not None:
self.dc.on()
self.spi.write(data)
if self.cs:
self.cs.on()
def hard_reset(self):
"""Hard reset display."""
if self.reset:
if self.cs:
self.cs.off()
self.reset.on()
time.sleep_ms(50)
self.reset.off()
time.sleep_ms(50)
self.reset.on()
time.sleep_ms(150)
if self.cs:
self.cs.on()
def soft_reset(self):
"""Soft reset display."""
self._write(GC9A01_SWRESET)
time.sleep_ms(150)
def sleep_mode(self, value):
"""
Enable or disable display sleep mode.
Args:
value (bool): if True enable sleep mode.
if False disable sleep mode
"""
if value:
self._write(GC9A01_SLPIN)
else:
self._write(GC9A01_SLPOUT)
def inversion_mode(self, value):
"""
Enable or disable display inversion mode.
Args:
value (bool): if True enable inversion mode.
if False disable inversion mode
"""
if value:
self._write(GC9A01_INVON)
else:
self._write(GC9A01_INVOFF)
def rotation(self, rotation):
"""
Set display rotation.
Args:
rotation (int):
- 0 - PORTRAIT
- 1 - LANDSCAPE
- 2 - INVERTED PORTRAIT
- 3 - INVERTED LANDSCAPE
- 4 - PORTRAIT MIRRORED
- 5 - LANDSCAPE MIRRORED
- 6 - INVERTED PORTRAIT MIRRORED
- 7 - INVERTED LANDSCAPE MIRRORED
"""
self._rotation = rotation % 8
self._write(GC9A01_MADCTL, bytes([ROTATIONS[self._rotation]]))
def _set_columns(self, start, end):
"""
Send CASET (column address set) command to display.
Args:
start (int): column start address
end (int): column end address
"""
if start <= end <= self.width:
self._write(GC9A01_CASET, _encode_pos(
start, end))
def _set_rows(self, start, end):
"""
Send RASET (row address set) command to display.
Args:
start (int): row start address
end (int): row end address
"""
if start <= end <= self.height:
self._write(GC9A01_RASET, _encode_pos(
start, end))
def _set_window(self, x0, y0, x1, y1):
"""
Set window to column and row address.
Args:
x0 (int): column start address
y0 (int): row start address
x1 (int): column end address
y1 (int): row end address
"""
self._set_columns(x0, x1)
self._set_rows(y0, y1)
self._write(GC9A01_RAMWR)
def vline(self, x, y, length, color):
"""
Draw vertical line at the given location and color.
Args:
x (int): x coordinate
            y (int): y coordinate
length (int): length of line
color (int): 565 encoded color
"""
self.fill_rect(x, y, 1, length, color)
def hline(self, x, y, length, color):
"""
Draw horizontal line at the given location and color.
Args:
x (int): x coordinate
            y (int): y coordinate
length (int): length of line
color (int): 565 encoded color
"""
self.fill_rect(x, y, length, 1, color)
def pixel(self, x, y, color):
"""
Draw a pixel at the given location and color.
Args:
x (int): x coordinate
            y (int): y coordinate
color (int): 565 encoded color
"""
self._set_window(x, y, x, y)
self._write(None, _encode_pixel(color))
def blit_buffer(self, buffer, x, y, width, height):
"""
Copy buffer to display at the given location.
Args:
buffer (bytes): Data to copy to display
x (int): Top left corner x coordinate
            y (int): Top left corner y coordinate
width (int): Width
height (int): Height
"""
self._set_window(x, y, x + width - 1, y + height - 1)
self._write(None, buffer)
def rect(self, x, y, w, h, color):
"""
Draw a rectangle at the given location, size and color.
Args:
x (int): Top left corner x coordinate
y (int): Top left corner y coordinate
            w (int): Width in pixels
            h (int): Height in pixels
color (int): 565 encoded color
"""
self.hline(x, y, w, color)
self.vline(x, y, h, color)
self.vline(x + w - 1, y, h, color)
self.hline(x, y + h - 1, w, color)
def fill_rect(self, x, y, width, height, color):
"""
Draw a rectangle at the given location, size and filled with color.
Args:
x (int): Top left corner x coordinate
y (int): Top left corner y coordinate
width (int): Width in pixels
height (int): Height in pixels
color (int): 565 encoded color
"""
self._set_window(x, y, x + width - 1, y + height - 1)
chunks, rest = divmod(width * height, _BUFFER_SIZE)
pixel = _encode_pixel(color)
self.dc.on()
if chunks:
data = pixel * _BUFFER_SIZE
for _ in range(chunks):
self._write(None, data)
if rest:
self._write(None, pixel * rest)
def fill(self, color):
"""
Fill the entire FrameBuffer with the specified color.
Args:
color (int): 565 encoded color
"""
self.fill_rect(0, 0, self.width, self.height, color)
def line(self, x0, y0, x1, y1, color):
"""
Draw a single pixel wide line starting at x0, y0 and ending at x1, y1.
Args:
x0 (int): Start point x coordinate
y0 (int): Start point y coordinate
x1 (int): End point x coordinate
y1 (int): End point y coordinate
color (int): 565 encoded color
"""
steep = abs(y1 - y0) > abs(x1 - x0)
if steep:
x0, y0 = y0, x0
x1, y1 = y1, x1
if x0 > x1:
x0, x1 = x1, x0
y0, y1 = y1, y0
dx = x1 - x0
dy = abs(y1 - y0)
err = dx // 2
if y0 < y1:
ystep = 1
else:
ystep = -1
while x0 <= x1:
if steep:
self.pixel(y0, x0, color)
else:
self.pixel(x0, y0, color)
err -= dy
if err < 0:
y0 += ystep
err += dx
x0 += 1
def vscrdef(self, tfa, vsa, bfa):
"""
Set Vertical Scrolling Definition.
To scroll a 135x240 display these values should be 40, 240, 40.
There are 40 lines above the display that are not shown followed by
240 lines that are shown followed by 40 more lines that are not shown.
You could write to these areas off display and scroll them into view by
changing the TFA, VSA and BFA values.
Args:
tfa (int): Top Fixed Area
vsa (int): Vertical Scrolling Area
bfa (int): Bottom Fixed Area
"""
struct.pack(">HHH")
self._write(GC9A01_VSCRDEF, struct.pack(">HHH", tfa, vsa, bfa))
def vscsad(self, vssa):
"""
Set Vertical Scroll Start Address of RAM.
Defines which line in the Frame Memory will be written as the first
line after the last line of the Top Fixed Area on the display
Example:
for line in range(40, 280, 1):
tft.vscsad(line)
utime.sleep(0.01)
Args:
vssa (int): Vertical Scrolling Start Address
"""
self._write(GC9A01_VSCSAD, struct.pack(">H", vssa))
def _text8(self, font, text, x0, y0, color=WHITE, background=BLACK):
"""
Internal method to write characters with width of 8 and
heights of 8 or 16.
Args:
font (module): font module to use
text (str): text to write
x0 (int): column to start drawing at
y0 (int): row to start drawing at
color (int): 565 encoded color to use for characters
background (int): 565 encoded color to use for background
"""
for char in text:
ch = ord(char)
if (font.FIRST <= ch < font.LAST
and x0+font.WIDTH <= self.width
and y0+font.HEIGHT <= self.height):
if font.HEIGHT == 8:
passes = 1
size = 8
each = 0
else:
passes = 2
size = 16
each = 8
for line in range(passes):
idx = (ch-font.FIRST)*size+(each*line)
#
# Yes, this looks bad, but it is fast
#
buffer = struct.pack(
'>64H',
color if font.FONT[idx] & _BIT7 else background,
color if font.FONT[idx] & _BIT6 else background,
color if font.FONT[idx] & _BIT5 else background,
color if font.FONT[idx] & _BIT4 else background,
color if font.FONT[idx] & _BIT3 else background,
color if font.FONT[idx] & _BIT2 else background,
color if font.FONT[idx] & _BIT1 else background,
color if font.FONT[idx] & _BIT0 else background,
color if font.FONT[idx+1] & _BIT7 else background,
color if font.FONT[idx+1] & _BIT6 else background,
color if font.FONT[idx+1] & _BIT5 else background,
color if font.FONT[idx+1] & _BIT4 else background,
color if font.FONT[idx+1] & _BIT3 else background,
color if font.FONT[idx+1] & _BIT2 else background,
color if font.FONT[idx+1] & _BIT1 else background,
color if font.FONT[idx+1] & _BIT0 else background,
color if font.FONT[idx+2] & _BIT7 else background,
color if font.FONT[idx+2] & _BIT6 else background,
color if font.FONT[idx+2] & _BIT5 else background,
color if font.FONT[idx+2] & _BIT4 else background,
color if font.FONT[idx+2] & _BIT3 else background,
color if font.FONT[idx+2] & _BIT2 else background,
color if font.FONT[idx+2] & _BIT1 else background,
color if font.FONT[idx+2] & _BIT0 else background,
color if font.FONT[idx+3] & _BIT7 else background,
color if font.FONT[idx+3] & _BIT6 else background,
color if font.FONT[idx+3] & _BIT5 else background,
color if font.FONT[idx+3] & _BIT4 else background,
color if font.FONT[idx+3] & _BIT3 else background,
color if font.FONT[idx+3] & _BIT2 else background,
color if font.FONT[idx+3] & _BIT1 else background,
color if font.FONT[idx+3] & _BIT0 else background,
color if font.FONT[idx+4] & _BIT7 else background,
color if font.FONT[idx+4] & _BIT6 else background,
color if font.FONT[idx+4] & _BIT5 else background,
color if font.FONT[idx+4] & _BIT4 else background,
color if font.FONT[idx+4] & _BIT3 else background,
color if font.FONT[idx+4] & _BIT2 else background,
color if font.FONT[idx+4] & _BIT1 else background,
color if font.FONT[idx+4] & _BIT0 else background,
color if font.FONT[idx+5] & _BIT7 else background,
color if font.FONT[idx+5] & _BIT6 else background,
color if font.FONT[idx+5] & _BIT5 else background,
color if font.FONT[idx+5] & _BIT4 else background,
color if font.FONT[idx+5] & _BIT3 else background,
color if font.FONT[idx+5] & _BIT2 else background,
color if font.FONT[idx+5] & _BIT1 else background,
color if font.FONT[idx+5] & _BIT0 else background,
color if font.FONT[idx+6] & _BIT7 else background,
color if font.FONT[idx+6] & _BIT6 else background,
color if font.FONT[idx+6] & _BIT5 else background,
color if font.FONT[idx+6] & _BIT4 else background,
color if font.FONT[idx+6] & _BIT3 else background,
color if font.FONT[idx+6] & _BIT2 else background,
color if font.FONT[idx+6] & _BIT1 else background,
color if font.FONT[idx+6] & _BIT0 else background,
color if font.FONT[idx+7] & _BIT7 else background,
color if font.FONT[idx+7] & _BIT6 else background,
color if font.FONT[idx+7] & _BIT5 else background,
color if font.FONT[idx+7] & _BIT4 else background,
color if font.FONT[idx+7] & _BIT3 else background,
color if font.FONT[idx+7] & _BIT2 else background,
color if font.FONT[idx+7] & _BIT1 else background,
color if font.FONT[idx+7] & _BIT0 else background
)
self.blit_buffer(buffer, x0, y0+8*line, 8, 8)
x0 += 8
def _text16(self, font, text, x0, y0, color=WHITE, background=BLACK):
"""
Internal method to draw characters with width of 16 and heights of 16
or 32.
Args:
font (module): font module to use
text (str): text to write
x0 (int): column to start drawing at
y0 (int): row to start drawing at
color (int): 565 encoded color to use for characters
background (int): 565 encoded color to use for background
"""
for char in text:
ch = ord(char)
if (font.FIRST <= ch < font.LAST
and x0+font.WIDTH <= self.width
and y0+font.HEIGHT <= self.height):
if font.HEIGHT == 16:
passes = 2
size = 32
each = 16
else:
passes = 4
size = 64
each = 16
for line in range(passes):
idx = (ch-font.FIRST)*size+(each*line)
#
# And this looks even worse, but it is fast
#
buffer = struct.pack(
'>128H',
color if font.FONT[idx] & _BIT7 else background,
color if font.FONT[idx] & _BIT6 else background,
color if font.FONT[idx] & _BIT5 else background,
color if font.FONT[idx] & _BIT4 else background,
color if font.FONT[idx] & _BIT3 else background,
color if font.FONT[idx] & _BIT2 else background,
color if font.FONT[idx] & _BIT1 else background,
color if font.FONT[idx] & _BIT0 else background,
color if font.FONT[idx+1] & _BIT7 else background,
color if font.FONT[idx+1] & _BIT6 else background,
color if font.FONT[idx+1] & _BIT5 else background,
color if font.FONT[idx+1] & _BIT4 else background,
color if font.FONT[idx+1] & _BIT3 else background,
color if font.FONT[idx+1] & _BIT2 else background,
color if font.FONT[idx+1] & _BIT1 else background,
color if font.FONT[idx+1] & _BIT0 else background,
color if font.FONT[idx+2] & _BIT7 else background,
color if font.FONT[idx+2] & _BIT6 else background,
color if font.FONT[idx+2] & _BIT5 else background,
color if font.FONT[idx+2] & _BIT4 else background,
color if font.FONT[idx+2] & _BIT3 else background,
color if font.FONT[idx+2] & _BIT2 else background,
color if font.FONT[idx+2] & _BIT1 else background,
color if font.FONT[idx+2] & _BIT0 else background,
color if font.FONT[idx+3] & _BIT7 else background,
color if font.FONT[idx+3] & _BIT6 else background,
color if font.FONT[idx+3] & _BIT5 else background,
color if font.FONT[idx+3] & _BIT4 else background,
color if font.FONT[idx+3] & _BIT3 else background,
color if font.FONT[idx+3] & _BIT2 else background,
color if font.FONT[idx+3] & _BIT1 else background,
color if font.FONT[idx+3] & _BIT0 else background,
color if font.FONT[idx+4] & _BIT7 else background,
color if font.FONT[idx+4] & _BIT6 else background,
color if font.FONT[idx+4] & _BIT5 else background,
color if font.FONT[idx+4] & _BIT4 else background,
color if font.FONT[idx+4] & _BIT3 else background,
color if font.FONT[idx+4] & _BIT2 else background,
color if font.FONT[idx+4] & _BIT1 else background,
color if font.FONT[idx+4] & _BIT0 else background,
color if font.FONT[idx+5] & _BIT7 else background,
color if font.FONT[idx+5] & _BIT6 else background,
color if font.FONT[idx+5] & _BIT5 else background,
color if font.FONT[idx+5] & _BIT4 else background,
color if font.FONT[idx+5] & _BIT3 else background,
color if font.FONT[idx+5] & _BIT2 else background,
color if font.FONT[idx+5] & _BIT1 else background,
color if font.FONT[idx+5] & _BIT0 else background,
color if font.FONT[idx+6] & _BIT7 else background,
color if font.FONT[idx+6] & _BIT6 else background,
color if font.FONT[idx+6] & _BIT5 else background,
color if font.FONT[idx+6] & _BIT4 else background,
color if font.FONT[idx+6] & _BIT3 else background,
color if font.FONT[idx+6] & _BIT2 else background,
color if font.FONT[idx+6] & _BIT1 else background,
color if font.FONT[idx+6] & _BIT0 else background,
color if font.FONT[idx+7] & _BIT7 else background,
color if font.FONT[idx+7] & _BIT6 else background,
color if font.FONT[idx+7] & _BIT5 else background,
color if font.FONT[idx+7] & _BIT4 else background,
color if font.FONT[idx+7] & _BIT3 else background,
color if font.FONT[idx+7] & _BIT2 else background,
color if font.FONT[idx+7] & _BIT1 else background,
color if font.FONT[idx+7] & _BIT0 else background,
color if font.FONT[idx+8] & _BIT7 else background,
color if font.FONT[idx+8] & _BIT6 else background,
color if font.FONT[idx+8] & _BIT5 else background,
color if font.FONT[idx+8] & _BIT4 else background,
color if font.FONT[idx+8] & _BIT3 else background,
color if font.FONT[idx+8] & _BIT2 else background,
color if font.FONT[idx+8] & _BIT1 else background,
color if font.FONT[idx+8] & _BIT0 else background,
color if font.FONT[idx+9] & _BIT7 else background,
color if font.FONT[idx+9] & _BIT6 else background,
color if font.FONT[idx+9] & _BIT5 else background,
color if font.FONT[idx+9] & _BIT4 else background,
color if font.FONT[idx+9] & _BIT3 else background,
color if font.FONT[idx+9] & _BIT2 else background,
color if font.FONT[idx+9] & _BIT1 else background,
color if font.FONT[idx+9] & _BIT0 else background,
color if font.FONT[idx+10] & _BIT7 else background,
color if font.FONT[idx+10] & _BIT6 else background,
color if font.FONT[idx+10] & _BIT5 else background,
color if font.FONT[idx+10] & _BIT4 else background,
color if font.FONT[idx+10] & _BIT3 else background,
color if font.FONT[idx+10] & _BIT2 else background,
color if font.FONT[idx+10] & _BIT1 else background,
color if font.FONT[idx+10] & _BIT0 else background,
color if font.FONT[idx+11] & _BIT7 else background,
color if font.FONT[idx+11] & _BIT6 else background,
color if font.FONT[idx+11] & _BIT5 else background,
color if font.FONT[idx+11] & _BIT4 else background,
color if font.FONT[idx+11] & _BIT3 else background,
color if font.FONT[idx+11] & _BIT2 else background,
color if font.FONT[idx+11] & _BIT1 else background,
color if font.FONT[idx+11] & _BIT0 else background,
color if font.FONT[idx+12] & _BIT7 else background,
color if font.FONT[idx+12] & _BIT6 else background,
color if font.FONT[idx+12] & _BIT5 else background,
color if font.FONT[idx+12] & _BIT4 else background,
color if font.FONT[idx+12] & _BIT3 else background,
color if font.FONT[idx+12] & _BIT2 else background,
color if font.FONT[idx+12] & _BIT1 else background,
color if font.FONT[idx+12] & _BIT0 else background,
color if font.FONT[idx+13] & _BIT7 else background,
color if font.FONT[idx+13] & _BIT6 else background,
color if font.FONT[idx+13] & _BIT5 else background,
color if font.FONT[idx+13] & _BIT4 else background,
color if font.FONT[idx+13] & _BIT3 else background,
color if font.FONT[idx+13] & _BIT2 else background,
color if font.FONT[idx+13] & _BIT1 else background,
color if font.FONT[idx+13] & _BIT0 else background,
color if font.FONT[idx+14] & _BIT7 else background,
color if font.FONT[idx+14] & _BIT6 else background,
color if font.FONT[idx+14] & _BIT5 else background,
color if font.FONT[idx+14] & _BIT4 else background,
color if font.FONT[idx+14] & _BIT3 else background,
color if font.FONT[idx+14] & _BIT2 else background,
color if font.FONT[idx+14] & _BIT1 else background,
color if font.FONT[idx+14] & _BIT0 else background,
color if font.FONT[idx+15] & _BIT7 else background,
color if font.FONT[idx+15] & _BIT6 else background,
color if font.FONT[idx+15] & _BIT5 else background,
color if font.FONT[idx+15] & _BIT4 else background,
color if font.FONT[idx+15] & _BIT3 else background,
color if font.FONT[idx+15] & _BIT2 else background,
color if font.FONT[idx+15] & _BIT1 else background,
color if font.FONT[idx+15] & _BIT0 else background
)
self.blit_buffer(buffer, x0, y0+8*line, 16, 8)
x0 += font.WIDTH
def text(self, font, text, x0, y0, color=WHITE, background=BLACK):
"""
Draw text on display in specified font and colors. 8 and 16 bit wide
fonts are supported.
Args:
font (module): font module to use.
text (str): text to write
x0 (int): column to start drawing at
y0 (int): row to start drawing at
color (int): 565 encoded color to use for characters
background (int): 565 encoded color to use for background
"""
if font.WIDTH == 8:
self._text8(font, text, x0, y0, color, background)
else:
self._text16(font, text, x0, y0, color, background)
def bitmap(self, bitmap, x, y, index=0):
"""
Draw a bitmap on display at the specified column and row
Args:
bitmap (bitmap_module): The module containing the bitmap to draw
x (int): column to start drawing at
y (int): row to start drawing at
index (int): Optional index of bitmap to draw from multiple bitmap
module
"""
bitmap_size = bitmap.HEIGHT * bitmap.WIDTH
buffer_len = bitmap_size * 2
buffer = bytearray(buffer_len)
bs_bit = bitmap.BPP * bitmap_size * index if index > 0 else 0
for i in range(0, buffer_len, 2):
color_index = 0
for bit in range(bitmap.BPP):
color_index <<= 1
color_index |= (bitmap.BITMAP[bs_bit // 8]
& 1 << (7 - (bs_bit % 8))) > 0
bs_bit += 1
color = bitmap.PALETTE[color_index]
            buffer[i] = (color & 0xff00) >> 8
            buffer[i + 1] = color & 0xff
self.blit_buffer(buffer, x, y, bitmap.WIDTH, bitmap.HEIGHT)
# @micropython.native
def write(self, font, string, x, y, fg=WHITE, bg=BLACK):
"""
Write a string using a converted true-type font on the display starting
at the specified column and row
Args:
font (font): The module containing the converted true-type font
            string (str): The string to write
x (int): column to start writing
y (int): row to start writing
fg (int): foreground color, optional, defaults to WHITE
bg (int): background color, optional, defaults to BLACK
"""
buffer_len = font.HEIGHT * font.MAX_WIDTH * 2
buffer = bytearray(buffer_len)
fg_hi = (fg & 0xff00) >> 8
fg_lo = fg & 0xff
bg_hi = (bg & 0xff00) >> 8
bg_lo = bg & 0xff
for character in string:
try:
char_index = font.MAP.index(character)
offset = char_index * font.OFFSET_WIDTH
bs_bit = font.OFFSETS[offset]
if font.OFFSET_WIDTH > 1:
bs_bit = (bs_bit << 8) + font.OFFSETS[offset + 1]
if font.OFFSET_WIDTH > 2:
bs_bit = (bs_bit << 8) + font.OFFSETS[offset + 2]
char_width = font.WIDTHS[char_index]
buffer_needed = char_width * font.HEIGHT * 2
for i in range(0, buffer_needed, 2):
if font.BITMAPS[bs_bit // 8] & 1 << (7 - (bs_bit % 8)) > 0:
buffer[i] = fg_hi
buffer[i + 1] = fg_lo
else:
buffer[i] = bg_hi
buffer[i + 1] = bg_lo
bs_bit += 1
to_col = x + char_width - 1
to_row = y + font.HEIGHT - 1
if self.width > to_col and self.height > to_row:
self._set_window(x, y, to_col, to_row)
self._write(None, buffer[0:buffer_needed])
x += char_width
except ValueError:
pass
def write_width(self, font, string):
"""
Returns the width in pixels of the string if it was written with the
specified font
Args:
font (font): The module containing the converted true-type font
string (string): The string to measure
"""
width = 0
for character in string:
try:
char_index = font.MAP.index(character)
width += font.WIDTHS[char_index]
except ValueError:
pass
return width
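# Minimal usage sketch (not part of the driver). The SPI bus and pin numbers
# below are assumptions for a generic ESP32 wiring; adjust them to your board.
if __name__ == '__main__':
    from machine import Pin, SPI  # MicroPython-only imports
    spi = SPI(2, baudrate=30000000, sck=Pin(18), mosi=Pin(23))
    tft = GC9A01(
        spi,
        dc=Pin(2, Pin.OUT),
        cs=Pin(5, Pin.OUT),
        reset=Pin(4, Pin.OUT),
        rotation=0)
    tft.fill(BLACK)
    tft.fill_rect(20, 20, 200, 200, color565(0, 0, 255))
    tft.line(0, 0, 239, 239, WHITE)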
|
py | b4092b9eaec195eb5a90f368b3d4c45916d4e804 | import unittest
import json
import re
from base64 import b64encode
from flask import url_for
from app import create_app, db
from app.models import User, Role
class APITestCase(unittest.TestCase):
def setUp(self):
self.app = create_app('testing')
self.app_context = self.app.app_context()
self.app_context.push()
db.create_all()
Role.insert_roles()
self.client = self.app.test_client()
def tearDown(self):
db.session.remove()
db.drop_all()
self.app_context.pop()
def get_api_headers(self, username, password):
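        # Builds HTTP Basic auth headers: 'Basic ' + base64("<username>:<password>"),
        # plus JSON Accept/Content-Type headers.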
return {
'Authorization': 'Basic ' + b64encode(
(username + ':' + password).encode('utf-8')).decode('utf-8'),
'Accept': 'application/json',
'Content-Type': 'application/json'
}
def test_404(self):
response = self.client.get(
'/wrong/url',
headers=self.get_api_headers('email', 'password'))
self.assertTrue(response.status_code == 404)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['error'] == 'not found')
def test_no_auth(self):
response = self.client.get(url_for('api.get_posts'),
content_type='application/json')
self.assertTrue(response.status_code == 200)
def test_bad_auth(self):
# add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=True,
role=r)
db.session.add(u)
db.session.commit()
# authenticate with bad password
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 401)
def test_token_auth(self):
# add a user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=True,
role=r)
db.session.add(u)
db.session.commit()
# issue a request with a bad token
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('bad-token', ''))
self.assertTrue(response.status_code == 401)
# get a token
response = self.client.get(
url_for('api.get_token'),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertIsNotNone(json_response.get('token'))
token = json_response['token']
# issue a request with the token
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers(token, ''))
self.assertTrue(response.status_code == 200)
def test_anonymous(self):
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('', ''))
self.assertTrue(response.status_code == 200)
def test_unconfirmed_account(self):
# add an unconfirmed user
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u = User(email='[email protected]', password='cat', confirmed=False,
role=r)
db.session.add(u)
db.session.commit()
# get list of posts with the unconfirmed account
response = self.client.get(
url_for('api.get_posts'),
headers=self.get_api_headers('[email protected]', 'cat'))
self.assertTrue(response.status_code == 403)
def test_users(self):
# add two users
r = Role.query.filter_by(name='User').first()
self.assertIsNotNone(r)
u1 = User(email='[email protected]', username='john',
password='cat', confirmed=True, role=r)
u2 = User(email='[email protected]', username='susan',
password='dog', confirmed=True, role=r)
db.session.add_all([u1, u2])
db.session.commit()
# get users
response = self.client.get(
url_for('api.get_user', id=u1.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['username'] == 'john')
response = self.client.get(
url_for('api.get_user', id=u2.id),
headers=self.get_api_headers('[email protected]', 'dog'))
self.assertTrue(response.status_code == 200)
json_response = json.loads(response.data.decode('utf-8'))
self.assertTrue(json_response['username'] == 'susan')
|
py | b4092ceb86dd2f6b4d7d3c2ced2f31e41a04ca33 | from contextlib import contextmanager
import aloe_webdriver
import aloe_webdriver.django
from aloe import around, world, step
from selenium import webdriver
from selenium.webdriver.support.ui import Select
@around.each_example
@contextmanager
def with_browser(scenario,outline,steps):
world.browser = webdriver.Firefox()
yield
world.browser.quit()
delattr(world,'browser')
@step(r'I click in "(.*)"')
def click(scenario, link):
world.browser.find_element_by_link_text(link).click()
@step(r'I select "(.*)" from "(.*)"')
def select(scenario, text, select_id):
select = Select(world.browser.find_element_by_id(select_id))
select.select_by_visible_text(text)
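# Example step usage in a Gherkin .feature file (hypothetical scenario):
#   When I click in "Login"
#   And I select "English" from "id_language"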
|
py | b4092d038551e185bf0fbd44a733cc4158727e53 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This pip smoke test verifies dependency files exist in the pip package.
This script runs bazel queries to see what python files are required by the
tests and ensures they are in the pip package superset.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import subprocess
os.chdir(os.path.abspath(os.path.join(os.path.dirname(__file__), "../../..")))
PIP_PACKAGE_QUERY_EXPRESSION = (
"deps(//tensorflow/tools/pip_package:build_pip_package)")
# List of file paths containing BUILD files that should not be included for the
# pip smoke test.
BUILD_BLACKLIST = [
"tensorflow/lite/delegates/gpu",
"tensorflow/lite/delegates/gpu/metal",
"tensorflow/lite/delegates/gpu/metal/kernels",
"tensorflow/lite/experimental/objc",
"tensorflow/lite/experimental/swift",
]
def GetBuild(dir_base):
"""Get the list of BUILD file all targets recursively startind at dir_base."""
items = []
for root, _, files in os.walk(dir_base):
for name in files:
if (name == "BUILD" and root not in BUILD_BLACKLIST):
items.append("//" + root + ":all")
return items
def BuildPyTestDependencies():
python_targets = GetBuild("tensorflow/python")
tensorflow_targets = GetBuild("tensorflow")
# Build list of test targets,
  # python - attr(manual|no_pip)
targets = " + ".join(python_targets)
targets += ' - attr(tags, "manual|no_pip", %s)' % " + ".join(
tensorflow_targets)
query_kind = "kind(py_test, %s)" % targets
# Skip benchmarks etc.
query_filter = 'filter("^((?!benchmark).)*$", %s)' % query_kind
# Get the dependencies
query_deps = "deps(%s, 1)" % query_filter
return python_targets, query_deps
PYTHON_TARGETS, PY_TEST_QUERY_EXPRESSION = BuildPyTestDependencies()
# TODO(amitpatankar): Clean up blacklist.
# List of dependencies that should not included in the pip package.
DEPENDENCY_BLACKLIST = [
"//tensorflow/python:extra_py_tests_deps",
"//tensorflow/cc/saved_model:saved_model_half_plus_two",
"//tensorflow:no_tensorflow_py_deps",
"//tensorflow/tools/pip_package:win_pip_package_marker",
"//tensorflow/python:test_ops_2",
"//tensorflow/python:tf_optimizer",
"//tensorflow/python:compare_test_proto_py",
"//tensorflow/core:image_testdata",
"//tensorflow/core:lmdb_testdata",
"//tensorflow/core/kernels/cloud:bigquery_reader_ops",
"//tensorflow/python/debug:grpc_tensorflow_server.par",
"//tensorflow/python/feature_column:vocabulary_testdata",
"//tensorflow/python:framework/test_file_system.so",
"//tensorflow/python:util_nest_test_main_lib",
# lite
"//tensorflow/lite/experimental/examples/lstm:rnn_cell",
"//tensorflow/lite/experimental/examples/lstm:rnn_cell.py",
"//tensorflow/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test", # pylint:disable=line-too-long
"//tensorflow/lite/experimental/examples/lstm:unidirectional_sequence_lstm_test.py", # pylint:disable=line-too-long
"//tensorflow/lite/python:interpreter",
"//tensorflow/lite/python:interpreter_test",
"//tensorflow/lite/python:interpreter.py",
"//tensorflow/lite/python:interpreter_test.py",
# failing presubmits on the branch
"//tensorflow/contrib/tpu:tpu_lib",
"//tensorflow/python/tpu:tpu",
"//tensorflow/contrib/lookup:lookup_py",
"//tensorflow/contrib/layers:layers_py",
]
def main():
"""This script runs the pip smoke test.
Raises:
    RuntimeError: If any py_test dependency is missing from the pip package superset.
Prerequisites:
1. Bazel is installed.
2. Running in github repo of tensorflow.
3. Configure has been run.
"""
# pip_package_dependencies_list is the list of included files in pip packages
pip_package_dependencies = subprocess.check_output(
["bazel", "cquery", PIP_PACKAGE_QUERY_EXPRESSION])
if isinstance(pip_package_dependencies, bytes):
pip_package_dependencies = pip_package_dependencies.decode("utf-8")
pip_package_dependencies_list = pip_package_dependencies.strip().split("\n")
pip_package_dependencies_list = [
x.split()[0] for x in pip_package_dependencies_list
]
print("Pip package superset size: %d" % len(pip_package_dependencies_list))
# tf_py_test_dependencies is the list of dependencies for all python
# tests in tensorflow
tf_py_test_dependencies = subprocess.check_output(
["bazel", "cquery", PY_TEST_QUERY_EXPRESSION])
if isinstance(tf_py_test_dependencies, bytes):
tf_py_test_dependencies = tf_py_test_dependencies.decode("utf-8")
tf_py_test_dependencies_list = tf_py_test_dependencies.strip().split("\n")
tf_py_test_dependencies_list = [
x.split()[0] for x in tf_py_test_dependencies.strip().split("\n")
]
print("Pytest dependency subset size: %d" % len(tf_py_test_dependencies_list))
missing_dependencies = []
# File extensions and endings to ignore
ignore_extensions = [
"_test", "_test.py", "_test_gpu", "_test_gpu.py", "_test_lib"
]
ignored_files_count = 0
blacklisted_dependencies_count = len(DEPENDENCY_BLACKLIST)
# Compare dependencies
for dependency in tf_py_test_dependencies_list:
if dependency and dependency.startswith("//tensorflow"):
ignore = False
# Ignore extensions
if any(dependency.endswith(ext) for ext in ignore_extensions):
ignore = True
ignored_files_count += 1
# Check if the dependency is in the pip package, the dependency blacklist,
# or should be ignored because of its file extension.
if not (ignore or dependency in pip_package_dependencies_list or
dependency in DEPENDENCY_BLACKLIST):
missing_dependencies.append(dependency)
print("Ignored files count: %d" % ignored_files_count)
print("Blacklisted dependencies count: %d" % blacklisted_dependencies_count)
if missing_dependencies:
print("Missing the following dependencies from pip_packages:")
for missing_dependency in missing_dependencies:
print("\nMissing dependency: %s " % missing_dependency)
print("Affected Tests:")
rdep_query = ("rdeps(kind(py_test, %s), %s)" %
(" + ".join(PYTHON_TARGETS), missing_dependency))
      affected_tests = subprocess.check_output(["bazel", "cquery", rdep_query])
      if isinstance(affected_tests, bytes):
        affected_tests = affected_tests.decode("utf-8")
      affected_tests_list = affected_tests.split("\n")[:-2]
print("\n".join(affected_tests_list))
raise RuntimeError("""
One or more added test dependencies are not in the pip package.
If these test dependencies need to be in TensorFlow pip package, please add them to //tensorflow/tools/pip_package/BUILD.
Else either blacklist the dependencies in //tensorflow/tools/pip_package/pip_smoke_test.py
or add no_pip tag to the test.""")
else:
print("TEST PASSED")
if __name__ == "__main__":
main()
|
py | b4092dde4e9f4df216d0dcb60aa6f5adac1182b3 | import wordlist
import csv
# How big are the two lists
# print(len(wordlist.wordlist))
# print(len(wordlist.wordlist2))
# How similar are the two lists?
# the two wordlists are completely different.
# wl2 = set(wordlist.wordlist2)
# not_in_wordlist_2 = []
# for word in wordlist.wordlist:
# if set(word).intersection(wl2) == set():
# print("word %s is not in both lists" % word)
# not_in_wordlist_2.append(word)
# print(len(not_in_wordlist_2))
# print(len(wordlist.wordlist))
# Which list contains some of the more recent answers?
# List 1 contains all of them.
# with open('dailywords.csv', 'r') as csv_file:
# reader = csv.DictReader(csv_file)
# for row in reader:
# # print(row["word"])
# print(row['word'].lower() in wordlist.wordlist2)
# Solving strategy.
# count the unique vowels in each word (so multiple e's don't screw with it)
# interesting fact: "audio" is statistically the best first word to pick because it has 4 of the 5 vowels in it
words = wordlist.wordle_solutions
winnowlist = []
correct_letters = list("_____")
excluded_letters = []
misplaced_letters = []
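# Clamps a letter count to 0 (absent) or 1 (present) so repeated vowels in a
# word are only counted once.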
def some_or_none(value):
return value if value == 0 else 1
with open('vowelcounts-unique.csv', 'w') as csvfile:
fieldnames = ['word', 'total_vowels', 'unique_vowels']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for word in words:
vowels = list(map(word.lower().count, "aeiou"))
# count total vowels
count = sum(vowels)
# count number of unique vowels
unique_vowels = map(some_or_none, vowels)
ucount = sum(unique_vowels)
        # uncomment the writer.writerow() call below to write the results out to a csv
result = {'word': word, 'total_vowels': count, 'unique_vowels': ucount}
winnowlist.append(result)
# writer.writerow(result)
winnowlist = sorted(winnowlist, key=lambda d: d['unique_vowels'], reverse=True)
print("The best first guess is: " + winnowlist[0]['word'])
guess_word = winnowlist[0]['word']
def update_hints(user_input, word, wordlength=5):
"""updates the values of the correct, excluded, and misplaced characters provided by wordle based on user input
this function expects user_input to be a 5 character, case insensitive string of these characters to represent the data from wordle:
C - letter at this position is correct and in the right order
M - Letter at this position is correct but misplaced
X - Letter at this position is incorrect/excluded
Args:
        user_input (str): 5-character result string of C/X/M characters from wordle
        word (str): the word that was guessed
"""
global excluded_letters, misplaced_letters, correct_letters
if len(user_input) > wordlength:
raise ValueError("User input must be %d characters" % wordlength)
if len(word) > wordlength:
raise ValueError("word must be %d characters" % wordlength)
if set(user_input.lower())-set("cxm"):
raise ValueError("User input can only contain the following characters (case insensitive): C X M")
data = zip(word, user_input, range(wordlength))
to_exclude = []
for char in data:
letter, result, index = char
result = result.lower()
if result == "c":
if letter in misplaced_letters:
misplaced_letters.remove(letter)
if letter in to_exclude:
to_exclude.remove(letter)
if correct_letters[index] == "_":
correct_letters[index] = letter
elif result == "x" and letter not in correct_letters:
to_exclude.append(letter)
elif result == "m":
misplaced_letters.append(letter)
excluded_letters.extend(to_exclude)
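# Example: if the guess was "audio" and wordle marked it grey/green/grey/grey/yellow,
# update_hints("xcxxm", "audio") records 'u' as correct at index 1, 'o' as
# misplaced, and adds 'a', 'd' and 'i' to the excluded letters.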
def check_word(word):
for index, letter in enumerate(word):
if letter in excluded_letters:
return False
elif correct_letters[index] != "_" and letter != correct_letters[index]:
return False
# elif letter not in misplaced_letters:
# return False
return True
def winnow(winnowlist):
"""iterates through the winnowlist and removes values that cant possibly be the answer
Args:
winnowlist ([type]): [description]
"""
new_winnowlist = []
# excluded_set = set(excluded_letters)
# misplaced_set = set(misplaced_letters)
# check criteria exclusion.
# exclude if ...
for entry in winnowlist:
word = entry["word"]
if check_word(word):
new_winnowlist.append(entry)
return new_winnowlist
def recalculate_order(winnowlist):
"""sorts the winnowlist based on how many misplaced characters it has
Args:
        winnowlist (list): list of candidate dicts, each with a "word" key
"""
for entry in winnowlist:
word = entry["word"]
misplaced_letters_present = list(map(word.lower().count, misplaced_letters))
        # count how many misplaced letters appear in this word
score = sum(misplaced_letters_present)
# unique_vowels = map(some_or_none, vowels)
# ucount = sum(unique_vowels)
entry["misplaced_score"] = score
return sorted(winnowlist, key=lambda d: d['misplaced_score'], reverse=True)
# start a prompt loop
while len(winnowlist) > 1:
guess_word = winnowlist[0]['word']
print("Your next word to guess is: " + guess_word)
user_input = input("what did this word score? Enter C if the character was correct, X if it is wrong, and M if it was misplaced. if the word is invalid in your game, enter INVALID:")
if user_input == "INVALID":
words_in_list = list(map(lambda d: d["word"], winnowlist))
index = words_in_list.index(guess_word)
del winnowlist[index]
continue
update_hints(user_input, guess_word)
winnowlist = winnow(winnowlist)
if winnowlist[0]['word'] == guess_word:
winnowlist.remove(winnowlist[0])
winnowlist = recalculate_order(winnowlist)
# print(*map(lambda d: d["word"], winnowlist))
# print(excluded_letters)
print("Game Complete")
# Research questions:
# - with this algorithm, can you guarantee a win in a certain number of guesses?
# - how does this compare to a human player?
# |
py | b4092e2f2684725f06f54fa1106c4cfc5951c466 | import os
AppName = "NomenSequence"
ExecutableZip = AppName + ".pyzw"
CurrentWorkingDirectory = os.getcwd()
AbsolutePathToExecutableZip = CurrentWorkingDirectory + "/" + ExecutableZip
AbsolutePathToIconPNG = CurrentWorkingDirectory + "/Assets/NomenSequence Icon.png"
DesktopFileContents = """[Desktop Entry]
Type=Application
Name={0}
Exec=python3 "{1}"
Icon={2}
Categories=Application;"""
DesktopFileContents = DesktopFileContents.format(AppName, AbsolutePathToExecutableZip, AbsolutePathToIconPNG)
with open(AppName + ".desktop", "w") as DesktopFile:
DesktopFile.write(DesktopFileContents) |
py | b4092efec11f035decdf49706bab59fa056ffb83 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Carousel'
db.create_table(u'cmsplugin_bootstrap_carousel_carousel', (
(u'cmsplugin_ptr', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['cms.CMSPlugin'], unique=True, primary_key=True)),
('domid', self.gf('django.db.models.fields.CharField')(max_length=50)),
('interval', self.gf('django.db.models.fields.IntegerField')(default=5000)),
))
db.send_create_signal(u'cmsplugin_bootstrap_carousel', ['Carousel'])
# Adding model 'CarouselItem'
db.create_table(u'cmsplugin_bootstrap_carousel_carouselitem', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('carousel', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['cmsplugin_bootstrap_carousel.Carousel'])),
('caption_title', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('caption_content', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
('image', self.gf('django.db.models.fields.files.ImageField')(max_length=100, null=True, blank=True)),
))
db.send_create_signal(u'cmsplugin_bootstrap_carousel', ['CarouselItem'])
def backwards(self, orm):
# Deleting model 'Carousel'
db.delete_table(u'cmsplugin_bootstrap_carousel_carousel')
# Deleting model 'CarouselItem'
db.delete_table(u'cmsplugin_bootstrap_carousel_carouselitem')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
u'cmsplugin_bootstrap_carousel.carousel': {
'Meta': {'object_name': 'Carousel', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'domid': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'interval': ('django.db.models.fields.IntegerField', [], {'default': '5000'})
},
u'cmsplugin_bootstrap_carousel.carouselitem': {
'Meta': {'object_name': 'CarouselItem'},
'caption_content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'caption_title': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'carousel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['cmsplugin_bootstrap_carousel.Carousel']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['cmsplugin_bootstrap_carousel'] |
py | b4092f102f78a62ebb81d343423d0f3bd2b9b216 | # coding: utf-8
from __future__ import print_function, unicode_literals
import argparse
import contextlib
import os
import shutil
import sys
from collections import OrderedDict
from importlib import import_module
from luamb.version import __version__
if sys.version_info[0] == 2:
from StringIO import StringIO
else:
from io import StringIO
class CMD(object):
def __init__(self):
self.registry = OrderedDict()
def add(self, *aliases):
def decorator(func):
cmd_info = {
'cmd': aliases[0],
'callable': func,
'desc': (func.__doc__ or '').rstrip(),
'aliases': aliases[1:],
}
for alias in aliases:
self.registry[alias] = cmd_info
return func
return decorator
def resolve(self, cmd):
if cmd in self.registry:
return self.registry[cmd]['callable']
def render_help(self):
help_list = []
for cmd, cmd_info in self.registry.items():
if cmd != cmd_info['cmd']:
continue
cmd_str = cmd
if cmd_info['aliases']:
cmd_str = '{0: <6}(aliases: {1})'.format(
cmd_str, ', '.join(cmd_info['aliases']))
if cmd_info['desc']:
cmd_str = '{0: <38}{1}'.format(cmd_str, cmd_info['desc'])
help_list.append(cmd_str)
return '\n'.join(help_list)
class LuambException(Exception):
message = None
def __init__(self, message=None):
if message:
self.message = message
def __str__(self):
return self.message or self.__class__.__name__
class CommandIsShellFunction(LuambException):
message = (
"this command is implemented as a shell function "
"and cannot be called via Python script entrypoint"
)
class HererocksErrorExit(LuambException):
status = None
def __init__(self, system_exit_exc):
arg = system_exit_exc.args[0]
if isinstance(arg, int):
self.status = arg
self.message = None
else:
self.status = 1
self.message = arg
def __str__(self):
msg = 'hererocks exited with non-zero status: {}'.format(self.status)
if self.message:
msg = '{}\n{}'.format(msg, self.message)
return msg
class HererocksUncaughtException(LuambException):
exc = None
def __init__(self, exc):
self.exc = exc
if exc.args:
self.message = str(exc)
def __str__(self):
msg = 'uncaught exception while running hererocks: {}'.format(
self.exc.__class__.__name__)
if self.message:
msg = '{}\n{}'.format(msg, self.message)
return msg
def check_env_name(env_name):
if not env_name or env_name == '.' or env_name == '..' or '/' in env_name:
raise argparse.ArgumentTypeError(
"invalid env name: '{}'".format(env_name))
return env_name
class Luamb(object):
lua_types = ('lua', 'luajit', 'moonjit', 'raptorjit')
product_cli_args = {
'lua': ('-l', '--lua'),
'luajit': ('-j', '--luajit'),
'moonjit': ('-m', '--moonjit'),
'raptorjit': ('--raptorjit',),
'luarocks': ('-r', '--luarocks'),
}
product_hererocks_classes = {
'lua': 'RioLua',
'luajit': 'LuaJIT',
'moonjit': 'MoonJIT',
'raptorjit': 'RaptorJIT',
'luarocks': 'LuaRocks',
}
product_names = {
'lua': 'PUC-Rio Lua',
'luajit': 'LuaJIT',
'moonjit': 'moonjit',
'raptorjit': 'RaptorJIT',
'luarocks': 'LuaRocks',
}
cmd = CMD()
def __init__(self, env_dir, active_env=None,
lua_default=None, luarocks_default=None,
hererocks=None):
self.env_dir = env_dir
self.active_env = active_env
self.lua_default = lua_default
self.luarocks_default = luarocks_default
self.hererocks = hererocks or import_module('hererocks')
self.supported_versions = {
product_key: self._fetch_supported_versions(cls_name)
for product_key, cls_name in self.product_hererocks_classes.items()
}
def run(self, argv=None):
if not argv:
argv = sys.argv[1:]
parser = argparse.ArgumentParser(
prog='luamb',
add_help=False,
)
parser.add_argument(
'command',
nargs='?',
default='',
)
parser.add_argument(
'-h', '--help',
action='store_true',
)
parser.add_argument(
'-v', '--version',
action='version',
version='luamb ' + __version__,
help="show luamb version number and exit",
)
args = parser.parse_args(argv[:1])
self._show_main_usage = parser.print_usage
if not args.command or args.help:
self._show_main_help()
return
method = self.cmd.resolve(args.command)
if not method:
print("command '{}' not found\n"
"try 'luamb --help'".format(args.command))
else:
method(self, argv[1:])
@cmd.add('on', 'enable', 'activate')
def cmd_on(self, argv):
"""activate environment"""
raise CommandIsShellFunction
@cmd.add('off', 'disable', 'deactivate')
def cmd_off(self, argv):
"""deactivate environment"""
raise CommandIsShellFunction
@cmd.add('mk', 'new', 'create')
def cmd_mk(self, argv):
"""create new environment"""
parser = argparse.ArgumentParser(
prog='luamb mk',
add_help=False,
description="""
            This command is a tiny wrapper around the hererocks tool.
            You can use any hererocks arguments (see below), but instead of
            a full path to the new environment you should specify only
            its name. In addition, you can specify a project path with
            the -a/--associate argument.
""",
usage=(
'\n luamb mk [-a PROJECT_DIR] [--no-luarocks] HEREROCKS_ARGS '
'ENV_NAME\n'
' luamb mk --list-versions WHAT'
),
)
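        # A hypothetical invocation of this command:
        #   luamb mk -l 5.3 -r latest -a ~/projects/foo myenv
        # would install PUC-Rio Lua 5.3 plus the latest LuaRocks into
        # <env_dir>/myenv and associate the env with ~/projects/foo.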
parser.add_argument(
'env_name',
nargs='?',
type=check_env_name,
metavar='ENV_NAME',
help="environment name (used as directory name)"
)
parser.add_argument(
'-a', '--associate',
metavar='PROJECT_DIR',
help="associate env with project",
)
parser.add_argument(
'--no-luarocks',
action='store_true',
help="don't install LuaRocks (if default version specified via "
"environment variable)",
)
parser.add_argument(
'--list-versions',
choices=self.product_names,
metavar='WHAT',
help=(
'list versions of Lua interpreter or LuaRocks '
'(one of %(choices)s) available for installation'
),
)
for product_key, product_cli_args in self.product_cli_args.items():
parser.add_argument(
*product_cli_args,
dest=product_key,
help=argparse.SUPPRESS
)
parser.add_argument(
'-v', '--version',
action='version',
version=self.hererocks.hererocks_version,
help=argparse.SUPPRESS,
)
parser.add_argument(
'-h', '--help',
action='store_true',
help=argparse.SUPPRESS,
)
args, extra_args = parser.parse_known_args(argv)
if args.help or (not args.env_name and not args.list_versions):
output = self._call_hererocks(['--help'], capture_output=True)
hererocks_help = output.partition("optional arguments:\n")[2]
parser.print_help()
print('\nhererocks arguments:')
print(hererocks_help)
return
if args.list_versions:
product_key = args.list_versions
versions = self._get_supported_versions(product_key)
print('Supported {} versions are: {}'.format(
self.product_names[product_key],
self._format_versions_string(versions),
))
print('latest and ^ are aliases for {}'.format(versions['latest']))
return
env_name = args.env_name
args_lua_types = []
for lua_type in self.lua_types:
lua_version = getattr(args, lua_type)
if lua_version is not None:
args_lua_types.append((lua_type, lua_version))
if len(args_lua_types) > 1:
raise LuambException("can't install more than one Lua interpreter")
if len(args_lua_types) == 1:
lua_type, lua_version = args_lua_types[0]
else:
if not self.lua_default:
raise LuambException(
"specify Lua version argument "
"or set default version via environment variable"
)
print(
"Lua version argument is not specified, use the default value "
"from enviroment variable"
)
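            # The default is expected in the form "<interpreter> <version>",
            # e.g. a value of "luajit 2.1.0-beta3" is split into
            # lua_type="luajit" and lua_version="2.1.0-beta3" below.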
lua_type, _, lua_version = self.lua_default.strip().partition(' ')
lua_type = lua_type.rstrip()
lua_version = lua_version.lstrip()
if not lua_type or not lua_version:
raise LuambException(
"Error parsing Lua version "
"environment variable: {}".format(self.lua_default)
)
self._check_product_version_is_supported(lua_type, lua_version)
if args.no_luarocks:
luarocks_version = None
elif args.luarocks is not None:
luarocks_version = args.luarocks
elif self.luarocks_default:
print(
"LuaRocks version argument is not specified, "
"use the default value from enviroment variable"
)
luarocks_version = self.luarocks_default.strip()
else:
luarocks_version = None
if luarocks_version is not None:
self._check_product_version_is_supported(
'luarocks', luarocks_version)
env_path = os.path.join(self.env_dir, env_name)
hererocks_args = [
self.product_cli_args[lua_type][-1],
lua_version,
]
if luarocks_version:
hererocks_args.extend([
self.product_cli_args['luarocks'][-1],
luarocks_version,
])
hererocks_args.extend(extra_args)
hererocks_args.append(env_path)
self._call_hererocks(hererocks_args)
if args.associate:
with open(os.path.join(env_path, '.project'), 'w') as f:
f.write(os.path.abspath(os.path.expandvars(args.associate)))
@cmd.add('rm', 'remove', 'del', 'delete')
def cmd_rm(self, argv):
"""remove environment"""
parser = argparse.ArgumentParser(prog='luamb rm')
parser.add_argument(
'env_name',
type=check_env_name,
metavar='ENV_NAME',
)
args = parser.parse_args(argv)
env_name = args.env_name
if env_name == self.active_env:
raise LuambException('cannot remove the active environment')
env_path = self._get_env_path(env_name)
try:
shutil.rmtree(env_path)
except OSError:
raise LuambException("can't delete {}".format(env_path))
print("env '{}' has been deleted".format(env_name))
@cmd.add('info', 'show')
def cmd_info(self, argv):
"""show environment info"""
if '-h' in argv or '--help' in argv:
print("usage: luamb info [ENV_NAME]")
return
env_name = argv[0] if argv else self.active_env
if not env_name:
raise LuambException("no active environment found - "
"specify environment name")
self._show_env_info(env_name, mark_active=False)
@cmd.add('ls', 'list')
def cmd_ls(self, argv):
"""list available environments"""
parser = argparse.ArgumentParser(prog='luamb ls')
parser.add_argument(
'-s', '--short',
action='store_true',
help="show only names of environments",
)
args = parser.parse_args(argv)
envs = next(os.walk(self.env_dir))[1]
envs.sort()
detail = not args.short
for env in envs:
self._show_env_info(env, detail=detail)
if detail:
print('\n')
@contextlib.contextmanager
def _maybe_capture_output(self, capture_output):
string_buffer = StringIO()
if capture_output:
stdout, stderr = sys.stdout, sys.stderr
sys.stdout = sys.stderr = string_buffer
try:
yield string_buffer
finally:
if capture_output:
sys.stdout, sys.stderr = stdout, stderr
string_buffer.close()
def _call_hererocks(self, argv, capture_output=False):
with self._maybe_capture_output(capture_output) as output_buffer:
try:
self.hererocks.main(argv=argv)
except SystemExit as exc:
if exc.code:
raise HererocksErrorExit(exc)
except Exception as exc:
raise HererocksUncaughtException(exc)
if capture_output:
return output_buffer.getvalue()
def _show_main_help(self):
self._show_main_usage()
print("\navailable commands:\n")
print(self.cmd.render_help())
def _get_env_path(self, env_name, raise_exc=True):
env_path = os.path.join(self.env_dir, env_name)
if os.path.isdir(env_path):
return env_path
if raise_exc:
raise LuambException("environment '{}' doesn't exist".format(
env_name))
def _show_env_info(
self, env_name, detail=True, mark_active=True, raise_exc=True,
):
env_path = self._get_env_path(env_name, raise_exc=raise_exc)
if not env_path:
return
if mark_active and env_name == self.active_env:
env_name = '(' + env_name + ')'
print(env_name)
if not detail:
return
print('=' * len(env_name))
self._call_hererocks(['--show', env_path])
project_file_path = os.path.join(env_path, '.project')
if os.path.isfile(project_file_path):
with open(project_file_path) as f:
print('Project:', f.read().strip())
def _fetch_supported_versions(self, hererocks_cls_name):
try:
cls = getattr(self.hererocks, hererocks_cls_name)
except AttributeError:
return {}
versions = {v: v for v in cls.versions}
versions.update(cls.translations)
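        # The resulting mapping contains both canonical versions and hererocks'
        # aliases, e.g. roughly {'5.4.3': '5.4.3', '5.4': '5.4.3',
        # 'latest': '5.4.3', '^': '5.4.3'}; the exact keys depend on the
        # installed hererocks release.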
return versions
def _get_supported_versions(self, product_key, raise_exc=True):
versions = self.supported_versions[product_key]
if versions or not raise_exc:
return versions
raise LuambException(
'{} is not supported\n'
'Try to upgrade hererocks'.format(self.product_names[product_key])
)
def _format_versions_string(self, versions):
return ' '.join(sorted(versions))
def _check_product_version_is_supported(self, product_key, version):
if product_key != 'luarocks' and product_key not in self.lua_types:
raise LuambException(
'Unsupported Lua interpreter: {}'.format(product_key)
)
product_name = self.product_names[product_key]
if not version:
raise LuambException(
'{} version is not specified'.format(product_name))
if self._is_local_path_or_git_uri(version):
return
supported_versions = self._get_supported_versions(product_key)
if version not in supported_versions:
raise LuambException(
'Unsupported {} version: {}\n'
'Supported versions are: {}'.format(
product_name,
version,
self._format_versions_string(supported_versions),
)
)
def _is_local_path_or_git_uri(self, version_string, skip_path_check=False):
if '@' in version_string:
return True
if (
not version_string.startswith('/')
and not version_string.startswith('./')
and not version_string.startswith('../')
):
return False
if skip_path_check:
return True
if not os.path.exists(version_string):
raise LuambException(
"'{}' seems like local path "
"but doesn't exist".format(version_string)
)
if not os.path.isdir(version_string):
raise LuambException(
"'{}' is not a directory ".format(version_string)
)
return True
|
py | b4092f3d9e3406073ef92bda118f3a0c430b0d44 | #!/usr/bin/env python
import json
import os
import requests
import sys
import wget
import argparse
import urllib.error
version = "v1.0.1"
verbose = False
baseURL = "https://www.reddit.com/"
sorting = "top"
subreddit = "earthporn"
orientation = "landscape"
imagesToGrab = 20
minResolution = {"height": 1080, "width": 1920}
outputDir = os.path.realpath(os.path.dirname(__file__)) + "/output"
parser = argparse.ArgumentParser(description="Crawl a subreddit for suitable wallpapers")
def main():
if verbose:
sys.stdout.write("subreddit........: " + subreddit + "\n")
sys.stdout.write("sorting..........: " + sorting + "\n")
sys.stdout.write("orientation......: " + orientation + "\n")
sys.stdout.write("max images.......: " + str(imagesToGrab) + "\n")
sys.stdout.write("min height.......: " + str(minResolution["height"]) + "\n")
sys.stdout.write("min width........: " + str(minResolution["width"]) + "\n")
sys.stdout.write("output directory.: " + outputDir + "\n")
if not os.path.exists(outputDir):
try:
os.makedirs(outputDir)
except OSError:
sys.stdout.write("[ \033[0;31mERROR\033[m ] Unable to create directory: " + outputDir + "\n")
else:
cleanOutputDirectory()
    grab()
return
def grab(imagesGrabbed = 0, after = ""):
response = requests.get(baseURL + subreddit + "/" + sorting + "/.json?limit=100" + after, headers = {"User-agent": "subreddit desktop wallpaper crawler " + version})
jsondata = json.loads(response.text)
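    # Abridged shape of the listing JSON consumed below (assumed from the
    # code's access pattern):
    # {"data": {"after": "t3_...",
    #           "children": [{"data": {"id": "...",
    #                                  "preview": {"images": [{"source": {
    #                                      "url": "...", "width": 1920, "height": 1080}}]}}}]}}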
if "error" in jsondata:
sys.stdout.write("[ \033[0;31mERROR\033[m ] " + jsondata["message"] + "\n")
if "data" not in jsondata:
sys.stdout.write("[ \033[0;31mERROR\033[m ] Invalid data received: " + str(jsondata) + "\n")
return
if "children" not in jsondata["data"]:
if verbose:
sys.stdout.write("[ \033[0;33mWARNING\033[m ] No posts found on " + baseURL + subreddit + ".\n")
return
for item in jsondata["data"]["children"]:
postURL = "Post"
if imagesGrabbed == imagesToGrab:
return
if "data" not in item:
if verbose:
sys.stdout.write("[ \033[0;33mWARNING\033[m ] Subreddit contains no data.\n")
continue
if "id" in item["data"]:
postURL = baseURL + subreddit + "/comments/" + item["data"]["id"]
if "preview" not in item["data"]:
if verbose:
sys.stdout.write("[ \033[0;33mWARNING\033[m ] " + postURL + " contains no preview.\n")
continue
if "images" not in item["data"]["preview"]:
if verbose:
sys.stdout.write("[ \033[0;33mWARNING\033[m ] " + postURL + " contains no images.\n")
continue
for image in item["data"]["preview"]["images"]:
if "source" not in image:
if verbose:
sys.stdout.write("[ \033[0;33mWARNING\033[m ] " + postURL + " has no image source.\n")
continue
if "url" not in image["source"]:
if verbose:
sys.stdout.write("[ \033[0;33mWARNING\033[m ] " + postURL + " has no image URL.\n")
continue
if "height" not in image["source"] or "width" not in image["source"]:
if verbose:
sys.stdout.write("[ \033[0;33mWARNING\033[m ] " + postURL + " image does not have any dimension data.\n")
continue
if checkImageDimensions(image["source"]["height"], image["source"]["width"]):
if downloadImage(image["source"]["url"]):
imagesGrabbed += 1
else:
if verbose:
sys.stdout.write("[ \033[0;33mWARNING\033[m ] " + postURL + " image does not meet dimension requirements.\n")
if imagesGrabbed != imagesToGrab:
if "after" not in jsondata["data"] or jsondata["data"]["after"] is None:
if verbose:
sys.stdout.write("[ \033[0;33mWARNING\033[m ] ")
sys.stdout.write("Reached end of list after " + str(imagesGrabbed) + " of " + str(imagesToGrab) + " suitable wallpapers\n")
else:
grab(imagesGrabbed, "&after=" + jsondata["data"]["after"])
return
def cleanOutputDirectory():
for dirpath, dirnames, filenames in os.walk(outputDir):
for name in filenames:
path = os.path.join(dirpath, name)
if path != outputDir + "/.gitkeep":
os.unlink(path)
return
def checkImageDimensions(height, width):
if orientation == "landscape" and height > width:
return False
if orientation == "portrait" and height < width:
return False
if minResolution["height"] > height or minResolution["width"] > width:
return False
return True
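# With the defaults above (landscape, minimum 1920x1080), a 2560x1440 source
# passes checkImageDimensions(1440, 2560), while a 1080x1920 portrait image fails.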
def downloadImage(url):
url = url.split("?")[0]
fileName = url.split("/")[-1]
if "preview.redd.it" in url:
url = "https://i.redd.it/" + fileName
try:
wget.download(url, outputDir + "/" + fileName, wget.bar_thermometer)
sys.stdout.write("\n")
return True
except urllib.error.HTTPError as httpError:
sys.stdout.write("[ \033[0;31mERROR\033[m ] Unable to download image from \"" + url + "\". " + str(httpError.code) + ": " + httpError.reason + "\n")
except ValueError as valueError:
sys.stdout.write("[ \033[0;31mERROR\033[m ] " + str(valueError) + "\033[m\n")
return False
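# downloadImage() strips the query string and rewrites preview URLs, so a
# hypothetical "https://preview.redd.it/abc123.jpg?width=960&auto=webp" is
# fetched as "https://i.redd.it/abc123.jpg" and saved to <outputDir>/abc123.jpg.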
parser.add_argument("-m", "--max",
dest="imagesToGrab",
metavar="",
default=imagesToGrab,
type=int,
help="Number of images to download"
)
parser.add_argument("-o", "--orientation",
dest="orientation",
metavar="",
default=orientation,
type=str,
help="Change orientation to user input [default: " + orientation + "]"
)
parser.add_argument("-r", "--subreddit",
dest="subreddit",
metavar="",
default=subreddit,
type=str,
help="Change subreddit to user input [default: r/" + subreddit + "]"
)
parser.add_argument("-s", "--sorting",
dest="sorting",
metavar="",
default=sorting,
type=str,
help="Change sorting to user input [default: " + sorting + "]"
)
parser.add_argument("-v", "--version",
action="store_true",
help="Show version information"
)
parser.add_argument("--min-width",
dest="minWidth",
metavar="",
default=minResolution["width"],
type=int,
help="Change minimum resolution width constraint to user input [default: " + str(minResolution["width"]) + "]"
)
parser.add_argument("--min-height",
dest="minHeight",
metavar="",
default=minResolution["height"],
type=int,
help="Change minimum resolution height constraint to user input [default: " + str(minResolution["height"]) + "]"
)
parser.add_argument("--output",
dest="outputDir",
metavar="",
default=outputDir,
type=str,
help="Change output directory to user input [default: " + outputDir + "]"
)
parser.add_argument("--verbose",
action="store_true",
help="Print verbose process output"
)
args = parser.parse_args()
imagesToGrab = args.imagesToGrab
sorting = args.sorting
subreddit = "r/" + args.subreddit
orientation = args.orientation
minResolution["height"] = args.minHeight
minResolution["width"] = args.minWidth
outputDir = args.outputDir
verbose = args.verbose
if args.version:
sys.stdout.write(os.path.basename(__file__) + " " + version + "\n")
sys.exit(0)
main()
|
py | b40931009ebafc5a44e94c988617c828af3dd563 | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NetworkError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.precise import Precise
class huobi(Exchange):
def describe(self):
return self.deep_extend(super(huobi, self).describe(), {
'id': 'huobi',
'name': 'Huobi',
'countries': ['CN'],
'rateLimit': 100,
'userAgent': self.userAgents['chrome39'],
'certified': True,
'version': 'v1',
'accounts': None,
'accountsById': None,
'hostname': 'api.huobi.pro', # api.testnet.huobi.pro
'pro': True,
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'cancelOrders': True,
'CORS': None,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchPremiumIndexOHLCV': False,
'fetchTicker': True,
'fetchTickers': True,
'fetchTime': True,
'fetchTrades': True,
'fetchTradingLimits': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'4h': '4hour',
'1d': '1day',
'1w': '1week',
'1M': '1mon',
'1y': '1year',
},
'urls': {
'test': {
'market': 'https://api.testnet.huobi.pro',
'public': 'https://api.testnet.huobi.pro',
'private': 'https://api.testnet.huobi.pro',
},
'logo': 'https://user-images.githubusercontent.com/1294454/76137448-22748a80-604e-11ea-8069-6e389271911d.jpg',
'api': {
'market': 'https://{hostname}',
'public': 'https://{hostname}',
'private': 'https://{hostname}',
'v2Public': 'https://{hostname}',
'v2Private': 'https://{hostname}',
},
'www': 'https://www.huobi.com',
'referral': {
'url': 'https://www.huobi.com/en-us/topic/double-reward/?invite_code=6rmm2223',
'discount': 0.15,
},
'doc': [
'https://huobiapi.github.io/docs/spot/v1/cn/',
'https://huobiapi.github.io/docs/dm/v1/cn/',
'https://huobiapi.github.io/docs/coin_margined_swap/v1/cn/',
'https://huobiapi.github.io/docs/usdt_swap/v1/cn/',
'https://huobiapi.github.io/docs/option/v1/cn/',
],
'fees': 'https://www.huobi.com/about/fee/',
},
'api': {
'v2Public': {
'get': {
                        'reference/currencies': 1, # currency and chain reference info
                        'market-status': 1, # get the current market status
},
},
'v2Private': {
'get': {
'account/ledger': 1,
'account/withdraw/quota': 1,
                        'account/withdraw/address': 1, # query withdrawal addresses (available to parent users only)
'account/deposit/address': 1,
                        'account/repayment': 5, # query repayment records
'reference/transact-fee-rate': 1,
                        'account/asset-valuation': 0.2, # get the account asset valuation
                        'point/account': 5, # query the point card balance
                        'sub-user/user-list': 1, # get the list of sub-users
                        'sub-user/user-state': 1, # get the state of a specific sub-user
                        'sub-user/account-list': 1, # get the account list of a specific sub-user
                        'sub-user/deposit-address': 1, # query a sub-user's deposit address
                        'sub-user/query-deposit': 1, # query a sub-user's deposit records
                        'user/api-key': 1, # query API key info of parent and sub-users
                        'user/uid': 1, # get the user UID of parent and sub-users
                        'algo-orders/opening': 1, # query untriggered (open) algo orders
                        'algo-orders/history': 1, # query algo order history
                        'algo-orders/specific': 1, # query a specific algo order
                        'c2c/offers': 1, # query borrow/lend orders
                        'c2c/offer': 1, # query a specific borrow/lend order and its transactions
                        'c2c/transactions': 1, # query borrow/lend transaction records
                        'c2c/repayment': 1, # query repayment transaction records
                        'c2c/account': 1, # query the account balance
                        'etp/reference': 1, # basic reference info
                        'etp/transactions': 5, # get leveraged ETP creation/redemption records
                        'etp/transaction': 5, # get a specific leveraged ETP creation/redemption record
                        'etp/rebalance': 1, # get leveraged ETP rebalance records
                        'etp/limit': 1, # get ETP position limits
},
'post': {
'account/transfer': 1,
                        'account/repayment': 5, # repay a loan (both cross and isolated margin)
                        'point/transfer': 5, # point card transfer
                        'sub-user/management': 1, # freeze/unfreeze a sub-user
                        'sub-user/creation': 1, # create a sub-user
                        'sub-user/tradable-market': 1, # set a sub-user's trading permissions
                        'sub-user/transferability': 1, # set a sub-user's asset transfer-out permissions
                        'sub-user/api-key-generation': 1, # create a sub-user API key
                        'sub-user/api-key-modification': 1, # modify a sub-user API key
                        'sub-user/api-key-deletion': 1, # delete a sub-user API key
                        'sub-user/deduct-mode': 1, # set a sub-user's fee deduction mode
                        'algo-orders': 1, # place an algo order
                        'algo-orders/cancel-all-after': 1, # automatically cancel orders
                        'algo-orders/cancellation': 1, # cancel an algo order (before it triggers)
                        'c2c/offer': 1, # place a borrow/lend order
                        'c2c/cancellation': 1, # cancel a borrow/lend order
                        'c2c/cancel-all': 1, # cancel all borrow/lend orders
                        'c2c/repayment': 1, # repay
                        'c2c/transfer': 1, # asset transfer
                        'etp/creation': 5, # leveraged ETP creation (swap in)
                        'etp/redemption': 5, # leveraged ETP redemption (swap out)
                        'etp/{transactId}/cancel': 10, # cancel a single leveraged ETP order
                        'etp/batch-cancel': 50, # batch cancel leveraged ETP orders
},
},
'market': {
'get': {
                        'history/kline': 1, # get kline (candlestick) data
                        'detail/merged': 1, # get the aggregated ticker
                        'depth': 1, # get market depth data
                        'trade': 1, # get trade detail data
                        'history/trade': 1, # get recent trade records in bulk
                        'detail': 1, # get 24-hour market detail volume data
'tickers': 1,
                        'etp': 1, # get the real-time net value of leveraged ETPs
},
},
'public': {
'get': {
                        'common/symbols': 1, # query all trading pairs supported by the system
                        'common/currencys': 1, # query all currencies supported by the system
                        'common/timestamp': 1, # query the current system time
'common/exchange': 1, # order limits
'settings/currencys': 1, # ?language=en-US
},
},
'private': {
'get': {
                        'account/accounts': 0.2, # query all accounts of the current user (i.e. the account-ids)
                        'account/accounts/{id}/balance': 0.2, # query the balance of a specific account
'account/accounts/{sub-uid}': 1,
'account/history': 4,
'cross-margin/loan-info': 1,
                        'margin/loan-info': 1, # query loan interest rates and quota
'fee/fee-rate/get': 1,
'order/openOrders': 0.4,
'order/orders': 0.4,
                        'order/orders/{id}': 0.4, # query the details of an order
                        'order/orders/{id}/matchresults': 0.4, # query the match results of an order
'order/orders/getClientOrder': 0.4,
                        'order/history': 1, # query current and historical orders
                        'order/matchresults': 1, # query current and historical trades
                        # 'dw/withdraw-virtual/addresses', # query virtual currency withdrawal addresses (Deprecated)
'query/deposit-withdraw': 1,
# 'margin/loan-info', # duplicate
                        'margin/loan-orders': 0.2, # loan orders
                        'margin/accounts/balance': 0.2, # margin account details
                        'cross-margin/loan-orders': 1, # query cross-margin loan orders
                        'cross-margin/accounts/balance': 1, # cross-margin account details
'points/actions': 1,
'points/orders': 1,
'subuser/aggregate-balance': 10,
'stable-coin/exchange_rate': 1,
'stable-coin/quote': 1,
},
'post': {
                        'account/transfer': 1, # asset transfer (a generic endpoint for asset transfers between parent and sub-users)
'futures/transfer': 1,
'order/batch-orders': 0.4,
                        'order/orders/place': 0.2, # create and execute a new order in one step (recommended)
'order/orders/submitCancelClientOrder': 0.2,
'order/orders/batchCancelOpenOrders': 0.4,
                        # 'order/orders', # create a new order request (creates the order only, does not execute it)
                        # 'order/orders/{id}/place', # execute an order (only executes previously created orders)
                        'order/orders/{id}/submitcancel': 0.2, # submit a cancellation request for an order
                        'order/orders/batchcancel': 0.4, # batch cancel orders
                        # 'dw/balance/transfer', # asset transfer
                        'dw/withdraw/api/create': 1, # request a virtual currency withdrawal
                        # 'dw/withdraw-virtual/create', # request a virtual currency withdrawal
                        # 'dw/withdraw-virtual/{id}/place', # confirm a virtual currency withdrawal request (Deprecated)
                        'dw/withdraw-virtual/{id}/cancel': 1, # request cancellation of a virtual currency withdrawal
                        'dw/transfer-in/margin': 10, # transfer from the spot account into the margin account
                        'dw/transfer-out/margin': 10, # transfer out of the margin account into the spot account
                        'margin/orders': 10, # request a margin loan
                        'margin/orders/{id}/repay': 10, # repay a margin loan
                        'cross-margin/transfer-in': 1, # asset transfer
                        'cross-margin/transfer-out': 1, # asset transfer
                        'cross-margin/orders': 1, # request a cross-margin loan
                        'cross-margin/orders/{id}/repay': 1, # repay a cross-margin loan
'stable-coin/exchange': 1,
'subuser/transfer': 10,
},
},
},
'fees': {
'trading': {
'feeSide': 'get',
'tierBased': False,
'percentage': True,
'maker': self.parse_number('0.002'),
'taker': self.parse_number('0.002'),
},
},
'exceptions': {
'broad': {
'contract is restricted of closing positions on API. Please contact customer service': OnMaintenance,
'maintain': OnMaintenance,
},
'exact': {
# err-code
'bad-request': BadRequest,
'base-date-limit-error': BadRequest, # {"status":"error","err-code":"base-date-limit-error","err-msg":"date less than system limit","data":null}
'api-not-support-temp-addr': PermissionDenied, # {"status":"error","err-code":"api-not-support-temp-addr","err-msg":"API withdrawal does not support temporary addresses","data":null}
'timeout': RequestTimeout, # {"ts":1571653730865,"status":"error","err-code":"timeout","err-msg":"Request Timeout"}
'gateway-internal-error': ExchangeNotAvailable, # {"status":"error","err-code":"gateway-internal-error","err-msg":"Failed to load data. Try again later.","data":null}
'account-frozen-balance-insufficient-error': InsufficientFunds, # {"status":"error","err-code":"account-frozen-balance-insufficient-error","err-msg":"trade account balance is not enough, left: `0.0027`","data":null}
'invalid-amount': InvalidOrder, # eg "Paramemter `amount` is invalid."
'order-limitorder-amount-min-error': InvalidOrder, # limit order amount error, min: `0.001`
'order-limitorder-amount-max-error': InvalidOrder, # market order amount error, max: `1000000`
'order-marketorder-amount-min-error': InvalidOrder, # market order amount error, min: `0.01`
'order-limitorder-price-min-error': InvalidOrder, # limit order price error
'order-limitorder-price-max-error': InvalidOrder, # limit order price error
'order-holding-limit-failed': InvalidOrder, # {"status":"error","err-code":"order-holding-limit-failed","err-msg":"Order failed, exceeded the holding limit of self currency","data":null}
'order-orderprice-precision-error': InvalidOrder, # {"status":"error","err-code":"order-orderprice-precision-error","err-msg":"order price precision error, scale: `4`","data":null}
'order-etp-nav-price-max-error': InvalidOrder, # {"status":"error","err-code":"order-etp-nav-price-max-error","err-msg":"Order price cannot be higher than 5% of NAV","data":null}
'order-orderstate-error': OrderNotFound, # canceling an already canceled order
'order-queryorder-invalid': OrderNotFound, # querying a non-existent order
'order-update-error': ExchangeNotAvailable, # undocumented error
'api-signature-check-failed': AuthenticationError,
'api-signature-not-valid': AuthenticationError, # {"status":"error","err-code":"api-signature-not-valid","err-msg":"Signature not valid: Incorrect Access key [Access key错误]","data":null}
'base-record-invalid': OrderNotFound, # https://github.com/ccxt/ccxt/issues/5750
'base-symbol-trade-disabled': BadSymbol, # {"status":"error","err-code":"base-symbol-trade-disabled","err-msg":"Trading is disabled for self symbol","data":null}
'base-symbol-error': BadSymbol, # {"status":"error","err-code":"base-symbol-error","err-msg":"The symbol is invalid","data":null}
'system-maintenance': OnMaintenance, # {"status": "error", "err-code": "system-maintenance", "err-msg": "System is in maintenance!", "data": null}
# err-msg
'invalid symbol': BadSymbol, # {"ts":1568813334794,"status":"error","err-code":"invalid-parameter","err-msg":"invalid symbol"}
'symbol trade not open now': BadSymbol, # {"ts":1576210479343,"status":"error","err-code":"invalid-parameter","err-msg":"symbol trade not open now"}
},
},
'options': {
'networks': {
'ETH': 'erc20',
'TRX': 'trc20',
'HRC20': 'hrc20',
'HECO': 'hrc20',
'HT': 'hrc20',
'ALGO': 'algo',
'OMNI': '',
},
# https://github.com/ccxt/ccxt/issues/5376
'fetchOrdersByStatesMethod': 'private_get_order_orders', # 'private_get_order_history' # https://github.com/ccxt/ccxt/pull/5392
'fetchOpenOrdersMethod': 'fetch_open_orders_v1', # 'fetch_open_orders_v2' # https://github.com/ccxt/ccxt/issues/5388
'createMarketBuyOrderRequiresPrice': True,
'fetchMarketsMethod': 'publicGetCommonSymbols',
'fetchBalanceMethod': 'privateGetAccountAccountsIdBalance',
'createOrderMethod': 'privatePostOrderOrdersPlace',
'language': 'en-US',
'broker': {
'id': 'AA03022abc',
},
},
'commonCurrencies': {
# https://github.com/ccxt/ccxt/issues/6081
# https://github.com/ccxt/ccxt/issues/3365
# https://github.com/ccxt/ccxt/issues/2873
'GET': 'Themis', # conflict with GET(Guaranteed Entrance Token, GET Protocol)
'GTC': 'Game.com', # conflict with Gitcoin and Gastrocoin
'HIT': 'HitChain',
'HOT': 'Hydro Protocol', # conflict with HOT(Holo) https://github.com/ccxt/ccxt/issues/4929
# https://github.com/ccxt/ccxt/issues/7399
# https://coinmarketcap.com/currencies/pnetwork/
# https://coinmarketcap.com/currencies/penta/markets/
# https://en.cryptonomist.ch/blog/eidoo/the-edo-to-pnt-upgrade-what-you-need-to-know-updated/
'PNT': 'Penta',
'SBTC': 'Super Bitcoin',
'BIFI': 'Bitcoin File', # conflict with Beefy.Finance https://github.com/ccxt/ccxt/issues/8706
},
})
async def fetch_time(self, params={}):
response = await self.publicGetCommonTimestamp(params)
return self.safe_integer(response, 'data')
async def fetch_trading_limits(self, symbols=None, params={}):
        # this method should not be called directly, use loadTradingLimits() instead
        # by default it will try to load withdrawal fees of all currencies (with separate requests)
        # however, if you define symbols = ['ETH/BTC', 'LTC/BTC'] in args it will only load those
await self.load_markets()
if symbols is None:
symbols = self.symbols
result = {}
for i in range(0, len(symbols)):
symbol = symbols[i]
result[symbol] = await self.fetch_trading_limits_by_id(self.market_id(symbol), params)
return result
async def fetch_trading_limits_by_id(self, id, params={}):
request = {
'symbol': id,
}
response = await self.publicGetCommonExchange(self.extend(request, params))
#
# {status: "ok",
# data: { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }}
#
return self.parse_trading_limits(self.safe_value(response, 'data', {}))
def parse_trading_limits(self, limits, symbol=None, params={}):
#
# { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }
#
return {
'info': limits,
'limits': {
'amount': {
'min': self.safe_number(limits, 'limit-order-must-greater-than'),
'max': self.safe_number(limits, 'limit-order-must-less-than'),
},
},
}
def cost_to_precision(self, symbol, cost):
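        # Truncates (never rounds up) to the market's 'cost' precision, which comes
        # from the exchange's value-precision field; e.g. with a cost precision of 8,
        # a raw cost of 10.123456789 becomes '10.12345678'.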
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['cost'], self.precisionMode)
async def fetch_markets(self, params={}):
method = self.options['fetchMarketsMethod']
response = await getattr(self, method)(params)
markets = self.safe_value(response, 'data')
numMarkets = len(markets)
if numMarkets < 1:
raise NetworkError(self.id + ' publicGetCommonSymbols returned empty response: ' + self.json(markets))
result = []
for i in range(0, len(markets)):
market = markets[i]
baseId = self.safe_string(market, 'base-currency')
quoteId = self.safe_string(market, 'quote-currency')
id = baseId + quoteId
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(market, 'amount-precision'),
'price': self.safe_integer(market, 'price-precision'),
'cost': self.safe_integer(market, 'value-precision'),
}
maker = 0 if (base == 'OMG') else 0.2 / 100
taker = 0 if (base == 'OMG') else 0.2 / 100
minAmount = self.safe_number(market, 'min-order-amt', math.pow(10, -precision['amount']))
maxAmount = self.safe_number(market, 'max-order-amt')
minCost = self.safe_number(market, 'min-order-value', 0)
state = self.safe_string(market, 'state')
active = (state == 'online')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': precision,
'taker': taker,
'maker': maker,
'limits': {
'amount': {
'min': minAmount,
'max': maxAmount,
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': minCost,
'max': None,
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
#
# fetchTickers
# {
# symbol: "bhdht",
# open: 2.3938,
# high: 2.4151,
# low: 2.3323,
# close: 2.3909,
# amount: 628.992,
# vol: 1493.71841095,
# count: 2088,
# bid: 2.3643,
# bidSize: 0.7136,
# ask: 2.4061,
# askSize: 0.4156
# }
#
symbol = self.safe_symbol(None, market)
timestamp = self.safe_integer(ticker, 'ts')
bid = None
bidVolume = None
ask = None
askVolume = None
if 'bid' in ticker:
if isinstance(ticker['bid'], list):
bid = self.safe_number(ticker['bid'], 0)
bidVolume = self.safe_number(ticker['bid'], 1)
else:
bid = self.safe_number(ticker, 'bid')
bidVolume = self.safe_value(ticker, 'bidSize')
if 'ask' in ticker:
if isinstance(ticker['ask'], list):
ask = self.safe_number(ticker['ask'], 0)
askVolume = self.safe_number(ticker['ask'], 1)
else:
ask = self.safe_number(ticker, 'ask')
askVolume = self.safe_value(ticker, 'askSize')
open = self.safe_number(ticker, 'open')
close = self.safe_number(ticker, 'close')
baseVolume = self.safe_number(ticker, 'amount')
quoteVolume = self.safe_number(ticker, 'vol')
vwap = self.vwap(baseVolume, quoteVolume)
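        # vwap is simply quoteVolume / baseVolume when both are available, i.e.
        # the volume-weighted average price over the ticker window.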
return self.safe_ticker({
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': bid,
'bidVolume': bidVolume,
'ask': ask,
'askVolume': askVolume,
'vwap': vwap,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': None,
'percentage': None,
'average': None,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}, market)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'type': 'step0',
}
response = await self.marketGetDepth(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.depth.step0",
# "ts": 1583474832790,
# "tick": {
# "bids": [
# [9100.290000000000000000, 0.200000000000000000],
# [9099.820000000000000000, 0.200000000000000000],
# [9099.610000000000000000, 0.205000000000000000],
# ],
# "asks": [
# [9100.640000000000000000, 0.005904000000000000],
# [9101.010000000000000000, 0.287311000000000000],
# [9101.030000000000000000, 0.012121000000000000],
# ],
# "ts":1583474832008,
# "version":104999698780
# }
# }
#
if 'tick' in response:
if not response['tick']:
raise BadSymbol(self.id + ' fetchOrderBook() returned empty response: ' + self.json(response))
tick = self.safe_value(response, 'tick')
timestamp = self.safe_integer(tick, 'ts', self.safe_integer(response, 'ts'))
result = self.parse_order_book(tick, symbol, timestamp)
result['nonce'] = self.safe_integer(tick, 'version')
return result
raise ExchangeError(self.id + ' fetchOrderBook() returned unrecognized response: ' + self.json(response))
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = await self.marketGetDetailMerged(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.detail.merged",
# "ts": 1583494336669,
# "tick": {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
# }
#
ticker = self.parse_ticker(response['tick'], market)
timestamp = self.safe_integer(response, 'ts')
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
return ticker
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
response = await self.marketGetTickers(params)
tickers = self.safe_value(response, 'data')
timestamp = self.safe_integer(response, 'ts')
result = {}
for i in range(0, len(tickers)):
marketId = self.safe_string(tickers[i], 'symbol')
market = self.safe_market(marketId)
symbol = market['symbol']
ticker = self.parse_ticker(tickers[i], market)
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "amount": 0.010411000000000000,
# "trade-id": 102090736910,
# "ts": 1583497692182,
# "id": 10500517034273194594947,
# "price": 9096.050000000000000000,
# "direction": "sell"
# }
#
# fetchMyTrades(private)
#
# {
# 'symbol': 'swftcbtc',
# 'fee-currency': 'swftc',
# 'filled-fees': '0',
# 'source': 'spot-api',
# 'id': 83789509854000,
# 'type': 'buy-limit',
# 'order-id': 83711103204909,
# 'filled-points': '0.005826843283532154',
# 'fee-deduct-currency': 'ht',
# 'filled-amount': '45941.53',
# 'price': '0.0000001401',
# 'created-at': 1597933260729,
# 'match-id': 100087455560,
# 'role': 'maker',
# 'trade-id': 100050305348
# },
#
marketId = self.safe_string(trade, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer_2(trade, 'ts', 'created-at')
order = self.safe_string(trade, 'order-id')
side = self.safe_string(trade, 'direction')
type = self.safe_string(trade, 'type')
if type is not None:
typeParts = type.split('-')
side = typeParts[0]
type = typeParts[1]
takerOrMaker = self.safe_string(trade, 'role')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string_2(trade, 'filled-amount', 'amount')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
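        # cost is the string-exact product of price and amount, e.g. for the public
        # trade sample above: 9096.05 * 0.010411 = 94.69897655 (in quote currency).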
fee = None
feeCost = self.safe_number(trade, 'filled-fees')
feeCurrency = self.safe_currency_code(self.safe_string(trade, 'fee-currency'))
filledPoints = self.safe_number(trade, 'filled-points')
if filledPoints is not None:
if (feeCost is None) or (feeCost == 0.0):
feeCost = filledPoints
feeCurrency = self.safe_currency_code(self.safe_string(trade, 'fee-deduct-currency'))
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
tradeId = self.safe_string_2(trade, 'trade-id', 'tradeId')
id = self.safe_string(trade, 'id', tradeId)
return {
'id': id,
'info': trade,
'order': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
async def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'id': id,
}
response = await self.privateGetOrderOrdersIdMatchresults(self.extend(request, params))
return self.parse_trades(response['data'], None, since, limit)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['size'] = limit # 1-100 orders, default is 100
if since is not None:
request['start-time'] = since # a date within 120 days from today
# request['end-time'] = self.sum(since, 172800000) # 48 hours window
response = await self.privateGetOrderMatchresults(self.extend(request, params))
return self.parse_trades(response['data'], market, since, limit)
async def fetch_trades(self, symbol, since=None, limit=1000, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['size'] = limit
response = await self.marketGetHistoryTrade(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.trade.detail",
# "ts": 1583497692365,
# "data": [
# {
# "id": 105005170342,
# "ts": 1583497692182,
# "data": [
# {
# "amount": 0.010411000000000000,
# "trade-id": 102090736910,
# "ts": 1583497692182,
# "id": 10500517034273194594947,
# "price": 9096.050000000000000000,
# "direction": "sell"
# }
# ]
# },
# # ...
# ]
# }
#
data = self.safe_value(response, 'data')
result = []
for i in range(0, len(data)):
trades = self.safe_value(data[i], 'data', [])
for j in range(0, len(trades)):
trade = self.parse_trade(trades[j], market)
result.append(trade)
result = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "amount":1.2082,
# "open":0.025096,
# "close":0.025095,
# "high":0.025096,
# "id":1591515300,
# "count":6,
# "low":0.025095,
# "vol":0.0303205097
# }
#
return [
self.safe_timestamp(ohlcv, 'id'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'amount'),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=1000, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'period': self.timeframes[timeframe],
}
if limit is not None:
request['size'] = limit
response = await self.marketGetHistoryKline(self.extend(request, params))
#
# {
# "status":"ok",
# "ch":"market.ethbtc.kline.1min",
# "ts":1591515374371,
# "data":[
# {"amount":0.0,"open":0.025095,"close":0.025095,"high":0.025095,"id":1591515360,"count":0,"low":0.025095,"vol":0.0},
# {"amount":1.2082,"open":0.025096,"close":0.025095,"high":0.025096,"id":1591515300,"count":6,"low":0.025095,"vol":0.0303205097},
# {"amount":0.0648,"open":0.025096,"close":0.025096,"high":0.025096,"id":1591515240,"count":2,"low":0.025096,"vol":0.0016262208},
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
async def fetch_accounts(self, params={}):
await self.load_markets()
response = await self.privateGetAccountAccounts(params)
return response['data']
async def fetch_currencies(self, params={}):
request = {
'language': self.options['language'],
}
response = await self.publicGetSettingsCurrencys(self.extend(request, params))
currencies = self.safe_value(response, 'data')
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
#
# { name: "ctxc",
# 'display-name': "CTXC",
# 'withdraw-precision': 8,
# 'currency-type': "eth",
# 'currency-partition': "pro",
# 'support-sites': null,
# 'otc-enable': 0,
# 'deposit-min-amount': "2",
# 'withdraw-min-amount': "4",
# 'show-precision': "8",
# weight: "2988",
# visible: True,
# 'deposit-desc': "Please don’t deposit any other digital assets except CTXC t…",
# 'withdraw-desc': "Minimum withdrawal amount: 4 CTXC. not >_<not For security reason…",
# 'deposit-enabled': True,
# 'withdraw-enabled': True,
# 'currency-addr-with-tag': False,
# 'fast-confirms': 15,
# 'safe-confirms': 30 }
#
id = self.safe_value(currency, 'name')
precision = self.safe_integer(currency, 'withdraw-precision')
code = self.safe_currency_code(id)
active = currency['visible'] and currency['deposit-enabled'] and currency['withdraw-enabled']
name = self.safe_string(currency, 'display-name')
result[code] = {
'id': id,
'code': code,
'type': 'crypto',
# 'payin': currency['deposit-enabled'],
# 'payout': currency['withdraw-enabled'],
# 'transfer': None,
'name': name,
'active': active,
'fee': None, # todo need to fetch from fee endpoint
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'deposit': {
'min': self.safe_number(currency, 'deposit-min-amount'),
'max': math.pow(10, precision),
},
'withdraw': {
'min': self.safe_number(currency, 'withdraw-min-amount'),
'max': math.pow(10, precision),
},
},
'info': currency,
}
return result
async def fetch_balance(self, params={}):
await self.load_markets()
await self.load_accounts()
method = self.options['fetchBalanceMethod']
request = {
'id': self.accounts[0]['id'],
}
response = await getattr(self, method)(self.extend(request, params))
balances = self.safe_value(response['data'], 'list', [])
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = None
if code in result:
account = result[code]
else:
account = self.account()
if balance['type'] == 'trade':
account['free'] = self.safe_string(balance, 'balance')
if balance['type'] == 'frozen':
account['used'] = self.safe_string(balance, 'balance')
result[code] = account
return self.parse_balance(result)
async def fetch_orders_by_states(self, states, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {
'states': states,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
method = self.safe_string(self.options, 'fetchOrdersByStatesMethod', 'private_get_order_orders')
response = await getattr(self, method)(self.extend(request, params))
#
# {status: "ok",
# data: [{ id: 13997833014,
# symbol: "ethbtc",
# 'account-id': 3398321,
# amount: "0.045000000000000000",
# price: "0.034014000000000000",
# 'created-at': 1545836976871,
# type: "sell-limit",
# 'field-amount': "0.045000000000000000",
# 'field-cash-amount': "0.001530630000000000",
# 'field-fees': "0.000003061260000000",
# 'finished-at': 1545837948214,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0 } ]}
#
return self.parse_orders(response['data'], market, since, limit)
async def fetch_order(self, id, symbol=None, params={}):
await self.load_markets()
request = {
'id': id,
}
response = await self.privateGetOrderOrdersId(self.extend(request, params))
order = self.safe_value(response, 'data')
return self.parse_order(order)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_states('pre-submitted,submitted,partial-filled,filled,partial-canceled,canceled', symbol, since, limit, params)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
method = self.safe_string(self.options, 'fetchOpenOrdersMethod', 'fetch_open_orders_v1')
return await getattr(self, method)(symbol, since, limit, params)
async def fetch_open_orders_v1(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrdersV1() requires a symbol argument')
return await self.fetch_orders_by_states('pre-submitted,submitted,partial-filled', symbol, since, limit, params)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return await self.fetch_orders_by_states('filled,partial-canceled,canceled', symbol, since, limit, params)
async def fetch_open_orders_v2(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
request = {}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
accountId = self.safe_string(params, 'account-id')
if accountId is None:
# pick the first account
await self.load_accounts()
for i in range(0, len(self.accounts)):
account = self.accounts[i]
if account['type'] == 'spot':
accountId = self.safe_string(account, 'id')
if accountId is not None:
break
request['account-id'] = accountId
if limit is not None:
request['size'] = limit
omitted = self.omit(params, 'account-id')
response = await self.privateGetOrderOpenOrders(self.extend(request, omitted))
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"ethusdt",
# "source":"api",
# "amount":"0.010000000000000000",
# "account-id":1528640,
# "created-at":1561597491963,
# "price":"400.000000000000000000",
# "filled-amount":"0.0",
# "filled-cash-amount":"0.0",
# "filled-fees":"0.0",
# "id":38477101630,
# "state":"submitted",
# "type":"sell-limit"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market, since, limit)
def parse_order_status(self, status):
statuses = {
'partial-filled': 'open',
'partial-canceled': 'canceled',
'filled': 'closed',
'canceled': 'canceled',
'submitted': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# { id: 13997833014,
# symbol: "ethbtc",
# 'account-id': 3398321,
# amount: "0.045000000000000000",
# price: "0.034014000000000000",
# 'created-at': 1545836976871,
# type: "sell-limit",
# 'field-amount': "0.045000000000000000", # they have fixed it for filled-amount
# 'field-cash-amount': "0.001530630000000000", # they have fixed it for filled-cash-amount
# 'field-fees': "0.000003061260000000", # they have fixed it for filled-fees
# 'finished-at': 1545837948214,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0 }
#
# { id: 20395337822,
# symbol: "ethbtc",
# 'account-id': 5685075,
# amount: "0.001000000000000000",
# price: "0.0",
# 'created-at': 1545831584023,
# type: "buy-market",
# 'field-amount': "0.029100000000000000", # they have fixed it for filled-amount
# 'field-cash-amount': "0.000999788700000000", # they have fixed it for filled-cash-amount
# 'field-fees': "0.000058200000000000", # they have fixed it for filled-fees
# 'finished-at': 1545831584181,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0 }
#
id = self.safe_string(order, 'id')
side = None
type = None
status = None
if 'type' in order:
orderType = order['type'].split('-')
side = orderType[0]
type = orderType[1]
status = self.parse_order_status(self.safe_string(order, 'state'))
marketId = self.safe_string(order, 'symbol')
market = self.safe_market(marketId, market)
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer(order, 'created-at')
clientOrderId = self.safe_string(order, 'client-order-id')
amount = self.safe_number(order, 'amount')
filled = self.safe_number_2(order, 'filled-amount', 'field-amount') # typo in their API, filled amount
price = self.safe_number(order, 'price')
if price == 0.0:
price = None
cost = self.safe_number_2(order, 'filled-cash-amount', 'field-cash-amount') # same typo
feeCost = self.safe_number_2(order, 'filled-fees', 'field-fees') # typo in their API, filled fees
fee = None
if feeCost is not None:
feeCurrency = None
if market is not None:
feeCurrency = market['quote'] if (side == 'sell') else market['base']
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': clientOrderId,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'average': None,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': None,
'status': status,
'fee': fee,
'trades': None,
})
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
await self.load_accounts()
market = self.market(symbol)
request = {
'account-id': self.accounts[0]['id'],
'symbol': market['id'],
'type': side + '-' + type,
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client-order-id') # must be 64 chars max and unique within 24 hours
if clientOrderId is None:
broker = self.safe_value(self.options, 'broker', {})
brokerId = self.safe_string(broker, 'id')
request['client-order-id'] = brokerId + self.uuid()
else:
request['client-order-id'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'client-order-id'])
if (type == 'market') and (side == 'buy'):
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " market buy order requires price argument to calculate cost(total amount of quote currency to spend for buying, amount * price). To switch off self warning exception and specify cost in the amount argument, set .options['createMarketBuyOrderRequiresPrice'] = False. Make sure you know what you're doing.")
else:
# despite that cost = amount * price is in quote currency and should have quote precision
# the exchange API requires the cost supplied in 'amount' to be of base precision
# more about it here:
# https://github.com/ccxt/ccxt/pull/4395
# https://github.com/ccxt/ccxt/issues/7611
# we use amountToPrecision here because the exchange requires cost in base precision
request['amount'] = self.cost_to_precision(symbol, float(amount) * float(price))
else:
request['amount'] = self.cost_to_precision(symbol, amount)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
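        # Hypothetical market-buy example: amount=2 at price=0.025 sends
        # request['amount'] = '0.05', i.e. the total quote-currency cost rather
        # than the base amount, as the exchange expects for market buy orders.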
if type == 'limit' or type == 'ioc' or type == 'limit-maker':
request['price'] = self.price_to_precision(symbol, price)
method = self.options['createOrderMethod']
response = await getattr(self, method)(self.extend(request, params))
timestamp = self.milliseconds()
id = self.safe_string(response, 'data')
return {
'info': response,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
'clientOrderId': None,
'average': None,
}
async def cancel_order(self, id, symbol=None, params={}):
response = await self.privatePostOrderOrdersIdSubmitcancel({'id': id})
#
# response = {
# 'status': 'ok',
# 'data': '10138899000',
# }
#
return self.extend(self.parse_order(response), {
'id': id,
'status': 'canceled',
})
async def cancel_orders(self, ids, symbol=None, params={}):
await self.load_markets()
clientOrderIds = self.safe_value_2(params, 'clientOrderIds', 'client-order-ids')
params = self.omit(params, ['clientOrderIds', 'client-order-ids'])
request = {}
if clientOrderIds is None:
request['order-ids'] = ids
else:
request['client-order-ids'] = clientOrderIds
response = await self.privatePostOrderOrdersBatchcancel(self.extend(request, params))
#
# {
# "status": "ok",
# "data": {
# "success": [
# "5983466"
# ],
# "failed": [
# {
# "err-msg": "Incorrect order state",
# "order-state": 7,
# "order-id": "",
# "err-code": "order-orderstate-error",
# "client-order-id": "first"
# },
# {
# "err-msg": "Incorrect order state",
# "order-state": 7,
# "order-id": "",
# "err-code": "order-orderstate-error",
# "client-order-id": "second"
# },
# {
# "err-msg": "The record is not found.",
# "order-id": "",
# "err-code": "base-not-found",
# "client-order-id": "third"
# }
# ]
# }
# }
#
return response
async def cancel_all_orders(self, symbol=None, params={}):
await self.load_markets()
request = {
            # 'account-id' string False NA The account id used for this cancel. Refer to GET /v1/account/accounts
# 'symbol': market['id'], # a list of comma-separated symbols, all symbols by default
# 'types' 'string', buy-market, sell-market, buy-limit, sell-limit, buy-ioc, sell-ioc, buy-stop-limit, sell-stop-limit, buy-limit-fok, sell-limit-fok, buy-stop-limit-fok, sell-stop-limit-fok
# 'side': 'buy', # or 'sell'
# 'size': 100, # the number of orders to cancel 1-100
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = await self.privatePostOrderOrdersBatchCancelOpenOrders(self.extend(request, params))
#
# {
# code: 200,
# data: {
# "success-count": 2,
# "failed-count": 0,
# "next-id": 5454600
# }
# }
#
return response
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, 0, self.currencies[currency]['precision'])
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# currency: "eth",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# addressTag: "",
# chain: "eth"
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string(depositAddress, 'addressTag')
currencyId = self.safe_string(depositAddress, 'currency')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = await self.v2PrivateGetAccountDepositAddress(self.extend(request, params))
#
# {
# code: 200,
# data: [
# {
# currency: "eth",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# addressTag: "",
# chain: "eth"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
chain = self.safe_string(params, 'chain')
if chain is None:
network = self.safe_string(params, 'network')
if network is None:
return self.parse_deposit_address(self.safe_value(data, 0, {}), currency)
networks = self.safe_value(self.options, 'networks', {})
chain = self.safe_string_lower(networks, network, network)
# possible chains - usdterc20, trc20usdt, hrc20usdt, usdt, algousdt
if chain == 'erc20':
chain = currency['id'] + chain
else:
chain = chain + currency['id']
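        # Example of the chain naming scheme above: for USDT a 'network' of 'ERC20'
        # lowercases to 'erc20' and becomes 'usdterc20', while 'TRX' maps to 'trc20'
        # and becomes 'trc20usdt'; the matching entry is then picked below.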
for i in range(0, len(data)):
entry = data[i]
entryChain = self.safe_string(entry, 'chain')
if entryChain == chain:
return self.parse_deposit_address(entry, currency)
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
await self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'deposit',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = await self.privateGetQueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
await self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'withdraw',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = await self.privateGetQueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# 'id': 8211029,
# 'type': 'deposit',
# 'currency': 'eth',
# 'chain': 'eth',
# 'tx-hash': 'bd315....',
# 'amount': 0.81162421,
# 'address': '4b8b....',
# 'address-tag': '',
# 'fee': 0,
# 'state': 'safe',
# 'created-at': 1542180380965,
# 'updated-at': 1542180788077
# }
#
# fetchWithdrawals
#
# {
# 'id': 6908275,
# 'type': 'withdraw',
# 'currency': 'btc',
# 'chain': 'btc',
# 'tx-hash': 'c1a1a....',
# 'amount': 0.80257005,
# 'address': '1QR....',
# 'address-tag': '',
# 'fee': 0.0005,
# 'state': 'confirmed',
# 'created-at': 1552107295685,
# 'updated-at': 1552108032859
# }
#
timestamp = self.safe_integer(transaction, 'created-at')
updated = self.safe_integer(transaction, 'updated-at')
code = self.safe_currency_code(self.safe_string(transaction, 'currency'))
type = self.safe_string(transaction, 'type')
if type == 'withdraw':
type = 'withdrawal'
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
tag = self.safe_string(transaction, 'address-tag')
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
feeCost = abs(feeCost)
return {
'info': transaction,
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'tx-hash'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': self.safe_string(transaction, 'address'),
'tag': tag,
'type': type,
'amount': self.safe_number(transaction, 'amount'),
'currency': code,
'status': status,
'updated': updated,
'fee': {
'currency': code,
'cost': feeCost,
'rate': None,
},
}
def parse_transaction_status(self, status):
statuses = {
# deposit statuses
'unknown': 'failed',
'confirming': 'pending',
'confirmed': 'ok',
'safe': 'ok',
'orphan': 'failed',
# withdrawal statuses
'submitted': 'pending',
'canceled': 'canceled',
'reexamine': 'pending',
'reject': 'failed',
'pass': 'pending',
'wallet-reject': 'failed',
# 'confirmed': 'ok', # present in deposit statuses
'confirm-error': 'failed',
'repealed': 'failed',
'wallet-transfer': 'pending',
'pre-transfer': 'pending',
}
return self.safe_string(statuses, status, status)
async def withdraw(self, code, amount, address, tag=None, params={}):
tag, params = self.handle_withdraw_tag_and_params(tag, params)
await self.load_markets()
self.check_address(address)
currency = self.currency(code)
request = {
'address': address, # only supports existing addresses in your withdraw address list
'amount': amount,
'currency': currency['id'].lower(),
}
if tag is not None:
request['addr-tag'] = tag # only for XRP?
networks = self.safe_value(self.options, 'networks', {})
        network = self.safe_string_upper(params, 'network')  # this line allows the user to specify either ERC20 or ETH
network = self.safe_string_lower(networks, network, network) # handle ETH>ERC20 alias
if network is not None:
# possible chains - usdterc20, trc20usdt, hrc20usdt, usdt, algousdt
if network == 'erc20':
request['chain'] = currency['id'] + network
else:
request['chain'] = network + currency['id']
params = self.omit(params, 'network')
response = await self.privatePostDwWithdrawApiCreate(self.extend(request, params))
id = self.safe_string(response, 'data')
return {
'info': response,
'id': id,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/'
if api == 'market':
url += api
elif (api == 'public') or (api == 'private'):
url += self.version
elif (api == 'v2Public') or (api == 'v2Private'):
url += 'v2'
url += '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'private' or api == 'v2Private':
self.check_required_credentials()
timestamp = self.ymdhms(self.milliseconds(), 'T')
request = {
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'AccessKeyId': self.apiKey,
'Timestamp': timestamp,
}
if method != 'POST':
request = self.extend(request, query)
request = self.keysort(request)
auth = self.urlencode(request)
# unfortunately, PHP demands double quotes for the escaped newline symbol
# eslint-disable-next-line quotes
payload = "\n".join([method, self.hostname, url, auth])
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
auth += '&' + self.urlencode({'Signature': signature})
url += '?' + auth
if method == 'POST':
body = self.json(query)
headers = {
'Content-Type': 'application/json',
}
else:
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
else:
if params:
url += '?' + self.urlencode(params)
url = self.implode_params(self.urls['api'][api], {
'hostname': self.hostname,
}) + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
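    # Illustrative note (example values below are assumptions, not taken from the
    # exchange definition): for a private GET request, sign() assembles a canonical
    # payload of the form
    #     "GET\n<hostname>\n/v1/<path>\nAccessKeyId=...&SignatureMethod=HmacSHA256&SignatureVersion=2&Timestamp=..."
    # signs it with HMAC-SHA256 using the API secret, base64-encodes the digest,
    # and appends it to the query string as the Signature parameter.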
def calculate_rate_limiter_cost(self, api, method, path, params, config={}, context={}):
return self.safe_integer(config, 'cost', 1)
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'status' in response:
#
# {"status":"error","err-code":"order-limitorder-amount-min-error","err-msg":"limit order amount error, min: `0.001`","data":null}
#
status = self.safe_string(response, 'status')
if status == 'error':
code = self.safe_string(response, 'err-code')
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
message = self.safe_string(response, 'err-msg')
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
raise ExchangeError(feedback)
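# Minimal sketch (illustrative only, not used by the exchange class above) of the
# network-to-chain composition performed in fetch_deposit_address() and withdraw().
# The currency id 'usdt' and the network names below are assumed example values.
def _example_compose_chain(currency_id, network):
    # 'erc20' is appended after the currency id, every other network is prefixed
    if network == 'erc20':
        return currency_id + network  # e.g. 'usdt' + 'erc20' -> 'usdterc20'
    return network + currency_id  # e.g. 'trc20' + 'usdt' -> 'trc20usdt'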
|
py | b409315885368cf3f065f950857e8984e4418d40 | """
This file defines the classes used to represent a 'coordinate', which includes
axes, ticks, tick labels, and grid lines.
"""
import numpy as np
from matplotlib.ticker import Formatter
from matplotlib.transforms import Affine2D, ScaledTranslation
from matplotlib.patches import PathPatch
from .formatter_locator import AngleFormatterLocator, ScalarFormatterLocator
from .ticks import Ticks
from .ticklabels import TickLabels
from .axislabels import AxisLabels
from .grid_paths import get_lon_lat_path, get_gridline_path
from . import settings
from . import six
__all__ = ['CoordinateHelper']
def wrap_angle_at(values, coord_wrap):
return np.mod(values - coord_wrap, 360.) - (360. - coord_wrap)
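# Illustrative check (added for clarity, not part of the original module):
# wrap_angle_at maps angles into the interval [coord_wrap - 360, coord_wrap),
# so wrapping at 180 yields values in [-180, 180).
def _wrap_angle_at_example():
    lon = np.array([350., 10., 181.])
    return wrap_angle_at(lon, 180.)  # -> array([ -10.,   10., -179.])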
class CoordinateHelper(object):
def __init__(self, parent_axes=None, transform=None, coord_index=None,
coord_type='scalar', coord_wrap=None, frame=None):
# Keep a reference to the parent axes and the transform
self.parent_axes = parent_axes
self.transform = transform
self.coord_index = coord_index
self.coord_type = coord_type
self.frame = frame
if coord_type == 'longitude' and coord_wrap is None:
self.coord_wrap = 360
elif coord_type != 'longitude' and coord_wrap is not None:
raise NotImplementedError('coord_wrap is not yet supported for non-longitude coordinates')
else:
self.coord_wrap = coord_wrap
# Initialize tick formatter/locator
if coord_type == 'scalar':
self._formatter_locator = ScalarFormatterLocator()
elif coord_type in ['longitude', 'latitude']:
self._formatter_locator = AngleFormatterLocator()
else:
raise ValueError("coord_type should be one of 'scalar', 'longitude', or 'latitude'")
# Initialize ticks
self.dpi_transform = Affine2D()
self.offset_transform = ScaledTranslation(0, 0, self.dpi_transform)
self.ticks = Ticks(transform=parent_axes.transData + self.offset_transform)
# Initialize tick labels
self.ticklabels = TickLabels(transform=None, # display coordinates
figure=parent_axes.get_figure())
# Initialize axis labels
self.axislabels = AxisLabels(self.frame,
transform=None, # display coordinates
figure=parent_axes.get_figure())
# Initialize container for the grid lines
self.grid_lines = []
self.grid_lines_kwargs = {'visible':False,
'facecolor':'none',
'transform':self.parent_axes.transData}
def grid(self, draw_grid=True, grid_type='lines', **kwargs):
"""
Plot grid lines for this coordinate.
Standard matplotlib appearance options (color, alpha, etc.) can be
passed as keyword arguments.
Parameters
----------
draw_grid : bool
Whether to show the gridlines
grid_type : { 'lines' | 'contours' }
Whether to plot the contours by determining the grid lines in
world coordinates and then plotting them in world coordinates
(``'lines'``) or by determining the world coordinates at many
positions in the image and then drawing contours
(``'contours'``). The first is recommended for 2-d images, while
for 3-d (or higher dimensional) cubes, the ``'contours'`` option
is recommended.
"""
if grid_type in ('lines', 'contours'):
self._grid_type = grid_type
else:
raise ValueError("grid_type should be 'lines' or 'contours'")
if 'color' in kwargs:
kwargs['edgecolor'] = kwargs.pop('color')
self.grid_lines_kwargs.update(kwargs)
if self.grid_lines_kwargs['visible']:
if not draw_grid:
self.grid_lines_kwargs['visible'] = False
else:
self.grid_lines_kwargs['visible'] = True
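    # Example usage from a parent axes (assumed WCSAxes-style API, for illustration):
    #     ax.coords[0].grid(color='white', alpha=0.5, grid_type='contours')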
def set_major_formatter(self, formatter):
"""
Set the formatter to use for the major tick labels.
Parameters
----------
formatter : str or Formatter
The format or formatter to use.
"""
if isinstance(formatter, Formatter):
raise NotImplementedError() # figure out how to swap out formatter
elif isinstance(formatter, six.string_types):
self._formatter_locator.format = formatter
else:
raise TypeError("formatter should be a string or a Formatter "
"instance")
def set_ticks(self, values=None, spacing=None, number=None, size=None,
color=None, alpha=None):
"""
Set the location and properties of the ticks.
At most one of the options from ``values``, ``spacing``, or
``number`` can be specified.
Parameters
----------
values : iterable, optional
The coordinate values at which to show the ticks.
spacing : float, optional
The spacing between ticks.
number : float, optional
The approximate number of ticks shown.
size : float, optional
The length of the ticks in points
color : str or tuple
A valid Matplotlib color for the ticks
"""
if sum([values is None, spacing is None, number is None]) < 2:
raise ValueError("At most one of values, spacing, or number should "
"be specified")
if values is not None:
self._formatter_locator.values = values
elif spacing is not None:
self._formatter_locator.spacing = spacing
elif number is not None:
self._formatter_locator.number = number
if size is not None:
self.ticks.set_ticksize(size)
if color is not None:
self.ticks.set_color(color)
if alpha is not None:
self.ticks.set_alpha(alpha)
def set_ticks_position(self, position):
"""
Set where ticks should appear
Parameters
----------
position : str
The axes on which the ticks for this coordinate should appear.
Should be a string containing zero or more of ``'b'``, ``'t'``,
``'l'``, ``'r'``. For example, ``'lb'`` will lead the ticks to be
shown on the left and bottom axis.
"""
self.ticks.set_visible_axes(position)
def set_ticklabel(self, **kwargs):
"""
Set the visual properties for the tick labels.
Parameters
----------
kwargs
Keyword arguments are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
self.ticklabels.set(**kwargs)
def set_ticklabel_position(self, position):
"""
Set where tick labels should appear
Parameters
----------
position : str
The axes on which the tick labels for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
tick labels to be shown on the left and bottom axis.
"""
self.ticklabels.set_visible_axes(position)
def set_axislabel(self, text, **kwargs):
"""
Set the text and optionally visual properties for the axis label.
Parameters
----------
text : str
The axis label text.
kwargs
Keywords are passed to :class:`matplotlib.text.Text`. These
can include keywords to set the ``color``, ``size``, ``weight``, and
other text properties.
"""
self.axislabels.set_text(text)
self.axislabels.set(**kwargs)
def get_axislabel(self):
"""
Get the text for the axis label
Returns
-------
label : str
The axis label
"""
return self.axislabels.get_text()
def set_axislabel_position(self, position):
"""
Set where axis labels should appear
Parameters
----------
position : str
The axes on which the axis label for this coordinate should
appear. Should be a string containing zero or more of ``'b'``,
``'t'``, ``'l'``, ``'r'``. For example, ``'lb'`` will lead the
axis label to be shown on the left and bottom axis.
"""
self.axislabels.set_visible_axes(position)
@property
def locator(self):
return self._formatter_locator.locator
@property
def formatter(self):
return self._formatter_locator.formatter
def _draw(self, renderer, bboxes):
renderer.open_group('coordinate_axis')
self._update_ticks(renderer)
self.ticks.draw(renderer)
self.ticklabels.draw(renderer, bboxes=bboxes)
if self.grid_lines_kwargs['visible']:
if self._grid_type == 'lines':
self._update_grid_lines()
else:
self._update_grid_contour()
if self._grid_type == 'lines':
for path in self.grid_lines:
p = PathPatch(path, **self.grid_lines_kwargs)
p.set_clip_path(self.frame.path, Affine2D())
p.draw(renderer)
else:
for line in self.grid.collections:
line.set(**self.grid_lines_kwargs)
line.draw(renderer)
renderer.close_group('coordinate_axis')
def _draw_axislabels(self, renderer, bboxes):
renderer.open_group('axis labels')
self.axislabels.draw(renderer, bboxes=bboxes)
renderer.close_group('axis labels')
def _update_ticks(self, renderer):
# TODO: this method should be optimized for speed
# Here we determine the location and rotation of all the ticks. For
# each axis, we can check the intersections for the specific
# coordinate and once we have the tick positions, we can use the WCS
# to determine the rotations.
# Find the range of coordinates in all directions
coord_range = self.parent_axes.get_coord_range(self.transform)
# First find the ticks we want to show
tick_world_coordinates, spacing = self._formatter_locator.locator(*coord_range[self.coord_index])
# We want to allow non-standard rectangular frames, so we just rely on
# the parent axes to tell us what the bounding frame is.
frame = self.frame.sample(settings.FRAME_BOUNDARY_SAMPLES)
self.ticks.clear()
self.ticklabels.clear()
lblinfo = []
lbl_world = []
for axis, spine in frame.iteritems():
# Determine tick rotation in display coordinates and compare to
# the normal angle in display coordinates.
pixel0 = spine.data
world0 = spine.world[:,self.coord_index]
world0 = self.transform.transform(pixel0)[:,self.coord_index]
pixel1 = pixel0.copy()
pixel1[:,0] += 1
world1 = self.transform.transform(pixel1)[:,self.coord_index]
pixel2 = pixel0.copy()
pixel2[:,1] += 1 if self.frame.origin == 'lower' else -1
world2 = self.transform.transform(pixel2)[:,self.coord_index]
dx = (world1 - world0)
dy = (world2 - world0)
# Rotate by 90 degrees
dx, dy = -dy, dx
if self.coord_type == 'longitude':
# Here we wrap at 180 not self.coord_wrap since we want to
# always ensure abs(dx) < 180 and abs(dy) < 180
dx = wrap_angle_at(dx, 180.)
dy = wrap_angle_at(dy, 180.)
tick_angle = np.degrees(np.arctan2(dy, dx))
normal_angle_full = np.hstack([spine.normal_angle, spine.normal_angle[-1]])
reset = (((normal_angle_full - tick_angle) % 360 > 90.) &
((tick_angle - normal_angle_full) % 360 > 90.))
tick_angle[reset] -= 180.
# We find for each interval the starting and ending coordinate,
# ensuring that we take wrapping into account correctly for
# longitudes.
w1 = spine.world[:-1, self.coord_index]
w2 = spine.world[1:, self.coord_index]
if self.coord_type == 'longitude':
w1 = wrap_angle_at(w1, self.coord_wrap)
w2 = wrap_angle_at(w2, self.coord_wrap)
w1[w2 - w1 > 180.] += 360
w2[w1 - w2 > 180.] += 360
# For longitudes, we need to check ticks as well as ticks + 360,
# since the above can produce pairs such as 359 to 361 or 0.5 to
# 1.5, both of which would match a tick at 0.75. Otherwise we just
# check the ticks determined above.
if self.coord_type == 'longitude':
tick_world_coordinates = np.hstack([tick_world_coordinates,
tick_world_coordinates + 360.])
for t in tick_world_coordinates:
# Find steps where a tick is present
intersections = np.nonzero(((t - w1) * (t - w2)) < 0)[0]
# Loop over ticks, and find exact pixel coordinates by linear
# interpolation
for imin in intersections:
imax = imin + 1
frac = (t - w1[imin]) / (w2[imin] - w1[imin])
x_data_i = spine.data[imin, 0] + frac * (spine.data[imax, 0] - spine.data[imin, 0])
y_data_i = spine.data[imin, 1] + frac * (spine.data[imax, 1] - spine.data[imin, 1])
x_pix_i = spine.pixel[imin, 0] + frac * (spine.pixel[imax, 0] - spine.pixel[imin, 0])
y_pix_i = spine.pixel[imin, 1] + frac * (spine.pixel[imax, 1] - spine.pixel[imin, 1])
delta_angle = tick_angle[imax] - tick_angle[imin]
if delta_angle > 180.:
delta_angle -= 360.
elif delta_angle < -180.:
delta_angle += 360.
angle_i = tick_angle[imin] + frac * delta_angle
if self.coord_type == 'longitude':
world = wrap_angle_at(t, self.coord_wrap)
else:
world = t
self.ticks.add(axis=axis,
pixel=(x_data_i, y_data_i),
world=world,
angle=angle_i,
axis_displacement=imin + frac)
# store information to pass to ticklabels.add
# it's faster to format many ticklabels at once outside
# of the loop
lblinfo.append(dict(axis=axis,
pixel=(x_pix_i, y_pix_i),
world=world,
angle=spine.normal_angle[imin],
axis_displacement=imin + frac))
lbl_world.append(world)
# format tick labels, add to scene
text = self._formatter_locator.formatter(lbl_world, spacing=spacing)
for kwargs, txt in zip(lblinfo, text):
self.ticklabels.add(text=txt, **kwargs)
def _update_grid_lines(self):
# For 3-d WCS with a correlated third axis, the *proper* way of
# drawing a grid should be to find the world coordinates of all pixels
# and drawing contours. What we are doing here assumes that we can
# define the grid lines with just two of the coordinates (and
# therefore assumes that the other coordinates are fixed and set to
# the value in the slice). Here we basically assume that if the WCS
# had a third axis, it has been abstracted away in the transformation.
coord_range = self.parent_axes.get_coord_range(self.transform)
tick_world_coordinates, spacing = self._formatter_locator.locator(*coord_range[self.coord_index])
self.grid_lines = []
for w in tick_world_coordinates:
if self.coord_index == 0:
x_world = np.repeat(w, 1000)
y_world = np.linspace(coord_range[1][0], coord_range[1][1], 1000)
else:
x_world = np.linspace(coord_range[0][0], coord_range[0][1], 1000)
y_world = np.repeat(w, 1000)
xy_world = np.vstack([x_world, y_world]).transpose()
self.grid_lines.append(self._get_gridline(xy_world))
def _get_gridline(self, xy_world):
if self.coord_type == 'scalar':
return get_gridline_path(self.parent_axes, self.transform, xy_world)
else:
return get_lon_lat_path(self.parent_axes, self.transform, xy_world)
def _update_grid_contour(self):
xmin, xmax = self.parent_axes.get_xlim()
ymin, ymax = self.parent_axes.get_ylim()
X, Y, field = self.transform.get_coord_slices(xmin, xmax, ymin, ymax, 200, 200)
coord_range = self.parent_axes.get_coord_range(self.transform)
tick_world_coordinates, spacing = self._formatter_locator.locator(*coord_range[self.coord_index])
field = field[self.coord_index]
if self.coord_type == 'longitude':
# Find biggest gap in tick_world_coordinates and wrap in middle
# For now just assume spacing is equal, so any mid-point will do
mid = 0.5 * (tick_world_coordinates[0] + tick_world_coordinates[1])
field = wrap_angle_at(field, mid)
tick_world_coordinates = wrap_angle_at(tick_world_coordinates, mid)
# Replace wraps by NaN
reset = (np.abs(np.diff(field[:,:-1], axis=0)) > 180) | (np.abs(np.diff(field[:-1,:], axis=1)) > 180)
field[:-1,:-1][reset] = np.nan
field[1:,:-1][reset] = np.nan
field[:-1,1:][reset] = np.nan
field[1:,1:][reset] = np.nan
self.grid = self.parent_axes.contour(X, Y, field.transpose(), levels=tick_world_coordinates)
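# Minimal sketch (illustrative, not used by CoordinateHelper) of the linear
# interpolation step performed in _update_ticks: a tick value t falling between
# two sampled world coordinates w1 and w2 is placed at the same fractional
# position along the corresponding pixel segment (p1, p2).
def _interpolate_tick_position(t, w1, w2, p1, p2):
    frac = (t - w1) / (w2 - w1)
    return p1 + frac * (p2 - p1)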
|
py | b40931e8448b2b859b93ba3ea79573ad56949703 | __all__ = ['TenantURL', 'TenantName', 'ProjectName', 'Username']
class TenantURL(str):
"""TACC.cloud tenant base URL"""
def __new__(cls, value):
# value = str(value).lower()
return str.__new__(cls, value)
class TenantName(str):
"""TACC.cloud tenant name"""
def __new__(cls, value):
# value = str(value).lower()
return str.__new__(cls, value)
class ProjectName(str):
"""TACC.cloud project name"""
def __new__(cls, value):
# value = str(value).lower()
return str.__new__(cls, value)
class Username(str):
"""TACC.cloud username"""
def __new__(cls, value):
# value = str(value).lower()
return str.__new__(cls, value)
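# Illustrative usage (the values below are made-up examples): each subclass is a
# thin str wrapper kept for type-annotation clarity, so instances behave exactly
# like ordinary strings.
def _example_usage():
    url = TenantURL('https://api.example.tacc.cloud')
    user = Username('exampleuser')
    return isinstance(url, str) and isinstance(user, str)  # -> True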
|
py | b4093250fb1b431a04704a996fa359dffee196d1 | import numpy as np
import openmdao.api as om
from .vandermonde_control_interp_comp import VandermondeControlInterpComp
from .state_rate_collector_comp import StateRateCollectorComp
from .tau_comp import TauComp
from ...utils.introspection import get_targets, configure_controls_introspection,\
configure_time_introspection, configure_parameters_introspection, \
configure_states_discovery, configure_states_introspection
from ...utils.misc import get_rate_units
class ODEEvaluationGroup(om.Group):
"""
A group whose purpose is to evaluate the ODE and output the computed state rates.
Parameters
----------
ode_class : class
The class of the OpenMDAO system to be used to evaluate the ODE in this Group.
time_options : OptionsDictionary
OptionsDictionary of time options.
state_options : dict of {str: OptionsDictionary}
For each state variable, a dictionary of its options, keyed by name.
parameter_options : dict of {str: OptionsDictionary}
For each parameter, a dictionary of its options, keyed by name.
control_options : dict of {str: OptionsDictionary}
For each control variable, a dictionary of its options, keyed by name.
polynomial_control_options : dict of {str: OptionsDictionary}
        For each polynomial control variable, a dictionary of its options, keyed by name.
ode_init_kwargs : dict
A dictionary of keyword arguments to be passed to the instantiation of the ODE.
grid_data : GridData
The GridData instance pertaining to the phase to which this ODEEvaluationGroup belongs.
**kwargs : dict
Additional keyword arguments passed to Group.
"""
def __init__(self, ode_class, time_options, state_options, parameter_options, control_options,
polynomial_control_options, ode_init_kwargs=None,
grid_data=None, **kwargs):
super().__init__(**kwargs)
# Get the state vector. This isn't necessarily ordered
# so just pick the default ordering and go with it.
self.state_options = state_options
self.parameter_options = parameter_options
self.time_options = time_options
self.control_options = control_options
self.polynomial_control_options = polynomial_control_options
self.control_interpolants = {}
self.polynomial_control_interpolants = {}
self.ode_class = ode_class
self.grid_data = grid_data
self.ode_init_kwargs = {} if ode_init_kwargs is None else ode_init_kwargs
def setup(self):
"""
Define the structure of the ODEEvaluationGroup.
"""
gd = self.grid_data
        # All states, controls, parameters, and polynomial controls need to exist
# in the ODE evaluation group regardless of whether or not they have targets in the ODE.
# This makes taking the derivatives more consistent without Exceptions.
self._ivc = self.add_subsystem('ivc', om.IndepVarComp(), promotes_outputs=['*'])
if self.control_options or self.polynomial_control_options:
c_options = self.control_options
pc_options = self.polynomial_control_options
# Add a component to compute the current non-dimensional phase time.
self.add_subsystem('tau_comp', TauComp(grid_data=self.grid_data,
time_units=self.time_options['units']),
promotes_inputs=['time', 't_initial', 't_duration'],
promotes_outputs=['stau', 'ptau', 'dstau_dt', 'time_phase', 'segment_index'])
# Add control interpolant
self._control_comp = self.add_subsystem('control_interp',
VandermondeControlInterpComp(grid_data=gd,
control_options=c_options,
polynomial_control_options=pc_options,
time_units=self.time_options['units']),
promotes_inputs=['ptau', 'stau', 'dstau_dt', 'segment_index'])
self.add_subsystem('ode', self.ode_class(num_nodes=1, **self.ode_init_kwargs))
self.add_subsystem('state_rate_collector',
StateRateCollectorComp(state_options=self.state_options,
time_units=self.time_options['units']))
def configure(self):
"""
Perform I/O creation for this group's underlying members.
In dymos, this system sits within a subproblem and therefore isn't in the standard
configuration chain. We need to perform all of the introspection of the ODE here.
"""
ode = self._get_subsystem('ode')
configure_time_introspection(self.time_options, ode)
self._configure_time()
configure_parameters_introspection(self.parameter_options, ode)
self._configure_params()
configure_controls_introspection(self.control_options, ode,
time_units=self.time_options['units'])
self._configure_controls()
configure_controls_introspection(self.polynomial_control_options, ode,
time_units=self.time_options['units'])
self._configure_polynomial_controls()
if self.control_options or self.polynomial_control_options:
self._get_subsystem('control_interp').configure_io()
configure_states_discovery(self.state_options, ode)
configure_states_introspection(self.state_options, self.time_options, self.control_options,
self.parameter_options,
self.polynomial_control_options, ode)
self._configure_states()
self.state_rate_collector.configure_io()
def _configure_time(self):
targets = self.time_options['targets']
time_phase_targets = self.time_options['time_phase_targets']
t_initial_targets = self.time_options['t_initial_targets']
t_duration_targets = self.time_options['t_duration_targets']
units = self.time_options['units']
for tgts, var in [(targets, 'time'), (time_phase_targets, 'time_phase'),
(t_initial_targets, 't_initial'), (t_duration_targets, 't_duration')]:
if var != 'time_phase':
self._ivc.add_output(var, shape=(1,), units=units)
for t in tgts:
self.promotes('ode', inputs=[(t, var)])
if tgts:
self.set_input_defaults(name=var,
val=np.ones((1,)),
units=units)
def _configure_states(self):
for name, options in self.state_options.items():
shape = options['shape']
units = options['units']
targets = options['targets'] if options['targets'] is not None else []
rate_path, rate_io = self._get_rate_source_path(name)
var_name = f'states:{name}'
self._ivc.add_output(var_name, shape=shape, units=units)
self.add_design_var(var_name)
# Promote targets from the ODE
for tgt in targets:
self.promotes('ode', inputs=[(tgt, var_name)])
if targets:
self.set_input_defaults(name=var_name,
val=np.ones(shape),
units=options['units'])
# If the state rate source is an output, connect it, otherwise
# promote it to the appropriate name
if rate_io == 'output':
self.connect(rate_path, f'state_rate_collector.state_rates_in:{name}_rate')
else:
self.promotes('state_rate_collector',
inputs=[(f'state_rates_in:{name}_rate', rate_path)])
self.add_constraint(f'state_rate_collector.state_rates:{name}_rate')
def _configure_params(self):
for name, options in self.parameter_options.items():
shape = options['shape']
targets = get_targets(ode=self.ode, name=name, user_targets=options['targets'])
units = options['units']
var_name = f'parameters:{name}'
self._ivc.add_output(var_name, shape=shape, units=units)
self.add_design_var(var_name)
# Promote targets from the ODE
for tgt in targets:
self.promotes('ode', inputs=[(tgt, var_name)])
if targets:
self.set_input_defaults(name=var_name,
val=np.ones(shape),
units=options['units'])
def _configure_controls(self):
configure_controls_introspection(self.control_options, self.ode)
time_units = self.time_options['units']
if self.control_options:
gd = self.grid_data
if gd is None:
raise ValueError('ODEEvaluationGroup was provided with control options but '
'a GridData object was not provided.')
num_control_input_nodes = gd.subset_num_nodes['control_input']
for name, options in self.control_options.items():
shape = options['shape']
units = options['units']
rate_units = get_rate_units(units, time_units, deriv=1)
rate2_units = get_rate_units(units, time_units, deriv=2)
targets = options['targets']
rate_targets = options['rate_targets']
rate2_targets = options['rate2_targets']
uhat_name = f'controls:{name}'
u_name = f'control_values:{name}'
u_rate_name = f'control_rates:{name}_rate'
u_rate2_name = f'control_rates:{name}_rate2'
self._ivc.add_output(uhat_name, shape=(num_control_input_nodes,) + shape, units=units)
self.add_design_var(uhat_name)
self.promotes('control_interp', inputs=[uhat_name],
outputs=[u_name, u_rate_name, u_rate2_name])
# Promote targets from the ODE
for tgt in targets:
self.promotes('ode', inputs=[(tgt, u_name)])
if targets:
self.set_input_defaults(name=u_name,
val=np.ones(shape),
units=options['units'])
# Promote rate targets from the ODE
for tgt in rate_targets:
self.promotes('ode', inputs=[(tgt, u_rate_name)])
if rate_targets:
self.set_input_defaults(name=u_rate_name,
val=np.ones(shape),
units=rate_units)
# Promote rate2 targets from the ODE
for tgt in rate2_targets:
self.promotes('ode', inputs=[(tgt, u_rate2_name)])
if rate2_targets:
self.set_input_defaults(name=u_rate2_name,
val=np.ones(shape),
units=rate2_units)
def _configure_polynomial_controls(self):
configure_controls_introspection(self.polynomial_control_options, self.ode)
if self.polynomial_control_options:
time_units = self.time_options['units']
gd = self.grid_data
if gd is None:
                raise ValueError('ODEEvaluationGroup was provided with polynomial control '
                                 'options but a GridData object was not provided.')
for name, options in self.polynomial_control_options.items():
shape = options['shape']
units = options['units']
rate_units = get_rate_units(units, time_units, deriv=1)
rate2_units = get_rate_units(units, time_units, deriv=2)
targets = options['targets']
rate_targets = options['rate_targets']
rate2_targets = options['rate2_targets']
num_control_input_nodes = options['order'] + 1
uhat_name = f'polynomial_controls:{name}'
u_name = f'polynomial_control_values:{name}'
u_rate_name = f'polynomial_control_rates:{name}_rate'
u_rate2_name = f'polynomial_control_rates:{name}_rate2'
self._ivc.add_output(uhat_name, shape=(num_control_input_nodes,) + shape, units=units)
self.add_design_var(uhat_name)
self.promotes('control_interp', inputs=[uhat_name],
outputs=[u_name, u_rate_name, u_rate2_name])
# Promote targets from the ODE
for tgt in targets:
self.promotes('ode', inputs=[(tgt, u_name)])
if targets:
self.set_input_defaults(name=u_name,
val=np.ones(shape),
units=options['units'])
# Promote rate targets from the ODE
for tgt in rate_targets:
self.promotes('ode', inputs=[(tgt, u_rate_name)])
if rate_targets:
self.set_input_defaults(name=u_rate_name,
val=np.ones(shape),
units=rate_units)
# Promote rate2 targets from the ODE
for tgt in rate2_targets:
self.promotes('ode', inputs=[(tgt, u_rate2_name)])
if rate2_targets:
self.set_input_defaults(name=u_rate2_name,
val=np.ones(shape),
units=rate2_units)
def _get_rate_source_path(self, state_var):
"""
Get path of the rate source variable so that we can connect it to the
outputs when we're done.
Parameters
----------
state_var : str
The name of the state variable whose path is desired.
Returns
-------
path : str
The path to the rate source of the state variable.
io : str
A string indicating whether the variable in the path is an 'input'
or an 'output'.
"""
var = self.state_options[state_var]['rate_source']
if var == 'time':
rate_path = 'time'
io = 'input'
elif var == 'time_phase':
rate_path = 'time_phase'
io = 'input'
elif self.state_options is not None and var in self.state_options:
rate_path = f'states:{var}'
io = 'input'
elif self.control_options is not None and var in self.control_options:
rate_path = f'controls:{var}'
io = 'output'
elif self.polynomial_control_options is not None and var in self.polynomial_control_options:
rate_path = f'polynomial_controls:{var}'
io = 'output'
elif self.parameter_options is not None and var in self.parameter_options:
rate_path = f'parameters:{var}'
io = 'input'
elif var.endswith('_rate') and self.control_options is not None and \
var[:-5] in self.control_options:
rate_path = f'control_rates:{var}'
io = 'output'
elif var.endswith('_rate2') and self.control_options is not None and \
var[:-6] in self.control_options:
rate_path = f'control_rates:{var}'
io = 'output'
elif var.endswith('_rate') and self.polynomial_control_options is not None and \
var[:-5] in self.polynomial_control_options:
rate_path = f'polynomial_control_rates:{var}'
io = 'output'
elif var.endswith('_rate2') and self.polynomial_control_options is not None and \
var[:-6] in self.polynomial_control_options:
rate_path = f'polynomial_control_rates:{var}'
io = 'output'
else:
rate_path = f'ode.{var}'
io = 'output'
return rate_path, io
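# Illustrative examples (assumed variable names) of the mapping performed by
# ODEEvaluationGroup._get_rate_source_path:
#   rate_source 'time'                    -> ('time', 'input')
#   rate_source naming a control 'theta'  -> ('controls:theta', 'output')
#   rate_source 'theta_rate'              -> ('control_rates:theta_rate', 'output')
#   any other name, e.g. 'vdot'           -> ('ode.vdot', 'output')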
|
py | b40933459a4f2c0a06a22f65714051972214cb27 | from typing import Dict, Optional
from sc2 import UnitTypeId, AbilityId
from sc2.position import Point2
from sc2.unit import Unit
from sc2.units import Units
from sharpy.managers.combat2 import GenericMicro, Action, MoveType, MicroStep, CombatModel
high_priority: Dict[UnitTypeId, int] = {
# Terran
UnitTypeId.MULE: 9,
UnitTypeId.SCV: 9,
UnitTypeId.SIEGETANK: 3,
UnitTypeId.SIEGETANKSIEGED: 5, # sieged tanks are much higher priority than unsieged
UnitTypeId.GHOST: 10,
UnitTypeId.REAPER: 8,
UnitTypeId.MARAUDER: 4,
UnitTypeId.MARINE: 8,
UnitTypeId.CYCLONE: 4,
UnitTypeId.HELLION: 8,
UnitTypeId.HELLIONTANK: 3,
UnitTypeId.THOR: 3,
UnitTypeId.MEDIVAC: -1,
UnitTypeId.VIKINGFIGHTER: -1,
UnitTypeId.VIKINGASSAULT: -1,
UnitTypeId.LIBERATORAG: -1,
UnitTypeId.LIBERATOR: -1,
UnitTypeId.RAVEN: -1,
UnitTypeId.BATTLECRUISER: -1,
UnitTypeId.MISSILETURRET: 1,
UnitTypeId.BUNKER: 2,
# Zerg
UnitTypeId.DRONE: 9,
UnitTypeId.ZERGLING: 8,
UnitTypeId.BANELING: 10,
UnitTypeId.ULTRALISK: 4,
UnitTypeId.QUEEN: 6,
UnitTypeId.ROACH: 4,
UnitTypeId.RAVAGER: 4,
UnitTypeId.HYDRALISK: 8,
UnitTypeId.HYDRALISKBURROWED: 8,
UnitTypeId.LURKERMP: 3,
UnitTypeId.LURKERMPBURROWED: 3,
UnitTypeId.INFESTOR: 10,
UnitTypeId.BROODLORD: -1,
UnitTypeId.MUTALISK: -1,
UnitTypeId.CORRUPTOR: -1,
UnitTypeId.INFESTEDTERRAN: 1,
UnitTypeId.LARVA: -1,
UnitTypeId.EGG: -1,
UnitTypeId.LOCUSTMP: -1,
# Protoss
UnitTypeId.SENTRY: 9,
UnitTypeId.PROBE: 10,
UnitTypeId.HIGHTEMPLAR: 10,
UnitTypeId.DARKTEMPLAR: 9,
UnitTypeId.ADEPT: 8,
UnitTypeId.ZEALOT: 8,
UnitTypeId.STALKER: 4,
UnitTypeId.IMMORTAL: 2,
UnitTypeId.COLOSSUS: 3,
UnitTypeId.ARCHON: 4,
UnitTypeId.SHIELDBATTERY: 1,
UnitTypeId.PHOTONCANNON: 1,
UnitTypeId.PYLON: 2,
UnitTypeId.FLEETBEACON: 3,
}
class MicroAdepts(GenericMicro):
def __init__(self, knowledge):
super().__init__(knowledge)
self.prio_dict = high_priority
def unit_solve_combat(self, unit: Unit, current_command: Action) -> Action:
shuffler = unit.tag % 10
target: Optional[Unit] = None
enemy: Unit
target = self.get_target(self.enemies_near_by, target, unit, shuffler)
shade_tag = self.cd_manager.adept_to_shade.get(unit.tag, None)
if shade_tag:
shade = self.cache.by_tag(shade_tag)
if shade:
if target is None:
nearby: Units = self.knowledge.unit_cache.enemy_in_range(shade.position, 12)
target = self.get_target(nearby, target, shade, shuffler)
if target is not None:
pos: Point2 = target.position
self.ai.do(shade.move(pos.towards(unit, -1)))
if self.move_type in {MoveType.SearchAndDestroy, MoveType.Assault} and self.model == CombatModel.RoachToStalker:
if self.cd_manager.is_ready(unit.tag, AbilityId.ADEPTPHASESHIFT_ADEPTPHASESHIFT):
if target is not None:
return Action(target.position, False, AbilityId.ADEPTPHASESHIFT_ADEPTPHASESHIFT)
return super().unit_solve_combat(unit, current_command)
def get_target(self, nearby: Units, target: Optional[Unit], unit: Unit, shuffler: float) -> Optional[Unit]:
best_score = 0
for enemy in nearby:
d = enemy.distance_to(unit)
if d < 12 and not enemy.is_flying:
score = d * 0.2 - self.unit_values.power(enemy)
if enemy.is_light:
score += 5
score += 0.1 * (enemy.tag % (shuffler + 2))
if score > best_score:
target = enemy
best_score = score
return target
# TODO: Adepts shade on top of marines
# TODO: Adepts put out a escape shade
# TODO: Adepts shade to kill workers?
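# Illustrative restatement (not executed anywhere) of the scoring used in
# MicroAdepts.get_target for ground enemies within 12 range:
#   score = 0.2 * distance - power(enemy) + (5 if enemy.is_light else 0)
#           + 0.1 * (enemy.tag % (shuffler + 2))
# The enemy with the highest score becomes the target.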
|
py | b409342787272e3b489af7b55361b71b701f8c5a | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from typing import Callable, Dict, Optional, Sequence, Tuple, Union
from google.api_core import grpc_helpers
from google.api_core import operations_v1
from google.api_core import gapic_v1
import google.auth # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
import grpc # type: ignore
from google.cloud.contact_center_insights_v1.types import contact_center_insights
from google.cloud.contact_center_insights_v1.types import resources
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
from .base import ContactCenterInsightsTransport, DEFAULT_CLIENT_INFO
class ContactCenterInsightsGrpcTransport(ContactCenterInsightsTransport):
"""gRPC backend transport for ContactCenterInsights.
An API that lets users analyze and explore their business
conversation data.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_stubs: Dict[str, Callable]
def __init__(
self,
*,
host: str = "contactcenterinsights.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Sequence[str] = None,
channel: grpc.Channel = None,
api_mtls_endpoint: str = None,
client_cert_source: Callable[[], Tuple[bytes, bytes]] = None,
ssl_channel_credentials: grpc.ChannelCredentials = None,
client_cert_source_for_mtls: Callable[[], Tuple[bytes, bytes]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
            scopes (Optional[Sequence[str]]): A list of scopes. This argument is
ignored if ``channel`` is provided.
channel (Optional[grpc.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
credentials=self._credentials,
credentials_file=credentials_file,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@classmethod
def create_channel(
cls,
host: str = "contactcenterinsights.googleapis.com",
credentials: ga_credentials.Credentials = None,
credentials_file: str = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> grpc.Channel:
"""Create and return a gRPC channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): An optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
grpc.Channel: A gRPC channel object.
Raises:
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
return grpc_helpers.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
@property
def grpc_channel(self) -> grpc.Channel:
"""Return the channel designed to connect to this service.
"""
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Sanity check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsClient(self.grpc_channel)
# Return the client from cache.
return self._operations_client
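    # Note: each RPC property below follows the same lazy-caching pattern: the
    # first access creates a unary-unary stub on the gRPC channel, stores it in
    # self._stubs, and subsequent accesses return the cached callable. For
    # example (with an assumed request object), transport.create_conversation(request)
    # invokes the cached stub for the CreateConversation RPC.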
@property
def create_conversation(
self,
) -> Callable[
[contact_center_insights.CreateConversationRequest], resources.Conversation
]:
r"""Return a callable for the create conversation method over gRPC.
Creates a conversation.
Returns:
Callable[[~.CreateConversationRequest],
~.Conversation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_conversation" not in self._stubs:
self._stubs["create_conversation"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/CreateConversation",
request_serializer=contact_center_insights.CreateConversationRequest.serialize,
response_deserializer=resources.Conversation.deserialize,
)
return self._stubs["create_conversation"]
@property
def update_conversation(
self,
) -> Callable[
[contact_center_insights.UpdateConversationRequest], resources.Conversation
]:
r"""Return a callable for the update conversation method over gRPC.
Updates a conversation.
Returns:
Callable[[~.UpdateConversationRequest],
~.Conversation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_conversation" not in self._stubs:
self._stubs["update_conversation"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/UpdateConversation",
request_serializer=contact_center_insights.UpdateConversationRequest.serialize,
response_deserializer=resources.Conversation.deserialize,
)
return self._stubs["update_conversation"]
@property
def get_conversation(
self,
) -> Callable[
[contact_center_insights.GetConversationRequest], resources.Conversation
]:
r"""Return a callable for the get conversation method over gRPC.
Gets a conversation.
Returns:
Callable[[~.GetConversationRequest],
~.Conversation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_conversation" not in self._stubs:
self._stubs["get_conversation"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/GetConversation",
request_serializer=contact_center_insights.GetConversationRequest.serialize,
response_deserializer=resources.Conversation.deserialize,
)
return self._stubs["get_conversation"]
@property
def list_conversations(
self,
) -> Callable[
[contact_center_insights.ListConversationsRequest],
contact_center_insights.ListConversationsResponse,
]:
r"""Return a callable for the list conversations method over gRPC.
Lists conversations.
Returns:
Callable[[~.ListConversationsRequest],
~.ListConversationsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_conversations" not in self._stubs:
self._stubs["list_conversations"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/ListConversations",
request_serializer=contact_center_insights.ListConversationsRequest.serialize,
response_deserializer=contact_center_insights.ListConversationsResponse.deserialize,
)
return self._stubs["list_conversations"]
@property
def delete_conversation(
self,
) -> Callable[[contact_center_insights.DeleteConversationRequest], empty_pb2.Empty]:
r"""Return a callable for the delete conversation method over gRPC.
Deletes a conversation.
Returns:
Callable[[~.DeleteConversationRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_conversation" not in self._stubs:
self._stubs["delete_conversation"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/DeleteConversation",
request_serializer=contact_center_insights.DeleteConversationRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_conversation"]
@property
def create_analysis(
self,
) -> Callable[
[contact_center_insights.CreateAnalysisRequest], operations_pb2.Operation
]:
r"""Return a callable for the create analysis method over gRPC.
Creates an analysis. The long running operation is
done when the analysis has completed.
Returns:
Callable[[~.CreateAnalysisRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_analysis" not in self._stubs:
self._stubs["create_analysis"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/CreateAnalysis",
request_serializer=contact_center_insights.CreateAnalysisRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_analysis"]
@property
def get_analysis(
self,
) -> Callable[[contact_center_insights.GetAnalysisRequest], resources.Analysis]:
r"""Return a callable for the get analysis method over gRPC.
Gets an analysis.
Returns:
Callable[[~.GetAnalysisRequest],
~.Analysis]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_analysis" not in self._stubs:
self._stubs["get_analysis"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/GetAnalysis",
request_serializer=contact_center_insights.GetAnalysisRequest.serialize,
response_deserializer=resources.Analysis.deserialize,
)
return self._stubs["get_analysis"]
@property
def list_analyses(
self,
) -> Callable[
[contact_center_insights.ListAnalysesRequest],
contact_center_insights.ListAnalysesResponse,
]:
r"""Return a callable for the list analyses method over gRPC.
Lists analyses.
Returns:
Callable[[~.ListAnalysesRequest],
~.ListAnalysesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_analyses" not in self._stubs:
self._stubs["list_analyses"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/ListAnalyses",
request_serializer=contact_center_insights.ListAnalysesRequest.serialize,
response_deserializer=contact_center_insights.ListAnalysesResponse.deserialize,
)
return self._stubs["list_analyses"]
@property
def delete_analysis(
self,
) -> Callable[[contact_center_insights.DeleteAnalysisRequest], empty_pb2.Empty]:
r"""Return a callable for the delete analysis method over gRPC.
Deletes an analysis.
Returns:
Callable[[~.DeleteAnalysisRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_analysis" not in self._stubs:
self._stubs["delete_analysis"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/DeleteAnalysis",
request_serializer=contact_center_insights.DeleteAnalysisRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_analysis"]
@property
def export_insights_data(
self,
) -> Callable[
[contact_center_insights.ExportInsightsDataRequest], operations_pb2.Operation
]:
r"""Return a callable for the export insights data method over gRPC.
Export insights data to a destination defined in the
request body.
Returns:
Callable[[~.ExportInsightsDataRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "export_insights_data" not in self._stubs:
self._stubs["export_insights_data"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/ExportInsightsData",
request_serializer=contact_center_insights.ExportInsightsDataRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["export_insights_data"]
@property
def create_issue_model(
self,
) -> Callable[
[contact_center_insights.CreateIssueModelRequest], operations_pb2.Operation
]:
r"""Return a callable for the create issue model method over gRPC.
Creates an issue model.
Returns:
Callable[[~.CreateIssueModelRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_issue_model" not in self._stubs:
self._stubs["create_issue_model"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/CreateIssueModel",
request_serializer=contact_center_insights.CreateIssueModelRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["create_issue_model"]
@property
def update_issue_model(
self,
) -> Callable[
[contact_center_insights.UpdateIssueModelRequest], resources.IssueModel
]:
r"""Return a callable for the update issue model method over gRPC.
Updates an issue model.
Returns:
Callable[[~.UpdateIssueModelRequest],
~.IssueModel]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_issue_model" not in self._stubs:
self._stubs["update_issue_model"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/UpdateIssueModel",
request_serializer=contact_center_insights.UpdateIssueModelRequest.serialize,
response_deserializer=resources.IssueModel.deserialize,
)
return self._stubs["update_issue_model"]
@property
def get_issue_model(
self,
) -> Callable[[contact_center_insights.GetIssueModelRequest], resources.IssueModel]:
r"""Return a callable for the get issue model method over gRPC.
Gets an issue model.
Returns:
Callable[[~.GetIssueModelRequest],
~.IssueModel]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_issue_model" not in self._stubs:
self._stubs["get_issue_model"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/GetIssueModel",
request_serializer=contact_center_insights.GetIssueModelRequest.serialize,
response_deserializer=resources.IssueModel.deserialize,
)
return self._stubs["get_issue_model"]
@property
def list_issue_models(
self,
) -> Callable[
[contact_center_insights.ListIssueModelsRequest],
contact_center_insights.ListIssueModelsResponse,
]:
r"""Return a callable for the list issue models method over gRPC.
Lists issue models.
Returns:
Callable[[~.ListIssueModelsRequest],
~.ListIssueModelsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_issue_models" not in self._stubs:
self._stubs["list_issue_models"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/ListIssueModels",
request_serializer=contact_center_insights.ListIssueModelsRequest.serialize,
response_deserializer=contact_center_insights.ListIssueModelsResponse.deserialize,
)
return self._stubs["list_issue_models"]
@property
def delete_issue_model(
self,
) -> Callable[
[contact_center_insights.DeleteIssueModelRequest], operations_pb2.Operation
]:
r"""Return a callable for the delete issue model method over gRPC.
Deletes an issue model.
Returns:
Callable[[~.DeleteIssueModelRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_issue_model" not in self._stubs:
self._stubs["delete_issue_model"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/DeleteIssueModel",
request_serializer=contact_center_insights.DeleteIssueModelRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_issue_model"]
@property
def deploy_issue_model(
self,
) -> Callable[
[contact_center_insights.DeployIssueModelRequest], operations_pb2.Operation
]:
r"""Return a callable for the deploy issue model method over gRPC.
Deploys an issue model. Returns an error if a model
is already deployed. An issue model can only be used in
analysis after it has been deployed.
Returns:
Callable[[~.DeployIssueModelRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "deploy_issue_model" not in self._stubs:
self._stubs["deploy_issue_model"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/DeployIssueModel",
request_serializer=contact_center_insights.DeployIssueModelRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["deploy_issue_model"]
@property
def undeploy_issue_model(
self,
) -> Callable[
[contact_center_insights.UndeployIssueModelRequest], operations_pb2.Operation
]:
r"""Return a callable for the undeploy issue model method over gRPC.
Undeploys an issue model.
An issue model can not be used in analysis after it has
been undeployed.
Returns:
Callable[[~.UndeployIssueModelRequest],
~.Operation]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "undeploy_issue_model" not in self._stubs:
self._stubs["undeploy_issue_model"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/UndeployIssueModel",
request_serializer=contact_center_insights.UndeployIssueModelRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["undeploy_issue_model"]
@property
def get_issue(
self,
) -> Callable[[contact_center_insights.GetIssueRequest], resources.Issue]:
r"""Return a callable for the get issue method over gRPC.
Gets an issue.
Returns:
Callable[[~.GetIssueRequest],
~.Issue]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_issue" not in self._stubs:
self._stubs["get_issue"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/GetIssue",
request_serializer=contact_center_insights.GetIssueRequest.serialize,
response_deserializer=resources.Issue.deserialize,
)
return self._stubs["get_issue"]
@property
def list_issues(
self,
) -> Callable[
[contact_center_insights.ListIssuesRequest],
contact_center_insights.ListIssuesResponse,
]:
r"""Return a callable for the list issues method over gRPC.
Lists issues.
Returns:
Callable[[~.ListIssuesRequest],
~.ListIssuesResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_issues" not in self._stubs:
self._stubs["list_issues"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/ListIssues",
request_serializer=contact_center_insights.ListIssuesRequest.serialize,
response_deserializer=contact_center_insights.ListIssuesResponse.deserialize,
)
return self._stubs["list_issues"]
@property
def update_issue(
self,
) -> Callable[[contact_center_insights.UpdateIssueRequest], resources.Issue]:
r"""Return a callable for the update issue method over gRPC.
Updates an issue.
Returns:
Callable[[~.UpdateIssueRequest],
~.Issue]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_issue" not in self._stubs:
self._stubs["update_issue"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/UpdateIssue",
request_serializer=contact_center_insights.UpdateIssueRequest.serialize,
response_deserializer=resources.Issue.deserialize,
)
return self._stubs["update_issue"]
@property
def calculate_issue_model_stats(
self,
) -> Callable[
[contact_center_insights.CalculateIssueModelStatsRequest],
contact_center_insights.CalculateIssueModelStatsResponse,
]:
r"""Return a callable for the calculate issue model stats method over gRPC.
Gets an issue model's statistics.
Returns:
Callable[[~.CalculateIssueModelStatsRequest],
~.CalculateIssueModelStatsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "calculate_issue_model_stats" not in self._stubs:
self._stubs["calculate_issue_model_stats"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/CalculateIssueModelStats",
request_serializer=contact_center_insights.CalculateIssueModelStatsRequest.serialize,
response_deserializer=contact_center_insights.CalculateIssueModelStatsResponse.deserialize,
)
return self._stubs["calculate_issue_model_stats"]
@property
def create_phrase_matcher(
self,
) -> Callable[
[contact_center_insights.CreatePhraseMatcherRequest], resources.PhraseMatcher
]:
r"""Return a callable for the create phrase matcher method over gRPC.
Creates a phrase matcher.
Returns:
Callable[[~.CreatePhraseMatcherRequest],
~.PhraseMatcher]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "create_phrase_matcher" not in self._stubs:
self._stubs["create_phrase_matcher"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/CreatePhraseMatcher",
request_serializer=contact_center_insights.CreatePhraseMatcherRequest.serialize,
response_deserializer=resources.PhraseMatcher.deserialize,
)
return self._stubs["create_phrase_matcher"]
@property
def get_phrase_matcher(
self,
) -> Callable[
[contact_center_insights.GetPhraseMatcherRequest], resources.PhraseMatcher
]:
r"""Return a callable for the get phrase matcher method over gRPC.
Gets a phrase matcher.
Returns:
Callable[[~.GetPhraseMatcherRequest],
~.PhraseMatcher]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_phrase_matcher" not in self._stubs:
self._stubs["get_phrase_matcher"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/GetPhraseMatcher",
request_serializer=contact_center_insights.GetPhraseMatcherRequest.serialize,
response_deserializer=resources.PhraseMatcher.deserialize,
)
return self._stubs["get_phrase_matcher"]
@property
def list_phrase_matchers(
self,
) -> Callable[
[contact_center_insights.ListPhraseMatchersRequest],
contact_center_insights.ListPhraseMatchersResponse,
]:
r"""Return a callable for the list phrase matchers method over gRPC.
Lists phrase matchers.
Returns:
Callable[[~.ListPhraseMatchersRequest],
~.ListPhraseMatchersResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_phrase_matchers" not in self._stubs:
self._stubs["list_phrase_matchers"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/ListPhraseMatchers",
request_serializer=contact_center_insights.ListPhraseMatchersRequest.serialize,
response_deserializer=contact_center_insights.ListPhraseMatchersResponse.deserialize,
)
return self._stubs["list_phrase_matchers"]
@property
def delete_phrase_matcher(
self,
) -> Callable[
[contact_center_insights.DeletePhraseMatcherRequest], empty_pb2.Empty
]:
r"""Return a callable for the delete phrase matcher method over gRPC.
Deletes a phrase matcher.
Returns:
Callable[[~.DeletePhraseMatcherRequest],
~.Empty]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_phrase_matcher" not in self._stubs:
self._stubs["delete_phrase_matcher"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/DeletePhraseMatcher",
request_serializer=contact_center_insights.DeletePhraseMatcherRequest.serialize,
response_deserializer=empty_pb2.Empty.FromString,
)
return self._stubs["delete_phrase_matcher"]
@property
def update_phrase_matcher(
self,
) -> Callable[
[contact_center_insights.UpdatePhraseMatcherRequest], resources.PhraseMatcher
]:
r"""Return a callable for the update phrase matcher method over gRPC.
Updates a phrase matcher.
Returns:
Callable[[~.UpdatePhraseMatcherRequest],
~.PhraseMatcher]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_phrase_matcher" not in self._stubs:
self._stubs["update_phrase_matcher"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/UpdatePhraseMatcher",
request_serializer=contact_center_insights.UpdatePhraseMatcherRequest.serialize,
response_deserializer=resources.PhraseMatcher.deserialize,
)
return self._stubs["update_phrase_matcher"]
@property
def calculate_stats(
self,
) -> Callable[
[contact_center_insights.CalculateStatsRequest],
contact_center_insights.CalculateStatsResponse,
]:
r"""Return a callable for the calculate stats method over gRPC.
Gets conversation statistics.
Returns:
Callable[[~.CalculateStatsRequest],
~.CalculateStatsResponse]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "calculate_stats" not in self._stubs:
self._stubs["calculate_stats"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/CalculateStats",
request_serializer=contact_center_insights.CalculateStatsRequest.serialize,
response_deserializer=contact_center_insights.CalculateStatsResponse.deserialize,
)
return self._stubs["calculate_stats"]
@property
def get_settings(
self,
) -> Callable[[contact_center_insights.GetSettingsRequest], resources.Settings]:
r"""Return a callable for the get settings method over gRPC.
Gets project-level settings.
Returns:
Callable[[~.GetSettingsRequest],
~.Settings]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_settings" not in self._stubs:
self._stubs["get_settings"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/GetSettings",
request_serializer=contact_center_insights.GetSettingsRequest.serialize,
response_deserializer=resources.Settings.deserialize,
)
return self._stubs["get_settings"]
@property
def update_settings(
self,
) -> Callable[[contact_center_insights.UpdateSettingsRequest], resources.Settings]:
r"""Return a callable for the update settings method over gRPC.
Updates project-level settings.
Returns:
Callable[[~.UpdateSettingsRequest],
~.Settings]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "update_settings" not in self._stubs:
self._stubs["update_settings"] = self.grpc_channel.unary_unary(
"/google.cloud.contactcenterinsights.v1.ContactCenterInsights/UpdateSettings",
request_serializer=contact_center_insights.UpdateSettingsRequest.serialize,
response_deserializer=resources.Settings.deserialize,
)
return self._stubs["update_settings"]
def close(self):
self.grpc_channel.close()
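# Illustrative only (not part of the generated surface): the client layer resolves one
# of the stub properties above and calls it with a request message, roughly as
#   transport = ContactCenterInsightsGrpcTransport(channel=grpc.insecure_channel("localhost:8080"))
#   settings = transport.get_settings(contact_center_insights.GetSettingsRequest(name="..."))
# where the channel target and the request name are placeholder values.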
__all__ = ("ContactCenterInsightsGrpcTransport",)
|
py | b40935c53cc99446d0a4f5238dd5fef1c533a706 | import numpy as np
from math import sqrt
# to use a single parameter for all filters
int_16_min = np.iinfo(np.int16).min
def z_coefficient(n):
z = (3 * sqrt(n * (n - 1))) / (sqrt(2 * (2 * n + 5)))
return z
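# Minimal usage sketch (illustrative sample sizes; assuming n is the number of
# observations the coefficient is computed for):
if __name__ == "__main__":
    for sample_size in (10, 50, 100):
        print(sample_size, z_coefficient(sample_size))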
|
py | b40935eee6df53f2259b092073681be7b31ddc16 | # Copyright (c) 2014-present PlatformIO <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from platformio import exception
from platformio.commands import PlatformioCLI
from platformio.commands.lib import cli as cmd_lib
PlatformioCLI.leftover_args = ["--json-output"] # hook for click
def test_search(clirunner, validate_cliresult):
result = clirunner.invoke(cmd_lib, ["search", "DHT22"])
validate_cliresult(result)
match = re.search(r"Found\s+(\d+)\slibraries:", result.output)
assert int(match.group(1)) > 2
result = clirunner.invoke(cmd_lib, ["search", "DHT22", "--platform=timsp430"])
validate_cliresult(result)
match = re.search(r"Found\s+(\d+)\slibraries:", result.output)
assert int(match.group(1)) > 1
def test_global_install_registry(clirunner, validate_cliresult, isolated_pio_home):
result = clirunner.invoke(
cmd_lib,
[
"-g",
"install",
"64",
"ArduinoJson@~5.10.0",
"[email protected]",
"AsyncMqttClient@<=0.8.2",
"999@77d4eb3f8a",
],
)
validate_cliresult(result)
# install unknown library
result = clirunner.invoke(cmd_lib, ["-g", "install", "Unknown"])
assert result.exit_code != 0
assert isinstance(result.exception, exception.LibNotFound)
items1 = [d.basename for d in isolated_pio_home.join("lib").listdir()]
items2 = [
"ArduinoJson_ID64",
"[email protected]",
"NeoPixelBus_ID547",
"AsyncMqttClient_ID346",
"ESPAsyncTCP_ID305",
"AsyncTCP_ID1826",
"RFcontrol_ID999",
]
assert set(items1) == set(items2)
def test_global_install_archive(clirunner, validate_cliresult, isolated_pio_home):
result = clirunner.invoke(
cmd_lib,
[
"-g",
"install",
"https://github.com/bblanchon/ArduinoJson/archive/v5.8.2.zip",
"https://github.com/bblanchon/ArduinoJson/archive/[email protected]",
"SomeLib=http://dl.platformio.org/libraries/archives/0/9540.tar.gz",
"https://github.com/Pedroalbuquerque/ESP32WebServer/archive/master.zip",
],
)
validate_cliresult(result)
# incorrect requirements
result = clirunner.invoke(
cmd_lib,
[
"-g",
"install",
"https://github.com/bblanchon/ArduinoJson/archive/[email protected]",
],
)
assert result.exit_code != 0
items1 = [d.basename for d in isolated_pio_home.join("lib").listdir()]
items2 = ["ArduinoJson", "SomeLib_ID54", "OneWire_ID1", "ESP32WebServer"]
assert set(items1) >= set(items2)
def test_global_install_repository(clirunner, validate_cliresult, isolated_pio_home):
result = clirunner.invoke(
cmd_lib,
[
"-g",
"install",
"https://github.com/gioblu/PJON.git#3.0",
"https://github.com/gioblu/PJON.git#6.2",
"https://github.com/bblanchon/ArduinoJson.git",
"https://gitlab.com/ivankravets/rs485-nodeproto.git",
"https://github.com/platformio/platformio-libmirror.git",
# "https://developer.mbed.org/users/simon/code/TextLCD/",
"knolleary/pubsubclient#bef58148582f956dfa772687db80c44e2279a163",
],
)
validate_cliresult(result)
items1 = [d.basename for d in isolated_pio_home.join("lib").listdir()]
items2 = [
"PJON",
"PJON@src-79de467ebe19de18287becff0a1fb42d",
"ArduinoJson@src-69ebddd821f771debe7ee734d3c7fa81",
"rs485-nodeproto",
"platformio-libmirror",
"PubSubClient",
]
assert set(items1) >= set(items2)
def test_install_duplicates(clirunner, validate_cliresult, without_internet):
# registry
result = clirunner.invoke(
cmd_lib,
["-g", "install", "http://dl.platformio.org/libraries/archives/0/9540.tar.gz"],
)
validate_cliresult(result)
assert "is already installed" in result.output
# by ID
result = clirunner.invoke(cmd_lib, ["-g", "install", "999"])
validate_cliresult(result)
assert "is already installed" in result.output
# archive
result = clirunner.invoke(
cmd_lib,
[
"-g",
"install",
"https://github.com/Pedroalbuquerque/ESP32WebServer/archive/master.zip",
],
)
validate_cliresult(result)
assert "is already installed" in result.output
# repository
result = clirunner.invoke(
cmd_lib,
["-g", "install", "https://github.com/platformio/platformio-libmirror.git"],
)
validate_cliresult(result)
assert "is already installed" in result.output
def test_global_lib_list(clirunner, validate_cliresult):
result = clirunner.invoke(cmd_lib, ["-g", "list"])
validate_cliresult(result)
assert all(
[
n in result.output
for n in (
"Source: https://github.com/Pedroalbuquerque/ESP32WebServer/archive/master.zip",
"Version: 5.10.1",
"Source: git+https://github.com/gioblu/PJON.git#3.0",
"Version: 1fb26fd",
)
]
)
result = clirunner.invoke(cmd_lib, ["-g", "list", "--json-output"])
assert all(
[
n in result.output
for n in (
"__pkg_dir",
'"__src_url": "git+https://gitlab.com/ivankravets/rs485-nodeproto.git"',
'"version": "5.10.1"',
)
]
)
items1 = [i["name"] for i in json.loads(result.output)]
items2 = [
"ESP32WebServer",
"ArduinoJson",
"ArduinoJson",
"ArduinoJson",
"ArduinoJson",
"AsyncMqttClient",
"AsyncTCP",
"SomeLib",
"ESPAsyncTCP",
"NeoPixelBus",
"OneWire",
"PJON",
"PJON",
"PubSubClient",
"RFcontrol",
"platformio-libmirror",
"rs485-nodeproto",
]
assert sorted(items1) == sorted(items2)
versions1 = [
"{name}@{version}".format(**item) for item in json.loads(result.output)
]
versions2 = [
"[email protected]",
"[email protected]",
"[email protected]",
"[email protected]",
"PJON@07fe9aa",
"PJON@1fb26fd",
"PubSubClient@bef5814",
"RFcontrol@77d4eb3f8a",
]
assert set(versions1) >= set(versions2)
def test_global_lib_update_check(clirunner, validate_cliresult):
result = clirunner.invoke(
cmd_lib, ["-g", "update", "--only-check", "--json-output"]
)
validate_cliresult(result)
output = json.loads(result.output)
assert set(["RFcontrol", "NeoPixelBus"]) == set([l["name"] for l in output])
def test_global_lib_update(clirunner, validate_cliresult):
# update library using package directory
result = clirunner.invoke(
cmd_lib, ["-g", "update", "NeoPixelBus", "--only-check", "--json-output"]
)
validate_cliresult(result)
    outdated = json.loads(result.output)
    assert len(outdated) == 1
    assert "__pkg_dir" in outdated[0]
    result = clirunner.invoke(cmd_lib, ["-g", "update", outdated[0]["__pkg_dir"]])
validate_cliresult(result)
assert "Uninstalling NeoPixelBus @ 2.2.4" in result.output
# update rest libraries
result = clirunner.invoke(cmd_lib, ["-g", "update"])
validate_cliresult(result)
assert result.output.count("[Detached]") == 5
assert result.output.count("[Up-to-date]") == 11
assert "Uninstalling RFcontrol @ 77d4eb3f8a" in result.output
# update unknown library
result = clirunner.invoke(cmd_lib, ["-g", "update", "Unknown"])
assert result.exit_code != 0
assert isinstance(result.exception, exception.UnknownPackage)
def test_global_lib_uninstall(clirunner, validate_cliresult, isolated_pio_home):
# uninstall using package directory
result = clirunner.invoke(cmd_lib, ["-g", "list", "--json-output"])
validate_cliresult(result)
items = json.loads(result.output)
result = clirunner.invoke(cmd_lib, ["-g", "uninstall", items[5]["__pkg_dir"]])
validate_cliresult(result)
assert "Uninstalling AsyncTCP" in result.output
# uninstall the rest libraries
result = clirunner.invoke(
cmd_lib,
[
"-g",
"uninstall",
"1",
"https://github.com/bblanchon/ArduinoJson.git",
"ArduinoJson@!=5.6.7",
"RFcontrol",
],
)
validate_cliresult(result)
items1 = [d.basename for d in isolated_pio_home.join("lib").listdir()]
items2 = [
"rs485-nodeproto",
"platformio-libmirror",
"PubSubClient",
"ArduinoJson@src-69ebddd821f771debe7ee734d3c7fa81",
"ESPAsyncTCP_ID305",
"SomeLib_ID54",
"NeoPixelBus_ID547",
"PJON",
"AsyncMqttClient_ID346",
"ArduinoJson_ID64",
"PJON@src-79de467ebe19de18287becff0a1fb42d",
"ESP32WebServer",
]
assert set(items1) == set(items2)
# uninstall unknown library
result = clirunner.invoke(cmd_lib, ["-g", "uninstall", "Unknown"])
assert result.exit_code != 0
assert isinstance(result.exception, exception.UnknownPackage)
def test_lib_show(clirunner, validate_cliresult):
result = clirunner.invoke(cmd_lib, ["show", "64"])
validate_cliresult(result)
assert all([s in result.output for s in ("ArduinoJson", "Arduino", "Atmel AVR")])
result = clirunner.invoke(cmd_lib, ["show", "OneWire", "--json-output"])
validate_cliresult(result)
assert "OneWire" in result.output
def test_lib_builtin(clirunner, validate_cliresult):
result = clirunner.invoke(cmd_lib, ["builtin"])
validate_cliresult(result)
result = clirunner.invoke(cmd_lib, ["builtin", "--json-output"])
validate_cliresult(result)
def test_lib_stats(clirunner, validate_cliresult):
result = clirunner.invoke(cmd_lib, ["stats"])
validate_cliresult(result)
assert all(
[
s in result.output
for s in ("UPDATED", "POPULAR", "https://platformio.org/lib/show")
]
)
result = clirunner.invoke(cmd_lib, ["stats", "--json-output"])
validate_cliresult(result)
assert set(
[
"dlweek",
"added",
"updated",
"topkeywords",
"dlmonth",
"dlday",
"lastkeywords",
]
) == set(json.loads(result.output).keys())
|
py | b409361a74591141b56cee8ed46ff6794445a937 | import asyncio
import time
import unittest.mock
import freezegun
import pytest
import kopf
from kopf.reactor.daemons import daemon_killer
from kopf.reactor.processing import process_resource_event
from kopf.structs.bodies import RawBody
from kopf.structs.containers import ResourceMemories
from kopf.structs.memos import Memo
from kopf.structs.primitives import ToggleSet
class DaemonDummy:
def __init__(self):
super().__init__()
self.mock = unittest.mock.MagicMock()
self.kwargs = {}
self.steps = {
'called': asyncio.Event(),
'finish': asyncio.Event(),
'error': asyncio.Event(),
}
async def wait_for_daemon_done(self):
stopped = self.kwargs['stopped']
await stopped.wait()
while not stopped._stopper.reason & stopped._stopper.reason.DONE:
await asyncio.sleep(0) # give control back to asyncio event loop
@pytest.fixture()
def dummy():
return DaemonDummy()
@pytest.fixture()
def memories():
return ResourceMemories()
@pytest.fixture()
def simulate_cycle(k8s_mocked, registry, settings, resource, memories, mocker):
"""
Simulate K8s behaviour locally in memory (some meaningful approximation).
"""
def _merge_dicts(src, dst):
for key, val in src.items():
if isinstance(val, dict) and key in dst:
_merge_dicts(src[key], dst[key])
else:
dst[key] = val
async def _simulate_cycle(event_object: RawBody):
mocker.resetall()
await process_resource_event(
lifecycle=kopf.lifecycles.all_at_once,
registry=registry,
settings=settings,
resource=resource,
memories=memories,
memobase=Memo(),
raw_event={'type': 'irrelevant', 'object': event_object},
event_queue=asyncio.Queue(),
)
# Do the same as k8s does: merge the patches into the object.
for call in k8s_mocked.patch_obj.call_args_list:
_merge_dicts(call[1]['patch'], event_object)
return _simulate_cycle
@pytest.fixture()
async def operator_paused():
return ToggleSet()
@pytest.fixture()
async def conflicts_found(operator_paused: ToggleSet):
return await operator_paused.make_toggle(name="conflicts_found fixture")
@pytest.fixture()
async def background_daemon_killer(settings, memories, operator_paused):
"""
Run the daemon killer in the background.
"""
task = asyncio.create_task(daemon_killer(
settings=settings, memories=memories, operator_paused=operator_paused))
yield
task.cancel()
try:
await task
except asyncio.CancelledError:
pass
@pytest.fixture()
def frozen_time():
"""
A helper to simulate time movements to step over long sleeps/timeouts.
"""
# TODO LATER: Either freezegun should support the system clock, or find something else.
with freezegun.freeze_time("2020-01-01 00:00:00") as frozen:
# Use freezegun-supported time instead of system clocks -- for testing purposes only.
# NB: Patch strictly after the time is frozen -- to use fake_time(), not real time().
with unittest.mock.patch('time.monotonic', time.time), \
unittest.mock.patch('time.perf_counter', time.time):
yield frozen
# The time-driven tests mock the sleeps, and shift the time as much as it was requested to sleep.
# This makes the sleep realistic for the app code, though executed instantly for the tests.
@pytest.fixture()
def manual_time(k8s_mocked, frozen_time):
async def sleep_or_wait_substitute(delay, *_, **__):
if delay is None:
pass
elif isinstance(delay, float):
frozen_time.tick(delay)
else:
frozen_time.tick(min(delay))
k8s_mocked.sleep_or_wait.side_effect = sleep_or_wait_substitute
yield frozen_time
|
py | b409364e5b277dee80194093ec325f4874513413 | """
This file is part of the repo: https://github.com/tencent-ailab/hifi3dface
If you find the code useful, please cite our paper:
"High-Fidelity 3D Digital Human Creation from RGB-D Selfies."
Xiangkai Lin*, Yajing Chen*, Linchao Bao*, Haoxian Zhang, Sheng Wang, Xuefei Zhe, Xinwei Jiang, Jue Wang, Dong Yu, and Zhengyou Zhang.
arXiv: https://arxiv.org/abs/2010.05562
Copyright (c) [2020] [Tencent AI Lab]
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
import glob
import os
import scipy.io
from PIL import Image
import sys
sys.path.append("../..")
from third_party.ply import write_obj
def load_3dmm_basis(basis_path, uv_path=None, is_whole_uv=True, limit_dim=-1):
"""load 3dmm basis and other useful files.
:param basis_path:
- *.mat, 3DMM basis path.
- It contains shape/exp bases, mesh triangle definition and face vertex mask in bool.
:param uv_path:
- If is_whole_uv is set to true, then uv_path is a file path.
- Otherwise, it is a directory to load regional UVs.
:param is_whole_uv:
- bool. indicate whether we use albedo whole uv bases or regional pyramid bases.
:param limit_dim:
        - int. the number of dimensions used for the geometry bases. Default: -1, indicating that all dimensions are used.
"""
basis3dmm = scipy.io.loadmat(basis_path)
basis3dmm["keypoints"] = np.squeeze(basis3dmm["keypoints"])
# load uv basis
if uv_path is not None and not is_whole_uv:
uv_region_paths = sorted(glob.glob(os.path.join(uv_path, "*_uv512.mat")))
uv_region_bases = {}
for region_path in uv_region_paths:
print("loading %s" % region_path)
region_name = region_path.split("/")[-1].split("_uv")[0]
region_config = scipy.io.loadmat(region_path)
region_config["basis"] = np.transpose(
region_config["basis"] * region_config["sigma"]
)
region_config["indices"] = region_config["indices"].astype(np.int32)
del region_config["sigma"]
assert region_config["basis"].shape[0] < region_config["basis"].shape[1]
uv_region_bases[region_name] = region_config
basis3dmm["uv"] = uv_region_bases
uv_region_paths = sorted(glob.glob(os.path.join(uv_path, "*_uv.mat")))
uv_region_bases = {}
for region_path in uv_region_paths:
print("loading %s" % region_path)
region_name = region_path.split("/")[-1].split("_uv")[0]
region_config = scipy.io.loadmat(region_path)
region_config["basis"] = np.transpose(
region_config["basis"] * region_config["sigma"]
)
region_config["indices"] = region_config["indices"].astype(np.int32)
del region_config["sigma"]
assert region_config["basis"].shape[0] < region_config["basis"].shape[1]
uv_region_bases[region_name] = region_config
basis3dmm["uv2k"] = uv_region_bases
normal_region_paths = sorted(glob.glob(os.path.join(uv_path, "*_normal.mat")))
normal_region_bases = {}
for region_path in normal_region_paths:
print("loading %s" % region_path)
region_name = region_path.split("/")[-1].split("_normal")[0]
region_config = scipy.io.loadmat(region_path)
region_config["basis"] = np.transpose(
region_config["basis"] * region_config["sigma"]
)
region_config["indices"] = region_config["indices"].astype(np.int32)
del region_config["sigma"]
assert region_config["basis"].shape[0] < region_config["basis"].shape[1]
normal_region_bases[region_name] = region_config
basis3dmm["normal2k"] = normal_region_bases
if uv_path is not None and is_whole_uv:
config = scipy.io.loadmat(uv_path)
config["basis"] = config["basis"] * config["sigma"]
config["indices"] = config["indices"].astype(np.int32)
del config["sigma"]
if config["basis"].shape[0] > config["basis"].shape[1]:
config["basis"] = np.transpose(config["basis"])
assert config["basis"].shape[0] < config["basis"].shape[1]
basis3dmm["uv"] = config
if limit_dim > 0 and limit_dim < basis3dmm["basis_shape"].shape[0]:
basis3dmm["basis_shape"] = basis3dmm["basis_shape"][:limit_dim, :]
return basis3dmm
def scatter_nd_numpy(indices, updates, shape):
target = np.zeros(shape, dtype=updates.dtype)
indices_y, indices_x = np.split(indices, 2, axis=1)
indices_y = np.squeeze(indices_y).tolist()
indices_x = np.squeeze(indices_x).tolist()
tuple_indices = [indices_y, indices_x]
np.add.at(target, tuple_indices, updates)
return target
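# Illustrative sketch of the accumulate-scatter above (made-up values):
#   indices = np.array([[0, 0], [0, 0], [1, 2]])   # (N, 2) UV pixel coordinates
#   updates = np.ones((3, 3), dtype=np.float32)    # (N, 3) per-vertex values
#   scatter_nd_numpy(indices, updates, (4, 4, 3))  # pixel (0, 0) sums to [2., 2., 2.]
# i.e. duplicate indices are accumulated, mirroring an accumulating scatter_nd.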
# build output from basis
def construct(config, para, uv_size):
mu = config["mu"]
basis = config["basis"]
indices = config["indices"]
result = np.matmul(para, basis) + mu
result = np.reshape(result, [-1, 3])
if "weight" in config:
weight = config["weight"]
result = result * weight
uv_map = scatter_nd_numpy(indices, result, (uv_size, uv_size, 3))
uv_mask = scatter_nd_numpy(indices, np.ones_like(result), (uv_size, uv_size, 3))
uv_mask = np.clip(uv_mask, 0, 1)
return uv_map, uv_mask
def construct_mask(config, uv_size):
v_mask = np.ones_like(config["mu"])
indices = config["indices"]
v_mask = np.reshape(v_mask, [-1, 3])
uv_mask = scatter_nd_numpy(indices, v_mask, (uv_size, uv_size, 3))
uv_mask = np.clip(uv_mask, 0, 1)
return uv_mask
def np_get_uv_texture(uv_region_bases, para_tex_dict, uv_size=2048):
uv_map = np.zeros((uv_size, uv_size, 3), dtype=np.float32)
uv_mask = np.zeros((uv_size, uv_size, 3), dtype=np.float32)
for key in para_tex_dict:
region_basis = uv_region_bases[key]
para = para_tex_dict[key]
region_uv, region_mask = construct(region_basis, para, uv_size)
uv_map = uv_map + region_uv
uv_mask = uv_mask + region_mask
uv_mask = np.clip(uv_mask, 0, 1)
uv_map = np.clip(uv_map, 0, 255)
return uv_map, uv_mask
def np_get_region_weight_mask(uv_region_bases, region_weight_dict, uv_size=512):
uv_mask = np.zeros((uv_size, uv_size, 3), dtype=np.float32)
for key in region_weight_dict:
region_basis = uv_region_bases[key]
weight = region_weight_dict[key]
region_mask = construct_mask(region_basis, uv_size)
uv_mask = uv_mask + region_mask * weight
return uv_mask
def np_get_geometry(basis3dmm, para_shape, para_exp=None):
"""compute the geometry according to the 3DMM parameters.
para_shape: shape parameter
para_exp: expression parameter
"""
shape_inc = np.matmul(para_shape, basis3dmm["basis_shape"])
geo = basis3dmm["mu_shape"] + shape_inc
if para_exp is not None:
exp_inc = np.matmul(para_exp, basis3dmm["basis_exp"])
geo = geo + exp_inc
return np.reshape(geo, [-1, basis3dmm["basis_shape"].shape[1] // 3, 3])
def np_get_texture(basis3dmm, para_tex):
"""compute the geometry according to the 3DMM parameters.
para_tex: ver color parameter
"""
tex_inc = np.matmul(para_tex, basis3dmm["basis_tex"])
tex = basis3dmm["mu_tex"] + tex_inc
tex = np.clip(tex, 0, 255)
return np.reshape(tex, [-1, basis3dmm["basis_tex"].shape[1] // 3, 3])
if __name__ == "__main__":
# load basis (albedo is whole face)
basis3dmm = load_3dmm_basis(
"../files/AI-NExT-Shape.mat",
"../files/AI-NExT-Albedo-Global.mat",
is_whole_uv=True,
limit_dim=-1,
)
# randomly generate results for each basis
rand_para_shape = np.random.normal(size=[1, basis3dmm["basis_shape"].shape[0]])
rand_para_uv = np.random.normal(size=[1, basis3dmm["uv"]["basis"].shape[0]])
ver_shape = np_get_geometry(basis3dmm, rand_para_shape)[0]
uv_texture, _ = construct(basis3dmm["uv"], rand_para_uv, 512)
print(basis3dmm["vt_list"].shape, basis3dmm["tri"].shape, basis3dmm["tri_vt"].shape)
# save all files
write_obj(
"rand_shape.obj",
ver_shape,
basis3dmm["vt_list"],
basis3dmm["tri"],
basis3dmm["tri_vt"],
"face.mtl",
)
Image.fromarray(uv_texture.astype(np.uint8)).save("rand_uv.png")
# load regional pyramid bases (it takes a long time because files are huge)
basis3dmm = load_3dmm_basis(
"../files/AI-NExT-Shape-NoAug.mat",
"../files/AI-NExT-AlbedoNormal-RPB",
is_whole_uv=False,
limit_dim=-1,
)
rand_para_uv_dict = {}
for region_name in basis3dmm["uv"]:
rand_para = np.random.normal(
size=[1, basis3dmm["uv"][region_name]["basis"].shape[0]]
)
rand_para_uv_dict[region_name] = rand_para
uv_tex512, _ = np_get_uv_texture(basis3dmm["uv"], rand_para_uv_dict, 512)
uv_tex2048, _ = np_get_uv_texture(basis3dmm["uv2k"], rand_para_uv_dict, 2048)
uv_norm2048, _ = np_get_uv_texture(basis3dmm["normal2k"], rand_para_uv_dict, 2048)
Image.fromarray(uv_tex512.astype(np.uint8)).save("uv_tex512.png")
Image.fromarray(uv_tex2048.astype(np.uint8)).save("uv_tex2048.png")
Image.fromarray(uv_norm2048.astype(np.uint8)).save("uv_norm2048.png")
|
py | b40937670fdbd2a91982b221e53eb1f071b2f73e | # -*- coding: utf-8 -*-
from logging import getLogger
import os
import re
import subprocess
import sys
import tkinter as tk
from logging import exception
from os import makedirs
from tkinter import messagebox, ttk
from tkinter.messagebox import showerror
from typing import List, Union, Dict, Tuple
import thonny
from thonny import get_runner, get_workbench, running, tktextext, ui_utils
from thonny.common import InlineCommand, is_same_path, normpath_with_actual_case, path_startswith
from thonny.languages import tr
from thonny.plugins.cpython import CPythonProxy
from thonny.plugins.cpython_ssh import SshCPythonProxy
from thonny.running import get_interpreter_for_subprocess, InlineCommandDialog
from thonny.ui_utils import (
AutoScrollbar,
CommonDialog,
askopenfilename,
get_busy_cursor,
lookup_style_option,
open_path_in_system_file_manager,
scrollbar_style,
ems_to_pixels,
)
from thonny.workdlg import SubprocessDialog
PIP_INSTALLER_URL = "https://bootstrap.pypa.io/get-pip.py"
logger = getLogger(__name__)
_EXTRA_MARKER_RE = re.compile(r"""^\s*extra\s*==\s*("(?:[^"]|\\")*"|'(?:[^']|\\')*')\s*$""")
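# Matches environment markers that only restrict an "extra", e.g. the marker part of
# 'requests ; extra == "socks"'. Dependencies guarded purely by an extra are filtered
# out of the "Requires" listing below.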
class PipDialog(CommonDialog):
def __init__(self, master):
self._state = "idle" # possible values: "listing", "fetching", "idle"
self._process = None
self._closed = False
self._active_distributions = {}
self.current_package_data = None
super().__init__(master)
main_frame = ttk.Frame(self)
main_frame.grid(sticky=tk.NSEW, ipadx=15, ipady=15)
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
self.title(self._get_title())
self._create_widgets(main_frame)
self.search_box.focus_set()
self.bind("<Escape>", self._on_close, True)
self.protocol("WM_DELETE_WINDOW", self._on_close)
self._show_instructions()
self._start_update_list()
def get_search_button_text(self):
return tr("Search on PyPI")
def get_install_button_text(self):
return tr("Install")
def get_upgrade_button_text(self):
return tr("Upgrade")
def get_uninstall_button_text(self):
return tr("Uninstall")
def get_delete_selected_button_text(self):
return tr("Delete selected")
def _create_widgets(self, parent):
header_frame = ttk.Frame(parent)
header_frame.grid(row=1, column=0, sticky="nsew", padx=15, pady=(15, 0))
header_frame.columnconfigure(0, weight=1)
header_frame.rowconfigure(1, weight=1)
name_font = tk.font.nametofont("TkDefaultFont").copy()
name_font.configure(size=16)
self.search_box = ttk.Entry(header_frame)
self.search_box.grid(row=1, column=0, sticky="nsew")
self.search_box.bind("<Return>", self._on_search, False)
self.search_box.bind("<KP_Enter>", self._on_search, False)
        # Selecting chars in the search box with the mouse didn't make the box active on Linux without the following line
self.search_box.bind("<B1-Motion>", lambda _: self.search_box.focus_set())
self.search_button = ttk.Button(
header_frame, text=self.get_search_button_text(), command=self._on_search, width=25
)
self.search_button.grid(row=1, column=1, sticky="nse", padx=(10, 0))
main_pw = tk.PanedWindow(
parent,
orient=tk.HORIZONTAL,
background=lookup_style_option("TPanedWindow", "background"),
sashwidth=15,
)
main_pw.grid(row=2, column=0, sticky="nsew", padx=15, pady=15)
parent.rowconfigure(2, weight=1)
parent.columnconfigure(0, weight=1)
listframe = ttk.Frame(main_pw, relief="flat", borderwidth=1)
listframe.rowconfigure(0, weight=1)
listframe.columnconfigure(0, weight=1)
self.listbox = ui_utils.ThemedListbox(
listframe,
activestyle="dotbox",
width=20,
height=20,
selectborderwidth=0,
relief="flat",
# highlightthickness=4,
# highlightbackground="red",
# highlightcolor="green",
borderwidth=0,
)
self.listbox.insert("end", " <" + tr("INSTALL") + ">")
self.listbox.bind("<<ListboxSelect>>", self._on_listbox_select, True)
self.listbox.grid(row=0, column=0, sticky="nsew")
list_scrollbar = AutoScrollbar(
listframe, orient=tk.VERTICAL, style=scrollbar_style("Vertical")
)
list_scrollbar.grid(row=0, column=1, sticky="ns")
list_scrollbar["command"] = self.listbox.yview
self.listbox["yscrollcommand"] = list_scrollbar.set
info_frame = ttk.Frame(main_pw)
info_frame.columnconfigure(0, weight=1)
info_frame.rowconfigure(1, weight=1)
main_pw.add(listframe)
main_pw.add(info_frame)
self.title_label = ttk.Label(info_frame, text="", font=name_font)
self.title_label.grid(row=0, column=0, sticky="w", padx=5, pady=(0, ems_to_pixels(1)))
info_text_frame = tktextext.TextFrame(
info_frame,
read_only=True,
horizontal_scrollbar=False,
background=lookup_style_option("TFrame", "background"),
vertical_scrollbar_class=AutoScrollbar,
vertical_scrollbar_style=scrollbar_style("Vertical"),
horizontal_scrollbar_style=scrollbar_style("Horizontal"),
width=70,
height=10,
)
info_text_frame.configure(borderwidth=0)
info_text_frame.grid(row=1, column=0, columnspan=4, sticky="nsew", pady=(0, 10))
self.info_text = info_text_frame.text
link_color = lookup_style_option("Url.TLabel", "foreground", "red")
self.info_text.tag_configure("url", foreground=link_color, underline=True)
self.info_text.tag_bind("url", "<ButtonRelease-1>", self._handle_url_click)
self.info_text.tag_bind("url", "<Enter>", lambda e: self.info_text.config(cursor="hand2"))
self.info_text.tag_bind("url", "<Leave>", lambda e: self.info_text.config(cursor=""))
self.info_text.tag_configure("install_reqs", foreground=link_color, underline=True)
self.info_text.tag_bind(
"install_reqs", "<ButtonRelease-1>", self._handle_install_requirements_click
)
self.info_text.tag_bind(
"install_reqs", "<Enter>", lambda e: self.info_text.config(cursor="hand2")
)
self.info_text.tag_bind(
"install_reqs", "<Leave>", lambda e: self.info_text.config(cursor="")
)
self.info_text.tag_configure("install_file", foreground=link_color, underline=True)
self.info_text.tag_bind(
"install_file", "<ButtonRelease-1>", self._handle_install_file_click
)
self.info_text.tag_bind(
"install_file", "<Enter>", lambda e: self.info_text.config(cursor="hand2")
)
self.info_text.tag_bind(
"install_file", "<Leave>", lambda e: self.info_text.config(cursor="")
)
default_font = tk.font.nametofont("TkDefaultFont")
self.info_text.configure(font=default_font, wrap="word")
bold_font = default_font.copy()
        # need to explicitly copy size, because Tk 8.6 on certain Ubuntus uses a bigger font in copies
bold_font.configure(weight="bold", size=default_font.cget("size"))
self.info_text.tag_configure("caption", font=bold_font)
self.info_text.tag_configure("bold", font=bold_font)
self.command_frame = ttk.Frame(info_frame)
self.command_frame.grid(row=2, column=0, sticky="w")
self.install_button = ttk.Button(
self.command_frame,
text=" " + self.get_upgrade_button_text() + " ",
command=self._on_install_click,
width=20,
)
self.install_button.grid(row=0, column=0, sticky="w", padx=0)
self.uninstall_button = ttk.Button(
self.command_frame,
text=self.get_uninstall_button_text(),
command=self._on_uninstall_click,
width=20,
)
self.uninstall_button.grid(row=0, column=1, sticky="w", padx=(5, 0))
self.advanced_button = ttk.Button(
self.command_frame,
text="...",
width=3,
command=lambda: self._perform_pip_action("advanced"),
)
self.advanced_button.grid(row=0, column=2, sticky="w", padx=(5, 0))
self.close_button = ttk.Button(info_frame, text=tr("Close"), command=self._on_close)
self.close_button.grid(row=2, column=3, sticky="e")
def _set_state(self, state, force_normal_cursor=False):
self._state = state
action_buttons = [
self.install_button,
self.advanced_button,
self.uninstall_button,
]
other_widgets = [
self.listbox,
# self.search_box, # looks funny when disabled
self.search_button,
]
if state == "idle" and not self._read_only():
for widget in action_buttons:
widget["state"] = tk.NORMAL
else:
for widget in action_buttons:
widget["state"] = tk.DISABLED
if state == "idle":
for widget in other_widgets:
widget["state"] = tk.NORMAL
else:
self.config(cursor=get_busy_cursor())
for widget in other_widgets:
widget["state"] = tk.DISABLED
if state == "idle" or force_normal_cursor:
self.config(cursor="")
else:
self.config(cursor=get_busy_cursor())
def _get_state(self):
return self._state
def _instructions_for_command_line_install(self):
return (
"Alternatively, if you have an older pip installed, then you can install packages "
+ "on the command line (Tools → Open system shell...)"
)
def _start_update_list(self, name_to_show=None):
raise NotImplementedError()
def _update_list(self, name_to_show):
self.listbox.delete(1, "end")
for name in sorted(self._active_distributions.keys()):
self.listbox.insert("end", " " + name)
if name_to_show is None or name_to_show not in self._active_distributions.keys():
self._show_instructions()
else:
self._on_listbox_select_package(name_to_show)
def _on_listbox_select(self, event):
self.listbox.focus_set()
selection = self.listbox.curselection()
if len(selection) == 1:
self.listbox.activate(selection[0])
if selection[0] == 0: # special first item
self._show_instructions()
else:
self._on_listbox_select_package(self.listbox.get(selection[0]).strip())
def _on_listbox_select_package(self, name):
self._start_show_package_info(name)
def _on_search(self, event=None):
if self._get_state() != "idle":
# Search box is not made inactive for busy-states
return
if self.search_box.get().strip() == "":
return
self._start_search(self.search_box.get().strip())
def _on_install_click(self):
self._perform_pip_action("install")
def _on_uninstall_click(self):
self._perform_pip_action("uninstall")
def _clear(self):
self.current_package_data = None
self.title_label.grid_remove()
self.command_frame.grid_remove()
self._clear_info_text()
def _clear_info_text(self):
self.info_text.direct_delete("1.0", "end")
def _append_info_text(self, text, tags=()):
self.info_text.direct_insert("end", text, tags)
def _show_instructions(self):
self._clear()
if self._read_only():
self._show_read_only_instructions()
else:
self._append_info_text(tr("Install from PyPI") + "\n", ("caption",))
self.info_text.direct_insert(
"end",
tr(
"If you don't know where to get the package from, "
+ "then most likely you'll want to search the Python Package Index. "
+ "Start by entering the name of the package in the search box above and pressing ENTER."
)
+ "\n\n",
)
self.info_text.direct_insert(
"end", tr("Install from requirements file") + "\n", ("caption",)
)
self._append_info_text(tr("Click" + " "))
self._append_info_text(tr("here"), ("install_reqs",))
self.info_text.direct_insert(
"end",
" "
+ tr("to locate requirements.txt file and install the packages specified in it.")
+ "\n\n",
)
self._show_instructions_about_installing_from_local_file()
self._show_instructions_about_existing_packages()
if self._get_target_directory():
self._show_instructions_about_target()
self._select_list_item(0)
def _show_read_only_instructions(self):
self._append_info_text(tr("Browse the packages") + "\n", ("caption",))
self.info_text.direct_insert(
"end",
tr(
"With current interpreter you can only browse the packages here.\n"
+ "Use 'Tools → Open system shell...' for installing, upgrading or uninstalling."
)
+ "\n\n",
)
if self._get_target_directory():
self._append_info_text(tr("Packages' directory") + "\n", ("caption",))
self.info_text.direct_insert("end", self._get_target_directory(), ("target_directory"))
def _show_instructions_about_installing_from_local_file(self):
self._append_info_text(tr("Install from local file") + "\n", ("caption",))
self._append_info_text(tr("Click") + " ")
self._append_info_text(tr("here"), ("install_file",))
self.info_text.direct_insert(
"end",
" "
+ tr(
"to locate and install the package file (usually with .whl, .tar.gz or .zip extension)."
)
+ "\n\n",
)
def _show_instructions_about_existing_packages(self):
self._append_info_text(tr("Upgrade or uninstall") + "\n", ("caption",))
self.info_text.direct_insert(
"end", tr("Start by selecting the package from the left.") + "\n\n"
)
def _show_instructions_about_target(self):
self._append_info_text(tr("Target:") + " ", ("caption",))
if self._should_install_to_site_packages():
self.info_text.direct_insert("end", tr("virtual environment") + "\n", ("caption",))
else:
self.info_text.direct_insert("end", tr("user site packages") + "\n", ("caption",))
self.info_text.direct_insert(
"end",
tr(
"This dialog lists all available packages,"
+ " but allows upgrading and uninstalling only packages from"
)
+ " ",
)
self._append_info_text(self._get_target_directory(), ("url"))
self.info_text.direct_insert(
"end",
". "
+ tr(
"New packages will be also installed into this directory."
+ " Other locations must be managed by alternative means."
),
)
def _start_show_package_info(self, name):
self.current_package_data = None
# Fetch info from PyPI
self._set_state("fetching")
        # The following fetches info about the latest version.
        # This is OK even when we're looking at an installed older version,
        # because the new version may have more relevant and complete info.
_start_fetching_package_info(name, None, self._show_package_info)
self._clear_info_text()
self.title_label["text"] = ""
self.title_label.grid()
self.command_frame.grid()
self.uninstall_button["text"] = self.get_uninstall_button_text()
active_dist = self._get_active_dist(name)
if active_dist is not None:
self.title_label["text"] = active_dist["project_name"]
self._append_info_text(tr("Installed version:") + " ", ("caption",))
self._append_info_text(active_dist["version"] + "\n")
self._append_info_text(tr("Installed to:") + " ", ("caption",))
# TODO: only show link if local backend
self.info_text.direct_insert(
"end", normpath_with_actual_case(active_dist["location"]), ("url",)
)
self._append_info_text("\n\n")
self._select_list_item(name)
else:
self._select_list_item(0)
# update gui
if self._is_read_only_package(name):
self.install_button.grid_remove()
self.uninstall_button.grid_remove()
self.advanced_button.grid_remove()
else:
self.install_button.grid(row=0, column=0)
self.advanced_button.grid(row=0, column=2)
if active_dist is not None:
# existing package in target directory
self.install_button["text"] = self.get_upgrade_button_text()
self.install_button["state"] = "disabled"
self.uninstall_button.grid(row=0, column=1)
else:
# new package
self.install_button["text"] = self.get_install_button_text()
self.uninstall_button.grid_remove()
def _show_package_info(self, name, data, error_code=None):
self._set_state("idle")
self.current_package_data = data
def write(s, tag=None):
if tag is None:
tags = ()
else:
tags = (tag,)
self._append_info_text(s, tags)
def write_att(caption, value, value_tag=None):
write(caption + ": ", "caption")
write(value, value_tag)
write("\n")
if error_code is not None:
if error_code == 404:
write(tr("Could not find the package from PyPI."))
if not self._get_active_version(name):
# new package
write("\n" + tr("Please check your spelling!"))
else:
write(
tr("Could not find the package info from PyPI.")
+ " "
+ tr("Error code:")
+ " "
+ str(error_code)
)
return
info = data["info"]
self.title_label["text"] = info["name"] # search name could have been a bit different
latest_stable_version = _get_latest_stable_version(data["releases"].keys())
if latest_stable_version is not None:
write_att(tr("Latest stable version"), latest_stable_version)
else:
write_att(tr("Latest version"), data["info"]["version"])
write_att(tr("Summary"), info["summary"])
write_att(tr("Author"), info["author"])
write_att(tr("Homepage"), info["home_page"], "url")
if info.get("bugtrack_url", None):
write_att(tr("Bugtracker"), info["bugtrack_url"], "url")
if info.get("docs_url", None):
write_att(tr("Documentation"), info["docs_url"], "url")
if info.get("package_url", None):
write_att(tr("PyPI page"), info["package_url"], "url")
if info.get("requires_dist", None):
# Available only when release is created by a binary wheel
# https://github.com/pypa/pypi-legacy/issues/622#issuecomment-305829257
requires_dist = info["requires_dist"]
assert isinstance(requires_dist, list)
assert all(isinstance(item, str) for item in requires_dist)
# See https://www.python.org/dev/peps/pep-0345/#environment-markers.
# This will filter only the most obvious dependencies marked simply with
# ``extras == *``.
# The other, more complex markings, are accepted as they are also
# more informative (*e.g.*, the desired platform).
remaining_requires_dist = [] # type: List[str]
for item in requires_dist:
if ";" not in item:
remaining_requires_dist.append(item)
continue
_, marker_text = item.split(";", 1)
                # Check if the environment marker matches ``extra == '*'``.
#
# This is easier implemented with ``packaging.markers``, but we want to
# avoid introducing a new dependency as Thonny is included in
# distributions which might lack a package for it.
#
# Please see
# https://packaging.pypa.io/en/latest/_modules/packaging/markers.html#Marker
# for the parsing rules.
# Match extra == quoted string
is_extra = _EXTRA_MARKER_RE.match(marker_text) is not None
if is_extra:
continue
remaining_requires_dist.append(item)
write_att(tr("Requires"), ", ".join(remaining_requires_dist))
if self._get_active_version(name) != latest_stable_version or not self._get_active_version(
name
):
self.install_button["state"] = "normal"
else:
self.install_button["state"] = "disabled"
def _is_read_only_package(self, name):
dist = self._get_active_dist(name)
if dist is None:
return False
else:
return normpath_with_actual_case(dist["location"]) != self._get_target_directory()
def _normalize_name(self, name):
# looks like (in some cases?) pip list gives the name as it was used during install
# ie. the list may contain lowercase entry, when actual metadata has uppercase name
# Example: when you "pip install cx-freeze", then "pip list"
# really returns "cx-freeze" although correct name is "cx_Freeze"
# https://www.python.org/dev/peps/pep-0503/#id4
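        # e.g. "cx_Freeze", "cx.freeze" and "CX--FREEZE" all normalize to "cx-freeze"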
return re.sub(r"[-_.]+", "-", name).lower().strip()
def _start_search(self, query, discard_selection=True):
self.current_package_data = None
# Fetch info from PyPI
self._set_state("fetching")
self._clear()
self.title_label.grid()
self.title_label["text"] = tr("Search results")
self.info_text.direct_insert("1.0", tr("Searching") + " ...")
_start_fetching_search_results(query, self._show_search_results)
if discard_selection:
self._select_list_item(0)
def _show_search_results(self, query, results: Union[List[Dict], str]) -> None:
self._set_state("idle")
self._clear_info_text()
results = self._tweak_search_results(results, query)
if isinstance(results, str) or not results:
if not results:
self._append_info_text("No results.\n\n")
else:
self._append_info_text("Could not fetch search results:\n")
self._append_info_text(results + "\n\n")
self._append_info_text("Try opening the package directly:\n")
self._append_info_text(query, ("url",))
return
for item in results:
# self._append_info_text("•")
tags = ("url",)
if item["name"].lower() == query.lower():
tags = tags + ("bold",)
self._append_info_text(item["name"], tags)
self._append_info_text("\n")
self.info_text.direct_insert(
"end", item.get("description", "<No description>").strip() + "\n"
)
self._append_info_text("\n")
def _select_list_item(self, name_or_index):
if isinstance(name_or_index, int):
index = name_or_index
else:
normalized_items = list(map(self._normalize_name, self.listbox.get(0, "end")))
try:
index = normalized_items.index(self._normalize_name(name_or_index))
except Exception:
exception(tr("Can't find package name from the list:") + " " + name_or_index)
return
old_state = self.listbox["state"]
try:
self.listbox["state"] = "normal"
self.listbox.select_clear(0, "end")
self.listbox.select_set(index)
self.listbox.activate(index)
self.listbox.see(index)
finally:
self.listbox["state"] = old_state
def _get_install_command(self):
cmd = ["install", "--no-cache-dir"]
if self._use_user_install():
cmd.append("--user")
return cmd
    def _perform_pip_action(self, action: str) -> None:
if self._perform_pip_action_without_refresh(action):
if action == "uninstall":
self._show_instructions() # Make the old package go away as fast as possible
self._start_update_list(
None if action == "uninstall" else self.current_package_data["info"]["name"]
)
get_workbench().event_generate("RemoteFilesChanged")
def _perform_pip_action_without_refresh(self, action: str) -> bool:
assert self._get_state() == "idle"
assert self.current_package_data is not None
data = self.current_package_data
name = self.current_package_data["info"]["name"]
install_cmd = self._get_install_command()
if action == "install":
title = tr("Installing '%s'") % name
if not self._confirm_install(self.current_package_data):
return False
args = install_cmd
if self._get_active_version(name) is not None:
title = tr("Upgrading '%s'") % name
args.append("--upgrade")
args.append(name)
elif action == "uninstall":
title = tr("Uninstalling '%s'") % name
if name in ["pip", "setuptools"] and not messagebox.askyesno(
tr("Really uninstall?"),
tr(
"Package '{}' is required for installing and uninstalling other packages."
).format(name)
+ "\n\n"
+ tr("Are you sure you want to uninstall it?"),
master=self,
):
return False
args = ["uninstall", "-y", name]
elif action == "advanced":
title = tr("Installing")
details = _ask_installation_details(
self,
data,
_get_latest_stable_version(list(data["releases"].keys())),
self.does_support_update_deps_switch(),
)
if details is None: # Cancel
return False
version, package_data, upgrade_deps = details
if not self._confirm_install(package_data):
return False
args = install_cmd
if upgrade_deps:
args.append("--upgrade")
args.append(name + "==" + version)
else:
raise RuntimeError("Unknown action")
returncode, _, _ = self._run_pip_with_dialog(args, title=title)
return returncode == 0
def does_support_update_deps_switch(self):
return True
def _handle_install_file_click(self, event):
if self._get_state() != "idle":
return
filename = askopenfilename(
master=self,
filetypes=[(tr("Package"), ".whl .zip .tar.gz"), (tr("all files"), ".*")],
initialdir=get_workbench().get_local_cwd(),
parent=self.winfo_toplevel(),
)
if filename: # Note that missing filename may be "" or () depending on tkinter version
self._install_file(filename, False)
def _handle_install_requirements_click(self, event):
if self._get_state() != "idle":
return
filename = askopenfilename(
master=self,
filetypes=[("requirements", ".txt"), (tr("all files"), ".*")],
initialdir=get_workbench().get_local_cwd(),
parent=self.winfo_toplevel(),
)
if filename: # Note that missing filename may be "" or () depending on tkinter version
self._install_file(filename, True)
def _handle_target_directory_click(self, event):
if self._get_target_directory():
open_path_in_system_file_manager(self._get_target_directory())
def _install_file(self, filename, is_requirements_file):
args = self._get_install_file_command(filename, is_requirements_file)
returncode, out, err = self._run_pip_with_dialog(
args, title=tr("Installing '%s'") % os.path.basename(filename)
)
# Try to find out the name of the package we're installing
name = None
# output should include a line like this:
# Installing collected packages: pytz, six, python-dateutil, numpy, pandas
inst_lines = re.findall(
"^Installing collected packages:.*?$", out, re.MULTILINE | re.IGNORECASE
) # @UndefinedVariable
if len(inst_lines) == 1:
# take last element
elements = re.split(",|:", inst_lines[0])
name = elements[-1].strip()
self._start_update_list(name)
def _get_install_file_command(self, filename, is_requirements_file):
args = ["install"]
if self._use_user_install():
args.append("--user")
if is_requirements_file:
args.append("-r")
args.append(filename)
return args
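    # Illustrative result (not from the original source): installing a requirements file outside a
    # virtual environment would yield something like ["install", "--user", "-r", "/path/to/requirements.txt"].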
def _handle_url_click(self, event):
url = _extract_click_text(self.info_text, event, "url")
if url is not None:
if url.startswith("http:") or url.startswith("https:"):
import webbrowser
webbrowser.open(url)
elif os.path.sep in url:
os.makedirs(url, exist_ok=True)
open_path_in_system_file_manager(url)
else:
self._start_show_package_info(url)
def _on_close(self, event=None):
self._closed = True
self.destroy()
def _get_active_version(self, name):
dist = self._get_active_dist(name)
if dist is None:
return None
else:
return dist["version"]
def _get_active_dist(self, name):
normname = self._normalize_name(name)
for key in self._active_distributions:
if self._normalize_name(key) == normname:
return self._active_distributions[key]
return None
def _run_pip_with_dialog(self, args, title) -> Tuple[int, str, str]:
raise NotImplementedError()
def _get_interpreter(self):
raise NotImplementedError()
def _should_install_to_site_packages(self):
raise NotImplementedError()
def _use_user_install(self):
return not self._should_install_to_site_packages()
def _get_target_directory(self):
raise NotImplementedError()
def _get_title(self):
return tr("Manage packages for %s") % self._get_interpreter()
def _confirm_install(self, package_data):
return True
def _read_only(self):
if self._should_install_to_site_packages():
return False
else:
# readonly if not in a virtual environment
# and user site packages is disabled
import site
return not site.ENABLE_USER_SITE
def _tweak_search_results(self, results, query):
return results
def _get_extra_switches(self):
result = ["--disable-pip-version-check"]
proxy = os.environ.get("https_proxy", os.environ.get("http_proxy", None))
if proxy:
result.append("--proxy=" + proxy)
return result
class BackendPipDialog(PipDialog):
def __init__(self, master):
self._backend_proxy = get_runner().get_backend_proxy()
super().__init__(master)
self._last_name_to_show = None
def _start_update_list(self, name_to_show=None):
assert self._get_state() in [None, "idle"]
self._set_state("listing")
get_workbench().bind("get_active_distributions_response", self._complete_update_list, True)
self._last_name_to_show = name_to_show
logger.debug("Sending get_active_distributions")
get_runner().send_command(InlineCommand("get_active_distributions"))
def _complete_update_list(self, msg):
if self._closed:
return
get_workbench().unbind("get_active_distributions_response", self._complete_update_list)
if "error" in msg:
self._clear_info_text()
self.info_text.direct_insert("1.0", msg["error"])
self._set_state("idle", True)
return
self._active_distributions = msg.distributions
self._set_state("idle", True)
self._update_list(self._last_name_to_show)
class CPythonBackendPipDialog(BackendPipDialog):
def __init__(self, master):
super().__init__(master)
assert isinstance(self._backend_proxy, (CPythonProxy, SshCPythonProxy))
def _get_interpreter(self):
return get_runner().get_local_executable()
def _create_python_process(self, args):
proc = running.create_backend_python_process(args, stderr=subprocess.STDOUT)
return proc, proc.cmd
def _confirm_install(self, package_data):
name = package_data["info"]["name"]
if name.lower().startswith("thonny"):
return messagebox.askyesno(
tr("Confirmation"),
tr(
"Looks like you are installing a Thonny-related package.\n"
+ "If you meant to install a Thonny plugin, then you should\n"
+ "choose 'Tools → Manage plugins...' instead\n"
+ "\n"
+ "Are you sure you want to install %s for the back-end?"
)
% name,
master=self,
)
else:
return True
def _get_target_directory(self):
if self._should_install_to_site_packages():
return normpath_with_actual_case(self._backend_proxy.get_site_packages())
else:
usp = self._backend_proxy.get_user_site_packages()
if isinstance(self._backend_proxy, CPythonProxy):
os.makedirs(usp, exist_ok=True)
return normpath_with_actual_case(usp)
else:
return usp
def _should_install_to_site_packages(self):
return self._targets_virtual_environment()
def _targets_virtual_environment(self):
return get_runner().using_venv()
def _run_pip_with_dialog(self, args, title) -> Tuple[int, str, str]:
proxy = get_runner().get_backend_proxy()
assert isinstance(proxy, CPythonProxy)
sub_cmd = [proxy._reported_executable, "-m", "pip"] + args + self._get_extra_switches()
back_cmd = InlineCommand("execute_system_command", cmd_line=sub_cmd)
dlg = InlineCommandDialog(
self,
back_cmd,
title="pip",
instructions=title,
autostart=True,
output_prelude=subprocess.list2cmdline(sub_cmd) + "\n\n",
)
ui_utils.show_dialog(dlg)
return dlg.returncode, dlg.stdout, dlg.stderr
class PluginsPipDialog(PipDialog):
def __init__(self, master):
PipDialog.__init__(self, master)
# make sure directory exists, so user can put her plug-ins there
d = self._get_target_directory()
makedirs(d, exist_ok=True)
def _start_update_list(self, name_to_show=None):
assert self._get_state() in [None, "idle"]
import pkg_resources
pkg_resources._initialize_master_working_set()
self._active_distributions = {
dist.key: {
"project_name": dist.project_name,
"key": dist.key,
"location": dist.location,
"version": dist.version,
}
for dist in pkg_resources.working_set # pylint: disable=not-an-iterable
}
self._update_list(name_to_show)
def _conflicts_with_thonny_version(self, req_strings):
import pkg_resources
try:
conflicts = []
for req_string in req_strings:
req = pkg_resources.Requirement.parse(req_string)
if req.project_name == "thonny" and thonny.get_version() not in req:
conflicts.append(req_string)
return conflicts
except Exception:
logger.exception("Problem computing conflicts")
return None
def _get_interpreter(self):
return get_interpreter_for_subprocess(sys.executable)
def _should_install_to_site_packages(self):
return self._targets_virtual_environment()
def _targets_virtual_environment(self):
# https://stackoverflow.com/a/42580137/261181
return (
hasattr(sys, "base_prefix")
and sys.base_prefix != sys.prefix
or hasattr(sys, "real_prefix")
and getattr(sys, "real_prefix") != sys.prefix
)
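    # Background note: in a venv created with "python -m venv", sys.prefix points into the environment
    # while sys.base_prefix points to the base installation, so the two differ; older virtualenv
    # versions expose the base installation via sys.real_prefix instead.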
def _confirm_install(self, package_data):
name = package_data["info"]["name"]
reqs = package_data["info"].get("requires_dist", None)
other_version_text = tr(
"NB! There may be another version available "
+ "which is compatible with current Thonny version. "
+ "Click on '...' button to choose the version to install."
)
if name.lower().startswith("thonny-") and not reqs:
showerror(
tr("Thonny plugin without requirements"),
tr(
"Looks like you are trying to install an outdated Thonny\n"
+ "plug-in (it doesn't specify required Thonny version\n"
+ "or hasn't uploaded a whl file before other files).\n\n"
+ "If you still want it, then please install it from the command line."
)
+ "\n\n"
+ other_version_text,
master=self,
)
return False
elif reqs:
conflicts = self._conflicts_with_thonny_version(reqs)
if conflicts:
showerror(
tr("Unsuitable requirements"),
tr("This package requires different Thonny version:")
+ "\n\n "
+ "\n ".join(conflicts)
+ "\n\n"
+ tr("If you still want it, then please install it from the command line.")
+ "\n\n"
+ other_version_text,
master=self,
)
return False
return True
def _get_target_directory(self):
if self._use_user_install():
import site
assert hasattr(site, "getusersitepackages")
os.makedirs(site.getusersitepackages(), exist_ok=True)
return normpath_with_actual_case(site.getusersitepackages())
else:
for d in sys.path:
if ("site-packages" in d or "dist-packages" in d) and path_startswith(
d, sys.prefix
):
return normpath_with_actual_case(d)
return None
def _create_widgets(self, parent):
banner = ttk.Frame(parent, style="Tip.TFrame")
banner.grid(row=0, column=0, sticky="nsew")
banner_msg = (
tr(
"This dialog is for managing Thonny plug-ins and their dependencies.\n"
+ "If you want to install packages for your own programs then choose 'Tools → Manage packages...'"
)
+ "\n"
)
runner = get_runner()
if (
runner is not None
and runner.get_local_executable() is not None
and is_same_path(self._get_interpreter(), get_runner().get_local_executable())
):
banner_msg += (
tr(
"(In this case Thonny's back-end uses same interpreter, so both dialogs manage same packages.)"
)
+ "\n"
)
banner_msg += "\n" + tr(
"NB! You need to restart Thonny after installing / upgrading / uninstalling a plug-in."
)
banner_text = ttk.Label(banner, text=banner_msg, style="Tip.TLabel", justify="left")
banner_text.grid(pady=10, padx=10)
PipDialog._create_widgets(self, parent)
def _get_title(self):
return tr("Thonny plug-ins")
def _run_pip_with_dialog(self, args, title) -> Tuple[int, str, str]:
args = ["-m", "pip"] + args + self._get_extra_switches()
proc = running.create_frontend_python_process(args, stderr=subprocess.STDOUT)
cmd = proc.cmd
dlg = SubprocessDialog(self, proc, "pip", long_description=title, autostart=True)
ui_utils.show_dialog(dlg)
return dlg.returncode, dlg.stdout, dlg.stderr
class DetailsDialog(CommonDialog):
def __init__(self, master, package_metadata, selected_version, support_update_deps_switch):
from distutils.version import StrictVersion
assert isinstance(master, PipDialog)
super().__init__(master)
self.result = None
self._closed = False
self._version_data = None
self._package_name = package_metadata["info"]["name"]
self.title(tr("Advanced install / upgrade / downgrade"))
self.rowconfigure(0, weight=1)
self.columnconfigure(0, weight=1)
main_frame = ttk.Frame(self) # To get styled background
main_frame.grid(sticky="nsew")
main_frame.rowconfigure(0, weight=1)
main_frame.columnconfigure(0, weight=1)
version_label = ttk.Label(main_frame, text=tr("Desired version"))
version_label.grid(row=0, column=0, columnspan=2, padx=20, pady=(15, 0), sticky="w")
def version_sort_key(s):
# Trying to massage understandable versions into valid StrictVersions
if s.replace(".", "").isnumeric(): # stable release
s2 = s + "b999" # make it latest beta version
elif "rc" in s:
s2 = s.replace("rc", "b8")
else:
s2 = s
try:
return StrictVersion(s2)
except Exception:
# use only numbers
nums = re.findall(r"\d+", s)
while len(nums) < 2:
nums.append("0")
return StrictVersion(".".join(nums[:3]))
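        # Illustrative examples of the massaging above (not exhaustive): "1.0.2" is compared as
        # StrictVersion("1.0.2b999") and "1.0rc1" as StrictVersion("1.0b81"); anything still
        # unparseable falls back to its numeric components only.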
version_strings = list(package_metadata["releases"].keys())
version_strings.sort(key=version_sort_key, reverse=True)
self.version_var = ui_utils.create_string_var(
selected_version, self._start_fetching_version_info
)
self.version_combo = ttk.Combobox(
main_frame, textvariable=self.version_var, values=version_strings, exportselection=False
)
self.version_combo.state(["!disabled", "readonly"])
self.version_combo.grid(row=1, column=0, columnspan=2, pady=(0, 15), padx=20, sticky="ew")
self.requires_label = ttk.Label(main_frame, text="")
self.requires_label.grid(row=2, column=0, columnspan=2, pady=(0, 15), padx=20, sticky="ew")
self.update_deps_var = tk.IntVar()
self.update_deps_var.set(0)
self.update_deps_cb = ttk.Checkbutton(
main_frame, text=tr("Upgrade dependencies"), variable=self.update_deps_var
)
if support_update_deps_switch:
self.update_deps_cb.grid(row=3, column=0, columnspan=2, padx=20, sticky="w")
self.ok_button = ttk.Button(
main_frame, text=master.get_install_button_text(), command=self._ok
)
self.ok_button.grid(row=4, column=0, pady=15, padx=(20, 0), sticky="se")
self.cancel_button = ttk.Button(main_frame, text=tr("Cancel"), command=self._cancel)
self.cancel_button.grid(row=4, column=1, pady=15, padx=(5, 20), sticky="se")
# self.resizable(height=tk.FALSE, width=tk.FALSE)
self.version_combo.focus_set()
self.bind("<Escape>", self._cancel, True)
self.protocol("WM_DELETE_WINDOW", self._cancel)
if self.version_var.get().strip():
self._start_fetching_version_info()
def _set_state(self, state):
self._state = state
widgets = [
self.version_combo,
# self.search_box, # looks funny when disabled
self.ok_button,
self.update_deps_cb,
]
if state == "idle":
self.config(cursor="")
for widget in widgets:
if widget == self.version_combo:
widget.state(["!disabled", "readonly"])
else:
widget["state"] = tk.NORMAL
else:
self.config(cursor=get_busy_cursor())
for widget in widgets:
widget["state"] = tk.DISABLED
if self.version_var.get().strip() == "" or not self._version_data:
self.ok_button["state"] = tk.DISABLED
def _start_fetching_version_info(self):
self._set_state("busy")
_start_fetching_package_info(
self._package_name, self.version_var.get(), self._show_version_info
)
def _show_version_info(self, name, info, error_code=None):
if self._closed:
return
self._version_data = info
if (
not error_code
and "requires_dist" in info["info"]
and isinstance(info["info"]["requires_dist"], list)
):
reqs = tr("Requires:") + "\n * " + "\n * ".join(info["info"]["requires_dist"])
elif error_code:
reqs = tr("Error code:") + " " + str(error_code)
if "error" in info:
reqs += "\n" + tr("Error:") + " " + info["error"]
else:
reqs = ""
self.requires_label.configure(text=reqs)
self._set_state("idle")
def _ok(self, event=None):
self.result = (self.version_var.get(), self._version_data, bool(self.update_deps_var.get()))
self._closed = True
self.destroy()
def _cancel(self, event=None):
self.result = None
self._closed = True
self.destroy()
def _fetch_url_future(url, timeout=10):
from urllib.request import urlopen
def load_url():
with urlopen(url, timeout=timeout) as conn:
return (conn, conn.read())
from concurrent.futures.thread import ThreadPoolExecutor
executor = ThreadPoolExecutor(max_workers=1)
return executor.submit(load_url)
def _get_latest_stable_version(version_strings):
from distutils.version import LooseVersion
versions = []
for s in version_strings:
if s.replace(".", "").isnumeric(): # Assuming stable versions have only dots and numbers
versions.append(
LooseVersion(s)
) # LooseVersion __str__ doesn't change the version string
if len(versions) == 0:
return None
return str(sorted(versions)[-1])
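# Illustrative behaviour: ["1.0.2", "1.1.0b3", "1.0.10"] yields "1.0.10", because pre-release
# strings containing letters are skipped by the isnumeric() check above.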
def _ask_installation_details(master, data, selected_version, support_update_deps_switch):
dlg = DetailsDialog(master, data, selected_version, support_update_deps_switch)
ui_utils.show_dialog(dlg, master)
return dlg.result
def _start_fetching_package_info(name, version_str, completion_handler):
import urllib.error
import urllib.parse
# Fetch info from PyPI
if version_str is None:
url = "https://pypi.org/pypi/{}/json".format(urllib.parse.quote(name))
else:
url = "https://pypi.org/pypi/{}/{}/json".format(
urllib.parse.quote(name), urllib.parse.quote(version_str)
)
url_future = _fetch_url_future(url)
def poll_fetch_complete():
import json
if url_future.done():
try:
_, bin_data = url_future.result()
raw_data = bin_data.decode("UTF-8")
completion_handler(name, json.loads(raw_data))
except urllib.error.HTTPError as e:
completion_handler(
name, {"info": {"name": name}, "error": str(e), "releases": {}}, e.code
)
except Exception as e:
completion_handler(
name, {"info": {"name": name}, "error": str(e), "releases": {}}, e
)
else:
tk._default_root.after(200, poll_fetch_complete)
poll_fetch_complete()
def _start_fetching_search_results(query, completion_handler):
import urllib.parse
url = "https://pypi.org/search/?q={}".format(urllib.parse.quote(query))
url_future = _fetch_url_future(url)
def poll_fetch_complete():
if url_future.done():
try:
_, bin_data = url_future.result()
raw_data = bin_data.decode("UTF-8")
completion_handler(query, _extract_search_results(raw_data))
except Exception as e:
completion_handler(query, str(e))
else:
tk._default_root.after(200, poll_fetch_complete)
poll_fetch_complete()
def _extract_search_results(html_data: str) -> List:
from html.parser import HTMLParser
def get_class(attrs):
for name, value in attrs:
if name == "class":
return value
return None
class_prefix = "package-snippet__"
class PypiSearchResultsParser(HTMLParser):
def __init__(self, data):
HTMLParser.__init__(self)
self.results = []
self.active_class = None
self.feed(data)
def handle_starttag(self, tag, attrs):
if tag == "a" and get_class(attrs) == "package-snippet":
self.results.append({})
if tag in ("span", "p"):
tag_class = get_class(attrs)
if tag_class in ("package-snippet__name", "package-snippet__description"):
self.active_class = tag_class
else:
self.active_class = None
else:
self.active_class = None
def handle_data(self, data):
if self.active_class is not None:
att_name = self.active_class[len(class_prefix) :]
self.results[-1][att_name] = data
def handle_endtag(self, tag):
self.active_class = None
return PypiSearchResultsParser(html_data).results
def _extract_click_text(widget, event, tag):
# http://stackoverflow.com/a/33957256/261181
try:
index = widget.index("@%s,%s" % (event.x, event.y))
tag_indices = list(widget.tag_ranges(tag))
for start, end in zip(tag_indices[0::2], tag_indices[1::2]):
# check if the tag matches the mouse click index
if widget.compare(start, "<=", index) and widget.compare(index, "<", end):
return widget.get(start, end)
except Exception:
logger.exception("extracting click text")
return None
def get_not_supported_translation():
return tr("Package manager is not available for this interpreter")
def load_plugin() -> None:
def get_pip_gui_class():
proxy = get_runner().get_backend_proxy()
if proxy is None:
return None
return proxy.get_pip_gui_class()
def open_backend_pip_gui(*args):
pg_class = get_pip_gui_class()
if pg_class is None:
showerror(tr("Not supported"), get_not_supported_translation())
return
if not get_runner().is_waiting_toplevel_command():
showerror(
tr("Not available"),
tr("You need to stop your program before launching the package manager."),
master=get_workbench(),
)
return
pg = pg_class(get_workbench())
ui_utils.show_dialog(pg)
def open_backend_pip_gui_enabled():
return get_pip_gui_class() is not None
def open_frontend_pip_gui(*args):
pg = PluginsPipDialog(get_workbench())
ui_utils.show_dialog(pg)
get_workbench().add_command(
"backendpipgui",
"tools",
tr("Manage packages..."),
open_backend_pip_gui,
tester=open_backend_pip_gui_enabled,
group=80,
)
get_workbench().add_command(
"pluginspipgui", "tools", tr("Manage plug-ins..."), open_frontend_pip_gui, group=180
)
|
py | b40937acb2bafe9fc99c7f0a450cb22a62ee32fe | #
# Autogenerated by Thrift Compiler (0.9.3)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py:new_style
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
import impala._thrift_gen.Status.ttypes
import impala._thrift_gen.Types.ttypes
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class TExecState(object):
REGISTERED = 0
PLANNING = 1
QUEUED = 2
RUNNING = 3
FINISHED = 4
CANCELLED = 5
FAILED = 6
_VALUES_TO_NAMES = {
0: "REGISTERED",
1: "PLANNING",
2: "QUEUED",
3: "RUNNING",
4: "FINISHED",
5: "CANCELLED",
6: "FAILED",
}
_NAMES_TO_VALUES = {
"REGISTERED": 0,
"PLANNING": 1,
"QUEUED": 2,
"RUNNING": 3,
"FINISHED": 4,
"CANCELLED": 5,
"FAILED": 6,
}
class TExecStats(object):
"""
Attributes:
- latency_ns
- cpu_time_ns
- cardinality
- memory_used
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'latency_ns', None, None, ), # 1
(2, TType.I64, 'cpu_time_ns', None, None, ), # 2
(3, TType.I64, 'cardinality', None, None, ), # 3
(4, TType.I64, 'memory_used', None, None, ), # 4
)
def __init__(self, latency_ns=None, cpu_time_ns=None, cardinality=None, memory_used=None,):
self.latency_ns = latency_ns
self.cpu_time_ns = cpu_time_ns
self.cardinality = cardinality
self.memory_used = memory_used
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.latency_ns = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.cpu_time_ns = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.cardinality = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.I64:
self.memory_used = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TExecStats')
if self.latency_ns is not None:
oprot.writeFieldBegin('latency_ns', TType.I64, 1)
oprot.writeI64(self.latency_ns)
oprot.writeFieldEnd()
if self.cpu_time_ns is not None:
oprot.writeFieldBegin('cpu_time_ns', TType.I64, 2)
oprot.writeI64(self.cpu_time_ns)
oprot.writeFieldEnd()
if self.cardinality is not None:
oprot.writeFieldBegin('cardinality', TType.I64, 3)
oprot.writeI64(self.cardinality)
oprot.writeFieldEnd()
if self.memory_used is not None:
oprot.writeFieldBegin('memory_used', TType.I64, 4)
oprot.writeI64(self.memory_used)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.latency_ns)
value = (value * 31) ^ hash(self.cpu_time_ns)
value = (value * 31) ^ hash(self.cardinality)
value = (value * 31) ^ hash(self.memory_used)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TPlanNodeExecSummary(object):
"""
Attributes:
- node_id
- fragment_idx
- label
- label_detail
- num_children
- estimated_stats
- exec_stats
- is_broadcast
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'node_id', None, None, ), # 1
(2, TType.I32, 'fragment_idx', None, None, ), # 2
(3, TType.STRING, 'label', None, None, ), # 3
(4, TType.STRING, 'label_detail', None, None, ), # 4
(5, TType.I32, 'num_children', None, None, ), # 5
(6, TType.STRUCT, 'estimated_stats', (TExecStats, TExecStats.thrift_spec), None, ), # 6
(7, TType.LIST, 'exec_stats', (TType.STRUCT,(TExecStats, TExecStats.thrift_spec)), None, ), # 7
(8, TType.BOOL, 'is_broadcast', None, None, ), # 8
)
def __init__(self, node_id=None, fragment_idx=None, label=None, label_detail=None, num_children=None, estimated_stats=None, exec_stats=None, is_broadcast=None,):
self.node_id = node_id
self.fragment_idx = fragment_idx
self.label = label
self.label_detail = label_detail
self.num_children = num_children
self.estimated_stats = estimated_stats
self.exec_stats = exec_stats
self.is_broadcast = is_broadcast
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.node_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I32:
self.fragment_idx = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.label = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.label_detail = iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.num_children = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.estimated_stats = TExecStats()
self.estimated_stats.read(iprot)
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.LIST:
self.exec_stats = []
(_etype3, _size0) = iprot.readListBegin()
for _i4 in xrange(_size0):
_elem5 = TExecStats()
_elem5.read(iprot)
self.exec_stats.append(_elem5)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.BOOL:
self.is_broadcast = iprot.readBool()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TPlanNodeExecSummary')
if self.node_id is not None:
oprot.writeFieldBegin('node_id', TType.I32, 1)
oprot.writeI32(self.node_id)
oprot.writeFieldEnd()
if self.fragment_idx is not None:
oprot.writeFieldBegin('fragment_idx', TType.I32, 2)
oprot.writeI32(self.fragment_idx)
oprot.writeFieldEnd()
if self.label is not None:
oprot.writeFieldBegin('label', TType.STRING, 3)
oprot.writeString(self.label)
oprot.writeFieldEnd()
if self.label_detail is not None:
oprot.writeFieldBegin('label_detail', TType.STRING, 4)
oprot.writeString(self.label_detail)
oprot.writeFieldEnd()
if self.num_children is not None:
oprot.writeFieldBegin('num_children', TType.I32, 5)
oprot.writeI32(self.num_children)
oprot.writeFieldEnd()
if self.estimated_stats is not None:
oprot.writeFieldBegin('estimated_stats', TType.STRUCT, 6)
self.estimated_stats.write(oprot)
oprot.writeFieldEnd()
if self.exec_stats is not None:
oprot.writeFieldBegin('exec_stats', TType.LIST, 7)
oprot.writeListBegin(TType.STRUCT, len(self.exec_stats))
for iter6 in self.exec_stats:
iter6.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.is_broadcast is not None:
oprot.writeFieldBegin('is_broadcast', TType.BOOL, 8)
oprot.writeBool(self.is_broadcast)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.node_id is None:
raise TProtocol.TProtocolException(message='Required field node_id is unset!')
if self.fragment_idx is None:
raise TProtocol.TProtocolException(message='Required field fragment_idx is unset!')
if self.label is None:
raise TProtocol.TProtocolException(message='Required field label is unset!')
if self.num_children is None:
raise TProtocol.TProtocolException(message='Required field num_children is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.node_id)
value = (value * 31) ^ hash(self.fragment_idx)
value = (value * 31) ^ hash(self.label)
value = (value * 31) ^ hash(self.label_detail)
value = (value * 31) ^ hash(self.num_children)
value = (value * 31) ^ hash(self.estimated_stats)
value = (value * 31) ^ hash(self.exec_stats)
value = (value * 31) ^ hash(self.is_broadcast)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TExecProgress(object):
"""
Attributes:
- total_scan_ranges
- num_completed_scan_ranges
"""
thrift_spec = (
None, # 0
(1, TType.I64, 'total_scan_ranges', None, None, ), # 1
(2, TType.I64, 'num_completed_scan_ranges', None, None, ), # 2
)
def __init__(self, total_scan_ranges=None, num_completed_scan_ranges=None,):
self.total_scan_ranges = total_scan_ranges
self.num_completed_scan_ranges = num_completed_scan_ranges
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.total_scan_ranges = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.num_completed_scan_ranges = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TExecProgress')
if self.total_scan_ranges is not None:
oprot.writeFieldBegin('total_scan_ranges', TType.I64, 1)
oprot.writeI64(self.total_scan_ranges)
oprot.writeFieldEnd()
if self.num_completed_scan_ranges is not None:
oprot.writeFieldBegin('num_completed_scan_ranges', TType.I64, 2)
oprot.writeI64(self.num_completed_scan_ranges)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.total_scan_ranges)
value = (value * 31) ^ hash(self.num_completed_scan_ranges)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class TExecSummary(object):
"""
Attributes:
- state
- status
- nodes
- exch_to_sender_map
- error_logs
- progress
- is_queued
- queued_reason
"""
thrift_spec = (
None, # 0
(1, TType.I32, 'state', None, None, ), # 1
(2, TType.STRUCT, 'status', (impala._thrift_gen.Status.ttypes.TStatus, impala._thrift_gen.Status.ttypes.TStatus.thrift_spec), None, ), # 2
(3, TType.LIST, 'nodes', (TType.STRUCT,(TPlanNodeExecSummary, TPlanNodeExecSummary.thrift_spec)), None, ), # 3
(4, TType.MAP, 'exch_to_sender_map', (TType.I32,None,TType.I32,None), None, ), # 4
(5, TType.LIST, 'error_logs', (TType.STRING,None), None, ), # 5
(6, TType.STRUCT, 'progress', (TExecProgress, TExecProgress.thrift_spec), None, ), # 6
(7, TType.BOOL, 'is_queued', None, None, ), # 7
(8, TType.STRING, 'queued_reason', None, None, ), # 8
)
def __init__(self, state=None, status=None, nodes=None, exch_to_sender_map=None, error_logs=None, progress=None, is_queued=None, queued_reason=None,):
self.state = state
self.status = status
self.nodes = nodes
self.exch_to_sender_map = exch_to_sender_map
self.error_logs = error_logs
self.progress = progress
self.is_queued = is_queued
self.queued_reason = queued_reason
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.state = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRUCT:
self.status = impala._thrift_gen.Status.ttypes.TStatus()
self.status.read(iprot)
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.LIST:
self.nodes = []
(_etype10, _size7) = iprot.readListBegin()
for _i11 in xrange(_size7):
_elem12 = TPlanNodeExecSummary()
_elem12.read(iprot)
self.nodes.append(_elem12)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.MAP:
self.exch_to_sender_map = {}
(_ktype14, _vtype15, _size13 ) = iprot.readMapBegin()
for _i17 in xrange(_size13):
_key18 = iprot.readI32()
_val19 = iprot.readI32()
self.exch_to_sender_map[_key18] = _val19
iprot.readMapEnd()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.LIST:
self.error_logs = []
(_etype23, _size20) = iprot.readListBegin()
for _i24 in xrange(_size20):
_elem25 = iprot.readString()
self.error_logs.append(_elem25)
iprot.readListEnd()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRUCT:
self.progress = TExecProgress()
self.progress.read(iprot)
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.BOOL:
self.is_queued = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.STRING:
self.queued_reason = iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('TExecSummary')
if self.state is not None:
oprot.writeFieldBegin('state', TType.I32, 1)
oprot.writeI32(self.state)
oprot.writeFieldEnd()
if self.status is not None:
oprot.writeFieldBegin('status', TType.STRUCT, 2)
self.status.write(oprot)
oprot.writeFieldEnd()
if self.nodes is not None:
oprot.writeFieldBegin('nodes', TType.LIST, 3)
oprot.writeListBegin(TType.STRUCT, len(self.nodes))
for iter26 in self.nodes:
iter26.write(oprot)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.exch_to_sender_map is not None:
oprot.writeFieldBegin('exch_to_sender_map', TType.MAP, 4)
oprot.writeMapBegin(TType.I32, TType.I32, len(self.exch_to_sender_map))
for kiter27,viter28 in self.exch_to_sender_map.items():
oprot.writeI32(kiter27)
oprot.writeI32(viter28)
oprot.writeMapEnd()
oprot.writeFieldEnd()
if self.error_logs is not None:
oprot.writeFieldBegin('error_logs', TType.LIST, 5)
oprot.writeListBegin(TType.STRING, len(self.error_logs))
for iter29 in self.error_logs:
oprot.writeString(iter29)
oprot.writeListEnd()
oprot.writeFieldEnd()
if self.progress is not None:
oprot.writeFieldBegin('progress', TType.STRUCT, 6)
self.progress.write(oprot)
oprot.writeFieldEnd()
if self.is_queued is not None:
oprot.writeFieldBegin('is_queued', TType.BOOL, 7)
oprot.writeBool(self.is_queued)
oprot.writeFieldEnd()
if self.queued_reason is not None:
oprot.writeFieldBegin('queued_reason', TType.STRING, 8)
oprot.writeString(self.queued_reason)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
if self.state is None:
raise TProtocol.TProtocolException(message='Required field state is unset!')
return
def __hash__(self):
value = 17
value = (value * 31) ^ hash(self.state)
value = (value * 31) ^ hash(self.status)
value = (value * 31) ^ hash(self.nodes)
value = (value * 31) ^ hash(self.exch_to_sender_map)
value = (value * 31) ^ hash(self.error_logs)
value = (value * 31) ^ hash(self.progress)
value = (value * 31) ^ hash(self.is_queued)
value = (value * 31) ^ hash(self.queued_reason)
return value
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
|
py | b4093817f2ca3be94b2b6d05d8b39eac0b3d005a | #!/Users/Varun/Documents/GitHub/LockScreen/venv/bin/python
# $Id: rst2html.py 4564 2006-05-21 20:44:42Z wiemann $
# Author: David Goodger <[email protected]>
# Copyright: This module has been placed in the public domain.
"""
A minimal front end to the Docutils Publisher, producing HTML.
"""
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except:
pass
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
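# Typical invocation (illustrative): python rst2html.py input.rst output.html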
|
py | b4093934e85db1c13f0ab8a65a847d33b6ffe568 | '''
This file is part of openSMILE.
Copyright (c) audEERING GmbH. All rights reserved.
See the file COPYING for details on license terms.
'''
from ctypes import *
from typing import Any, Callable, Dict, List, Optional
import json
import os
import numpy as np
if os.name == 'nt':
smileapi_path = os.path.join(os.getcwd(), "SMILEapi.dll")
else:
smileapi_path = os.path.join(os.getcwd(), "libSMILEapi.so")
smileapi = cdll.LoadLibrary(smileapi_path)
# definitions from smileComponent.hpp
CMSG_textLen = 64
CMSG_typenameLen = 32
CMSG_nUserData = 8
class ComponentMessage(Structure):
"""
An openSMILE component message.
"""
_fields_ = [
("_msgtype", c_char * CMSG_typenameLen),
("_msgname", c_char * CMSG_typenameLen),
("_sender", c_char_p),
("smileTime", c_double),
("userTime1", c_double),
("userTime2", c_double),
("readerTime", c_double),
("msgid", c_int),
("floatData", c_double * CMSG_nUserData),
("intData", c_int * CMSG_nUserData),
("_msgtext", c_char * CMSG_textLen),
("userflag1", c_int),
("userflag2", c_int),
("userflag3", c_int),
("custData", c_void_p),
("custData2", c_void_p),
("custDataSize", c_int),
("custData2Size", c_int),
("custDataType", c_int),
("custData2Type", c_int)
]
@property
def msgtype(self):
return self._msgtype.decode("utf-8")
@property
def msgname(self):
return self._msgname.decode("utf-8")
@property
def sender(self):
return self._sender.decode("utf-8")
@property
def msgtext(self):
return self._msgtext.decode("utf-8")
def unpack_json(self):
"""
Unpacks a component message that wraps a JSON object and returns the JSON data as
a dictionary.
"""
if self.msgtype != "_CONTAINER" or self.msgname != "jsonObject":
raise ValueError("Message does not contain JSON data")
cust_data = cast(self.custData, c_char_p)
if cust_data:
return json.loads(cust_data.value.decode("ascii"))
else:
return None
def __str__(self):
return "type: {}, name: {}, sender: {}, msgtext: {}".format(
self.msgtype,
self.msgname,
self.sender,
self.msgtext)
# Success and error return codes
SMILE_SUCCESS = 0 # success
SMILE_FAIL = 1 # generic error
SMILE_INVALID_ARG = 2 # an invalid argument was passed
SMILE_INVALID_STATE = 3 # openSMILE was in an invalid state for the called function
SMILE_COMP_NOT_FOUND = 4 # component instance was not found
SMILE_LICENSE_FAIL = 5 # license validation check failed
SMILE_CONFIG_PARSE_FAIL = 6 # configuration could not be loaded
SMILE_CONFIG_INIT_FAIL = 7 # configuration could not be initialized
SMILE_NOT_WRITTEN = 8 # data could not be written to a cExternalSource/cExternalAudioSource component
# openSMILE states
SMILE_UNINITIALIZED = 0 # no configuration has been loaded yet
SMILE_INITIALIZED = 1 # a configuration has been loaded
SMILE_RUNNING = 2 # openSMILE is running
SMILE_ENDED = 3 # openSMILE has finished
# openSMILE log message types
SMILE_LOG_MESSAGE = 1
SMILE_LOG_WARNING = 2
SMILE_LOG_ERROR = 3
SMILE_LOG_DEBUG = 4
SMILE_LOG_PRINT = 5
class LogMessage(Structure):
_fields_ = [
("type", c_int),
("level", c_int),
("_text", c_char_p),
("_module", c_char_p)
]
@property
def text(self):
return self._text.decode("utf-8")
@property
def module(self):
if self._module:
return self._module.decode("utf-8")
else:
return None
def __str__(self):
if self.level == SMILE_LOG_PRINT:
return self.text
else:
types = ["MSG", "WRN", "ERR", "DBG"]
result = "({}) [{}]".format(types[self.type - 1], self.level)
if self._module is not None:
result += " " + self.module
result += ": " + self.text
return result
class FrameMetaData(Structure):
_fields_ = [
("vIdx", c_long),
("time", c_double),
("period", c_double),
("lengthSec", c_double)
]
def __str__(self):
return "vIdx: {}, time: {}, period: {}, lengthSec: {}".format(
self.vIdx,
self.time,
self.period,
self.lengthSec)
LogCallback = CFUNCTYPE(None, c_void_p, LogMessage, c_void_p)
StateChangedCallback = CFUNCTYPE(None, c_void_p, c_int, c_void_p)
# TODO: return type bool does not exist in C!
ExternalSinkCallback = CFUNCTYPE(c_int, POINTER(c_float), c_long, c_void_p)
ExternalSinkCallbackEx = CFUNCTYPE(c_int, POINTER(c_float), c_long, c_long, POINTER(FrameMetaData), c_void_p)
ExternalMessageInterfaceCallback = CFUNCTYPE(c_int, POINTER(ComponentMessage), c_void_p)
ExternalMessageInterfaceJsonCallback = CFUNCTYPE(c_int, c_char_p, c_void_p)
smileapi.smile_new.argtypes = []
smileapi.smile_new.restype = c_void_p
smileapi.smile_initialize.argtypes = [c_void_p, c_char_p, c_int, c_void_p, c_int, c_int, c_int, c_void_p]
smileapi.smile_initialize.restype = c_int
smileapi.smile_run.argtypes = [c_void_p]
smileapi.smile_run.restype = c_int
smileapi.smile_abort.argtypes = [c_void_p]
smileapi.smile_abort.restype = c_int
smileapi.smile_reset.argtypes = [c_void_p]
smileapi.smile_reset.restype = c_int
smileapi.smile_set_log_callback.argtypes = [c_void_p, LogCallback, c_void_p]
smileapi.smile_set_log_callback.restype = c_int
smileapi.smile_get_state.argtypes = [c_void_p]
smileapi.smile_get_state.restype = c_int
smileapi.smile_set_state_callback.argtypes = [c_void_p, StateChangedCallback, c_void_p]
smileapi.smile_set_state_callback.restype = c_int
smileapi.smile_free.argtypes = [c_void_p]
smileapi.smile_free.restype = None
smileapi.smile_extsource_write_data.argtypes = [c_void_p, c_char_p, POINTER(c_float), c_int]
smileapi.smile_extsource_write_data.restype = c_int
smileapi.smile_extsource_set_external_eoi.argtypes = [c_void_p, c_char_p]
smileapi.smile_extsource_set_external_eoi.restype = c_int
smileapi.smile_extaudiosource_write_data.argtypes = [c_void_p, c_char_p, c_void_p, c_int]
smileapi.smile_extaudiosource_write_data.restype = c_int
smileapi.smile_extaudiosource_set_external_eoi.argtypes = [c_void_p, c_char_p]
smileapi.smile_extaudiosource_set_external_eoi.restype = c_int
smileapi.smile_extsink_set_data_callback.argtypes = [c_void_p, c_char_p, ExternalSinkCallback, c_void_p]
smileapi.smile_extsink_set_data_callback.restype = c_int
smileapi.smile_extsink_set_data_callback_ex.argtypes = [c_void_p, c_char_p, ExternalSinkCallbackEx, c_void_p]
smileapi.smile_extsink_set_data_callback_ex.restype = c_int
smileapi.smile_extsink_get_num_elements.argtypes = [c_void_p, c_char_p, POINTER(c_long)]
smileapi.smile_extsink_get_num_elements.restype = c_int
smileapi.smile_extsink_get_element_name.argtypes = [c_void_p, c_char_p, c_long, POINTER(c_char_p)]
smileapi.smile_extsink_get_element_name.restype = c_int
smileapi.smile_extmsginterface_set_msg_callback.argtypes = [c_void_p, c_char_p, ExternalMessageInterfaceCallback, c_void_p]
smileapi.smile_extmsginterface_set_msg_callback.restype = c_int
smileapi.smile_extmsginterface_set_json_msg_callback.argtypes = [c_void_p, c_char_p, ExternalMessageInterfaceJsonCallback, c_void_p]
smileapi.smile_extmsginterface_set_json_msg_callback.restype = c_int
smileapi.smile_error_msg.argtypes = [c_void_p]
smileapi.smile_error_msg.restype = c_char_p
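# Builds a NULL-terminated array of C strings (char *[]) from a list of bytes objects,
# as expected by smile_initialize for the flattened command-line options.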
def c_char_p_arr(l):
arr = (c_char_p * (len(l) + 1))()
arr[:-1] = l
arr[-1] = None
return arr
class OpenSmileException(Exception):
"""
Exception thrown for internal openSMILE errors.
"""
def __init__(self, code: int, message: Optional[str] = None):
self.code = code
self.message = message
def __str__(self):
if self.message:
return "Code: {}, Message: {}".format(self.code, self.message)
else:
return "Code: {}".format(self.code)
class OpenSMILE(object):
"""
The main class implementing the interface to openSMILE.
"""
def __init__(self):
self._smileobj = smileapi.smile_new()
if self._smileobj is None:
raise OpenSmileException(SMILE_FAIL, "could not create new SMILEapi object")
self._callbacks = []
def initialize(self, config_file: str, options: Dict[str, Any]=None, loglevel: int=2, debug: bool=False,
console_output: bool=False, log_file: str = None):
"""
Initializes openSMILE with the provided config file and command-line options.
"""
        options = options or {}  # guard against the default of None before flattening
        options_flat = list(map(lambda v: bytes(str(v), "ascii"), sum(options.items(), ())))
options_char_arr = c_char_p_arr(options_flat)
log_file = bytes(log_file, "ascii") if log_file else int(0)
self._check_smile_result(smileapi.smile_initialize(self._smileobj, bytes(config_file, "ascii"), len(options), options_char_arr, loglevel, int(debug), int(console_output), log_file))
def external_source_write_data(self, component_name: str, data: np.ndarray) -> bool:
"""
Writes a data buffer to the specified instance of a cExternalSource component.
Returns True if the data was written successfully, otherwise returns False
(e.g. if the internal buffer of the component is full).
"""
if len(data.shape) != 1:
raise ValueError("data parameter must have exactly one dimension")
if data.dtype.name != "float32":
raise ValueError("data parameter must have dtype float32")
data_p = data.ctypes.data_as(POINTER(c_float))
result = smileapi.smile_extsource_write_data(self._smileobj, bytes(component_name, "ascii"), data_p, len(data))
if result == SMILE_SUCCESS:
return True
elif result == SMILE_NOT_WRITTEN:
return False
else:
self._check_smile_result(result)
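    # Illustrative usage (component instance name "externalSource" is an assumption):
    #   while not smile.external_source_write_data("externalSource", chunk):
    #       time.sleep(0.01)  # component buffer is full, retry shortly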
def external_source_set_eoi(self, component_name: str):
"""
Signals the end of the input for the specified cExternalSource component instance.
Attempts to write more data to the component after calling this method will fail.
        Raises an OpenSmileException if the end-of-input signal could not be set.
"""
self._check_smile_result(smileapi.smile_extsource_set_external_eoi(self._smileobj, bytes(component_name, "ascii")))
def external_audio_source_write_data(self, component_name: str, data: bytes) -> bool:
"""
Writes a data buffer to the specified instance of a cExternalAudioSource component.
The data must match the specified data format for the component (sample size,
number of channels, etc.).
Returns True if the data was written successfully, otherwise returns False
(e.g. if the internal buffer of the component is full).
"""
result = smileapi.smile_extaudiosource_write_data(self._smileobj, bytes(component_name, "ascii"), data, len(data))
if result == SMILE_SUCCESS:
return True
elif result == SMILE_NOT_WRITTEN:
return False
else:
self._check_smile_result(result)
def external_audio_source_set_eoi(self, component_name: str):
"""
Signals the end of the input for the specified cExternalAudioSource component instance.
Attempts to write more data to the component after calling this method will fail.
        Raises an OpenSmileException if the end-of-input signal could not be set.
"""
self._check_smile_result(smileapi.smile_extaudiosource_set_external_eoi(self._smileobj, bytes(component_name, "ascii")))
def external_sink_set_callback(self, component_name: str, callback: Callable[[np.ndarray], None]):
"""
Sets the callback function for the specified cExternalSink component instance.
The function will get called whenever another openSMILE component writes data to
the cExternalSink component.
"""
def internal_callback(data, vector_size, param):
numpy_array = np.ctypeslib.as_array(data, shape=(vector_size,))
callback(numpy_array)
return 1
cb = ExternalSinkCallback(internal_callback)
# we need to keep a reference to any callback objects as otherwise they may get garbage-collected
self._callbacks.append(cb)
self._check_smile_result(smileapi.smile_extsink_set_data_callback(self._smileobj, bytes(component_name, "ascii"), cb, None))
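    # Illustrative usage (component instance name "externalSink" is an assumption):
    #   def on_frame(frame: np.ndarray) -> None:
    #       print(frame.shape, frame.mean())
    #   smile.external_sink_set_callback("externalSink", on_frame)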
def external_sink_set_callback_ex(self, component_name: str, callback: Callable[[np.ndarray], None]):
"""
Sets the extended callback function for the specified cExternalSink component instance.
The function will get called whenever another openSMILE component writes data to
the cExternalSink component.
"""
def internal_callback_ex(data, nt, n, meta: POINTER(FrameMetaData), _):
numpy_array = np.ctypeslib.as_array(data, shape=(nt, n))
callback(numpy_array, meta.contents)
return 1
cb = ExternalSinkCallbackEx(internal_callback_ex)
# we need to keep a reference to any callback objects as otherwise they may get garbage-collected
self._callbacks.append(cb)
self._check_smile_result(smileapi.smile_extsink_set_data_callback_ex(self._smileobj, bytes(component_name, "ascii"), cb, None))
def external_sink_get_num_elements(self, component_name: str) -> int:
num_elements = c_long()
self._check_smile_result(smileapi.smile_extsink_get_num_elements(self._smileobj, bytes(component_name, "ascii"), byref(num_elements)))
return num_elements.value
def external_sink_get_element_name(self, component_name: str, idx: int) -> str:
element_name = c_char_p()
self._check_smile_result(smileapi.smile_extsink_get_element_name(self._smileobj, bytes(component_name, "ascii"), idx, byref(element_name)))
return element_name.value.decode("ascii")
def external_message_interface_set_callback(self, component_name: str, callback: Callable[[ComponentMessage], None]):
"""
Sets the callback function for the specified cExternalMessageInterface component instance.
The function will get called whenever the component receives a message.
"""
# we need to keep a reference to any callback objects as otherwise they may get garbage-collected
def internal_callback(message: POINTER(ComponentMessage), param):
callback(message.contents)
return 1
cb = ExternalMessageInterfaceCallback(internal_callback)
self._callbacks.append(cb)
self._check_smile_result(smileapi.smile_extmsginterface_set_msg_callback(self._smileobj, bytes(component_name, "ascii"), cb, None))
def external_message_interface_set_json_callback(self, component_name: str, callback: Callable[[Dict], None]):
"""
Sets the callback function for the specified cExternalMessageInterface component instance.
The function will get called whenever the component receives a message.
"""
# we need to keep a reference to any callback objects as otherwise they may get garbage-collected
def internal_callback(json_message: bytes, param):
callback(json.loads(json_message.decode("ascii")))
return 1
cb = ExternalMessageInterfaceJsonCallback(internal_callback)
self._callbacks.append(cb)
self._check_smile_result(smileapi.smile_extmsginterface_set_json_msg_callback(self._smileobj, bytes(component_name, "ascii"), cb, None))
def set_log_callback(self, callback: Callable[[LogMessage], None]):
"""
Sets a callback function for log messages.
        The function will get called whenever openSMILE generates a log message.
"""
# we need to keep a reference to any callback objects as otherwise they may get garbage-collected
def internal_callback(smileobj, message: LogMessage, param):
callback(message)
cb = LogCallback(internal_callback)
self._callbacks.append(cb)
self._check_smile_result(smileapi.smile_set_log_callback(self._smileobj, cb, None))
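    # Illustrative usage: forward openSMILE log messages to stdout (register before calling run()):
    #   smile.set_log_callback(lambda msg: print(str(msg)))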
def run(self):
"""
Starts processing and blocks until finished.
"""
self._check_smile_result(smileapi.smile_run(self._smileobj))
def abort(self):
"""
Requests abortion of the current run.
Note that openSMILE does not immediately stop after this function returns.
It might continue to run for a short while until the run method returns.
"""
self._check_smile_result(smileapi.smile_abort(self._smileobj))
def reset(self):
"""
Resets the internal state of openSMILE after a run has finished or was aborted.
After resetting, you may call 'run' again without the need to call 'initialize' first.
You must re-register any cExternalSink/cExternalMessageInterface callbacks, though.
"""
self._check_smile_result(smileapi.smile_reset(self._smileobj))
def free(self):
"""
Frees any internal resources allocated by openSMILE.
"""
if self._smileobj is not None:
smileapi.smile_free(self._smileobj)
self._smileobj = None
def _check_smile_result(self, result: int):
if result != SMILE_SUCCESS:
message = smileapi.smile_error_msg(self._smileobj)
if message is None or len(message) == 0:
raise OpenSmileException(result)
else:
raise OpenSmileException(result, message.decode("ascii"))
@staticmethod
def process(config_file: str, options: Dict[str, Any],
inputs: Dict[str, np.ndarray], outputs: List[str]) -> Dict[str, np.ndarray]:
"""
Runs the specified config file on a set of input data buffers and returns the
specified set of output buffers.
"""
opensmile = OpenSMILE()
opensmile.initialize(config_file, options)
for (input, data) in inputs.items():
if not opensmile.external_source_write_data(input, data):
raise Exception("Could not write input data to component '{}'".format(input))
opensmile.external_source_set_eoi(input)
output_data = {}
for output in outputs:
def callback(data: np.ndarray):
if output not in output_data:
output_data[output] = np.copy(data)
else:
output_data[output] = np.vstack((output_data[output], data))
opensmile.external_sink_set_callback(output, callback)
opensmile.run()
opensmile.free()
return output_data
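# Minimal usage sketch for OpenSMILE.process (config path and component instance names below are
# assumptions for illustration, not part of this module):
#
#   import numpy as np
#   audio = np.zeros(16000, dtype=np.float32)  # one second of silence at 16 kHz
#   features = OpenSMILE.process(
#       "config/egemaps.conf",                  # hypothetical config file
#       {"-nologfile": 1},                      # hypothetical command-line options
#       inputs={"externalSource": audio},       # name of a cExternalSource instance in the config
#       outputs=["externalSink"],               # name of a cExternalSink instance in the config
#   )
#   print(features["externalSink"].shape)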
|
py | b40939cfd0377933ad23a34cb3e1234e7906a75d | #!/usr/bin/env python
# Copyright Contributors to the Open Shading Language project.
# SPDX-License-Identifier: BSD-3-Clause
# https://github.com/imageworks/OpenShadingLanguage
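# Note: "command", "testshade" and "outputs" are expected to be injected by the OSL testsuite
# harness (runtest.py), which executes this run script; they are not defined here.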
command += testshade("-g 256 256 --center -od uint8 -o Cout out.tif test")
outputs = [ "out.txt", "out.tif" ]
|
py | b40939f679635e7986b5f66428266ba407bd39e3 | """
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from . import views
from gwells.views import *
from gwells.views.admin import *
from django.views.generic import TemplateView
# Creating 2 versions of the app_root. One without and one with trailing slash
# This will allow for any or no additional app_root context to be provided
app_root = settings.APP_CONTEXT_ROOT
if app_root:
app_root_slash = app_root + '/'
else:
app_root_slash = app_root
urlpatterns = [
# url(r'^'+ app_root +'$', views.HomeView.as_view(), name='home'),
url(r'^'+ app_root_slash +'robots\.txt$', TemplateView.as_view(template_name='robots.txt', content_type='text/plain'), name='robots'),
url(r'^'+ app_root_slash +'$', SearchView.well_search, name='home'),
url(r'^'+ app_root_slash +'search$', SearchView.well_search, name='search'),
# url(r'^(?P<pk>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})/$', views.DetailView.as_view(), name='detail'),
url(r'^'+ app_root_slash +'well/(?P<pk>[0-9]+)$', WellDetailView.as_view(), name='well_detail'),
url(r'^'+ app_root_slash +'registry-legacy$', RegistryView.as_view(), name='registry-legacy'),
url(r'^'+ app_root_slash +'submission/(?P<pk>[0-9]+)$', ActivitySubmissionDetailView.as_view(), name='activity_submission_detail'),
url(r'^'+ app_root_slash +'health$', HealthView.health, name='health'),
url(r'^'+ app_root_slash +'groundwater-information', TemplateView.as_view(template_name='gwells/groundwater_information.html'), name='groundwater_information'),
url(r'^'+ app_root_slash +'ajax/map_well_search/$', SearchView.map_well_search, name='map_well_search'),
url(r'^'+ app_root_slash +'registries/', include('registries.urls')),
]
if settings.ENABLE_DATA_ENTRY:
urlpatterns = [
url(r'^'+ app_root_slash +'submission/$', ActivitySubmissionListView.as_view(), name='activity_submission_list'),
url(r'^'+ app_root_slash +'submission/create$', ActivitySubmissionWizardView.as_view(views.FORMS), name='activity_submission_create'),
url(r'^'+ app_root_slash +'site_admin', AdminView.as_view(), name='site_admin'),
url(r'^'+ app_root_slash +'admin/survey', SurveyView.as_view(), name='survey'),
url(r'^'+ app_root_slash +'admin/survey/(?P<pk>[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12})$', SurveyView.as_view(), name='survey'),
#
url(r'^'+ app_root_slash +'admin/', include(admin.site.urls)),
url(r'^'+ app_root_slash +'accounts/', include('django.contrib.auth.urls')),
] + urlpatterns
if settings.DEBUG:
import debug_toolbar
urlpatterns = [
url(r'^__debug__/', include(debug_toolbar.urls)),
] + urlpatterns
|
py | b4093b24eb1ac231a4140846954055dfd69b65df | import os
import sys
import json
import traceback
import argparse
import requests
from requests.exceptions import HTTPError, ConnectionError
from deriva.core import BaseCLI, KeyValuePairArgs, format_credential, format_exception, urlparse
from deriva.transfer import DerivaRestore, DerivaRestoreError, DerivaRestoreConfigurationError, \
DerivaRestoreAuthenticationError, DerivaRestoreAuthorizationError
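# Command-line front end for DerivaRestore: builds the argument parser, runs the restore
# and maps known error conditions to exit codes.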
class DerivaRestoreCLI(BaseCLI):
def __init__(self, description, epilog, **kwargs):
BaseCLI.__init__(self, description, epilog, **kwargs)
self.parser.add_argument("--catalog", metavar="<1>", help="Catalog number. If a catalog number is not "
"specified, a new catalog will be created.")
self.parser.add_argument("input_path", metavar="<input_path>", help="Path to backup file or directory.")
mutex_group = self.parser.add_mutually_exclusive_group()
mutex_group.add_argument("--no-data", action="store_true",
help="Do not restore table data, restore schema only.")
mutex_group.add_argument("--no-schema", action="store_true",
help="Do not restore schema, restore data only.")
self.parser.add_argument("--no-assets", action="store_true",
help="Do not restore asset data, if present.")
self.parser.add_argument("--no-annotations", action="store_true",
help="Do not restore annotations.")
self.parser.add_argument("--no-policy", action="store_true",
help="Do not restore access policy and ACLs.")
self.parser.add_argument("--no-bag-materialize", action="store_true",
help="If the input format is a bag, do not materialize prior to restore.")
self.parser.add_argument("--weak-bag-validation", action="store_true",
help="If the input format is a bag, "
"do not abort the restore if the bag fails validation.")
self.parser.add_argument("--exclude-object", type=lambda s: [item.strip() for item in s.split(',')],
metavar="<schema>, <schema:table>, ...",
help="List of comma-delimited schema-name and/or schema-name/table-name to "
"exclude from the restore process, in the form <schema> or <schema:table>.")
self.parser.add_argument("--exclude-data", type=lambda s: [item.strip() for item in s.split(',')],
metavar="<schema>, <schema:table>, ...",
help="List of comma-delimited schema-name and/or schema-name/table-name to "
"exclude from the restore process, in the form <schema> or <schema:table>.")
self.parser.add_argument("envars", metavar="[key=value key=value ...]",
nargs=argparse.REMAINDER, action=KeyValuePairArgs, default={},
help="Variable length of whitespace-delimited key=value pair arguments used for "
"populating the processing environment with parameters for keyword substitution."
"For example: key1=value1 key2=value2")
def main(self):
try:
args = self.parse_cli()
except ValueError as e:
sys.stderr.write(str(e))
return 2
if not args.quiet:
sys.stderr.write("\n")
try:
assert args.host, "A hostname is required!"
server = dict()
server["catalog_id"] = args.catalog
if args.host.startswith("http"):
url = urlparse(args.host)
server["protocol"] = url.scheme
server["host"] = url.netloc
else:
server["protocol"] = "https"
server["host"] = args.host
restorer = DerivaRestore(server, **vars(args))
try:
restorer.restore()
except ConnectionError as e:
raise DerivaRestoreError("Connection error occurred. %s" % format_exception(e))
except HTTPError as e:
if e.response.status_code == requests.codes.unauthorized:
raise DerivaRestoreAuthenticationError(
"The requested service requires authentication and a valid login session could "
"not be found for the specified host. Server responded: %s" % e)
elif e.response.status_code == requests.codes.forbidden:
raise DerivaRestoreAuthorizationError(
"A requested operation was forbidden. Server responded: %s" % e)
except (DerivaRestoreError, DerivaRestoreConfigurationError,
DerivaRestoreAuthenticationError, DerivaRestoreAuthorizationError) as e:
sys.stderr.write(("\n" if not args.quiet else "") + format_exception(e))
if args.debug:
traceback.print_exc()
return 1
except:
sys.stderr.write("An unexpected error occurred.")
traceback.print_exc()
return 1
finally:
if not args.quiet:
sys.stderr.write("\n\n")
return 0
|
py | b4093b31314cda878e2235ce41e38c4be9ae147d | #!/usr/bin/env python
from storm_control.test.hal.standardHalTest import halTest
def test_hal_tcp_cfl7():
halTest(config_xml = "none_tcp_config.xml",
class_name = "CheckFocusLock7",
show_gui = True,
test_module = "storm_control.test.hal.tcp_tests")
if (__name__ == "__main__"):
test_hal_tcp_cfl7()
|
py | b4093f70553b2fd40e8c33815da8e7b4941456da | # Generated by Django 3.2.5 on 2021-07-13 06:36
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('insta', '0007_comment'),
]
operations = [
migrations.AlterField(
model_name='comment',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='image',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
migrations.AlterField(
model_name='profile',
name='id',
field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
),
]
|
py | b4093ff7a6a5d282826da950cf55be30232a1fb0 | import tensorflow as tf
import numpy as np
import scipy.misc
import sys
import time
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True)
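# Normalizes X over the batch (and spatial axes for 4D tensors); with flag=True it uses
# tf.nn.moments + tf.nn.batch_normalization, otherwise a manual (X - mean) / sqrt(var + eps).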
def batch_normalize(X, eps=1e-6,flag=False):
if flag :
if X.get_shape().ndims == 4:
mean, vari = tf.nn.moments(X, [0,1,2], keep_dims=True)
return tf.nn.batch_normalization(X,mean, vari, variance_epsilon=eps)
elif X.get_shape().ndims == 2:
mean, vari = tf.nn.moments(X, 0, keep_dims=True)
return tf.nn.batch_normalization(X, mean, vari, variance_epsilon=eps)
if X.get_shape().ndims == 4 :
mean = tf.reduce_mean(X,[0,1,2])
stddev = tf.reduce_mean(tf.square(X-mean),[0,1,2])
X = (X - mean)/tf.sqrt(stddev + eps)
elif X.get_shape().ndims == 2:
mean = tf.reduce_mean(X,[0])
stddev = tf.reduce_mean(tf.square(X-mean),[0])
X = (X - mean)/tf.sqrt(stddev + eps)
else:
        raise NotImplementedError("batch_normalize supports only 2D or 4D input tensors")
return X
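# Leaky ReLU written as alpha*x + (1 - alpha)*relu(x): slope alpha for x < 0, slope 1 for x >= 0.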
def LeakyRelu(X,alpha=0.3):
return alpha*X + (1-alpha)*tf.nn.relu(X)
def lrelu(X):
return LeakyRelu(X)
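# Conditional DCGAN on 64x64 images built from pairs of MNIST digits; learning rates,
# momentum and the output folder are taken from the command line (see learningR).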
class DCGAN():
def __init__ (self, batch_size = 50, image_shape = [28,28,1], embedding_size = 128, num_class =10, dim1 = 1024, dim2 = 128, dim3 = 64, dim_channel = 1,
dim4=16, learning_rate_1=sys.argv[1], learning_rate_2=sys.argv[2], momentum=sys.argv[3], folder_name=sys.argv[4]):
self.batch_size = batch_size
self.image_shape = image_shape
self.embedding_size = embedding_size
self.num_class = num_class
self.dim1 = dim1
self.dim2 = dim2
self.dim3 = dim3
self.dim4 = dim4
self.learning_rate_1 = float(learning_rate_1)
self.learning_rate_2 = float(learning_rate_2)
self.momentum = float(momentum)
self.folder_name = folder_name
self.dim_1 = self.image_shape[0]
self.dim_2 = self.image_shape[0] // 2
self.dim_4 = self.image_shape[0] // 4
self.dim_8 = self.image_shape[0] // 8
self.dim_channel = dim_channel
self.device = "/gpu:0"
self.image_size = reduce(lambda x,y : x*y, image_shape)
self.initializer = tf.random_normal_initializer(stddev=0.02)
def learningR(self):
return self.learning_rate_1 , self.learning_rate_2, self.momentum, self.folder_name
def normalize(self, X,reuse=False, name=None, flag=False):
if not flag:
mean, vari = tf.nn.moments(X, 0, keep_dims=True)
else:
mean, vari = tf.nn.moments(X, [0,1,2], keep_dims=True)
return tf.nn.batch_normalization(X, mean, vari, offset=None, scale=None, variance_epsilon=1e-6,name=name)
def cross_entropy(self, X, flag=True):
if flag:
labels = tf.ones_like(X)
else:
labels = tf.zeros_like(X)
softmax = tf.nn.softmax_cross_entropy_with_logits(logits=X, labels=labels)
return tf.reduce_mean(softmax)
def build_model(self):
with tf.device("/gpu:0"):
embedding = tf.placeholder(tf.float32, [self.batch_size, self.embedding_size])
classes = tf.placeholder(tf.float32, [self.batch_size,self.num_class])
r_image = tf.placeholder(tf.float32,[self.batch_size] + self.image_shape)
real_image = tf.reshape(r_image,[self.batch_size] + self.image_shape)
with tf.variable_scope("generator") as scope:
h4 = self.generate(embedding,classes,scope)
g_image = h4
with tf.variable_scope("discriminator") as scope:
real_value = self.discriminate(real_image,classes,scope)
with tf.variable_scope("discriminator") as scope:
scope.reuse_variables()
fake_value = self.discriminate(g_image,classes,scope)
real_value_softmax = tf.nn.softmax(real_value)
energy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=real_value_softmax, logits=fake_value))
d_cost = 0.7*(self.cross_entropy(real_value, True) + self.cross_entropy(fake_value, False)) - (0.3*energy)
g_cost = 0.7*(self.cross_entropy(fake_value, True)) + (0.3*energy)
return embedding, classes, r_image, d_cost, g_cost, fake_value, real_value
def discriminate(self, image, classes, scope):
with tf.device(self.device):
ystack = tf.reshape(classes, [self.batch_size, 1,1, self.num_class])
yneed_1 = ystack*tf.ones([self.batch_size, self.dim_1, self.dim_1, self.num_class])
yneed_2 = ystack*tf.ones([self.batch_size, self.dim_2, self.dim_2, self.num_class])
yneed_3 = ystack*tf.ones([self.batch_size, self.dim_4, self.dim_4, self.num_class])
LeakyReLU = tf.contrib.keras.layers.LeakyReLU()
image_proc = tf.concat(axis=3,
values=[self.normalize(image, flag=True),yneed_1])
h1 = tf.layers.conv2d(image_proc, filters=self.dim4, kernel_size=[4,4],
strides=[2,2], padding='SAME',
activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse, name="conv_1")
h1_relu = LeakyReLU(h1)
h1_concat = self.normalize(tf.concat(axis=3, values=[h1, yneed_2]))
h2 = tf.layers.conv2d(h1_concat, filters=self.dim3, kernel_size=[4,4],
strides=[2,2], padding='SAME',
activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name="conv_2")
h2_relu = LeakyReLU(h2)
h2_concat = self.normalize(tf.concat(axis=3, values=[h2_relu, yneed_3]))
h3 = tf.layers.conv2d(h2_concat, filters=self.dim2, kernel_size=[5,5],
strides=[1,1], padding='SAME',
activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name="conv_3")
h3_relu = LeakyReLU(h3)
h3_reshape = tf.reshape(h3_relu, shape=[-1, self.dim_4*self.dim_4*self.dim2])
h3_concat = self.normalize(tf.concat(axis=1, values=[h3_reshape, classes]),
name="h3_concat_normalize", reuse=scope.reuse)
h4 = tf.layers.dense(h3_concat, units=self.dim1,
activation=None,
kernel_initializer=self.initializer,
name='dense_1',
reuse=scope.reuse)
h4_relu = LeakyReLU(h4)
h4_concat = self.normalize(tf.concat(axis=1, values=[h4_relu, classes]),
name="h4_concat_normalize",reuse=scope.reuse)
h5 = tf.layers.dense(h4_concat, units=num_class,
activation=None,
kernel_initializer=self.initializer,
name='dense_2',
reuse=scope.reuse)
h5_relu = LeakyReLU(h5)
            h5_concat = self.normalize(tf.concat(axis=1, values=[h5_relu, classes]),
                                       name="h5_concat_normalize", reuse=scope.reuse)
            h6 = tf.layers.dense(h5_concat, units=self.num_class,
activation=None,
kernel_initializer=self.initializer,
name='dense_3',
reuse=scope.reuse)
return LeakyReLU(self.normalize(h6,name="last_normalize",reuse=scope.reuse))
def generate(self, embedding, classes, scope):
with tf.device(self.device):
ystack = tf.reshape(classes, [self.batch_size,1, 1, self.num_class])
embedding = tf.concat(axis=1, values=[embedding, classes])
h1 = tf.layers.dense(embedding, units=self.dim1, activation=None,
kernel_initializer=self.initializer,
name='dense_1', reuse=scope.reuse)
h1_relu = tf.nn.relu(self.normalize(h1))
h1_concat = tf.concat(axis=1, values=[h1_relu, classes])
h2 = tf.layers.dense(h1_concat, units=self.dim_8*self.dim_8*self.dim2,
activation=None, kernel_initializer=self.initializer,
name='dense_2', reuse=scope.reuse)
h2_relu = tf.nn.relu(self.normalize(h2))
h2_concat = tf.concat(axis=3,
values=[tf.reshape(h2_relu, shape=[self.batch_size,self.dim_8,self.dim_8,self.dim2]),
ystack*tf.ones(shape=[self.batch_size, self.dim_8, self.dim_8,
self.num_class])])
h3 = tf.layers.conv2d_transpose(inputs=h2_concat, filters = self.dim3,
kernel_size=[4,4], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name='conv_1')
h3_relu = tf.nn.relu(self.normalize(h3,flag=True))
# print(h3.get_shape())
h3_concat = tf.concat(axis=3,
values=[tf.reshape(h3_relu, shape=[self.batch_size,self.dim_4,self.dim_4,self.dim3]),
ystack*tf.ones(shape=[self.batch_size, self.dim_4, self.dim_4, self.num_class])])
h4 = tf.layers.conv2d_transpose(inputs=h3_concat, filters = self.dim4,
kernel_size=[4,4], strides=[2,2], padding='SAME', activation=tf.nn.relu,
kernel_initializer=self.initializer,
reuse=scope.reuse,name="conv_2")
h4_relu = tf.nn.relu(self.normalize(h4,flag=True))
h4_concat = tf.concat(axis=3,
values=[tf.reshape(h4_relu, shape=[self.batch_size,self.dim_2,self.dim_2,self.dim4]),
ystack*tf.ones(shape=[self.batch_size, self.dim_2, self.dim_2, self.num_class])])
h5 = tf.layers.conv2d_transpose(inputs=h4_concat, filters = 5*self.dim_channel,
kernel_size=[4,4], strides=[2,2], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse,name="conv_3")
h5_relu = tf.nn.relu(self.normalize(h5, flag=True))
h5_concat = tf.concat(axis=3,
values=[h5_relu, ystack*tf.ones(shape=[self.batch_size, self.dim_1, self.dim_1, self.num_class])])
h6 = tf.layers.conv2d_transpose(inputs=h5_concat, filters = self.dim_channel,
kernel_size=[5,5], strides=[1,1], padding='SAME', activation=None,
kernel_initializer=self.initializer,
reuse=scope.reuse, name="conv_4")
return tf.nn.sigmoid(h6)
def samples_generator(self):
with tf.device("/gpu:0"):
batch_size = self.batch_size
embedding = tf.placeholder(tf.float32,[batch_size, self.embedding_size])
classes = tf.placeholder(tf.float32,[batch_size,self.num_class])
with tf.variable_scope("generator") as scope:
scope.reuse_variables()
t = self.generate(embedding,classes,scope)
return embedding,classes,t
# training part
epoch = 100
learning_rate = 1e-2
batch_size = 64
embedding_size = 256
num_class = 20
gan = DCGAN(batch_size=batch_size, embedding_size=embedding_size, image_shape=[64,64,1], num_class=num_class)
embedding, vector, real_image, d_loss, g_loss, prob_fake, prob_real = gan.build_model()
session = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
# relevant weight list
g_weight_list = [i for i in (filter(lambda x: x.name.startswith("gen"),tf.trainable_variables()))]
d_weight_list = [i for i in (filter(lambda x: x.name.startswith("disc"),tf.trainable_variables()))]
print(g_weight_list)
print(d_weight_list)
lr1, lr2, momentum, folder_name = gan.learningR()
g_optimizer = tf.train.AdamOptimizer(lr1,beta1=momentum).minimize(g_loss,var_list=g_weight_list)
d_optimizer = tf.train.AdamOptimizer(lr2,beta1=momentum).minimize(d_loss,var_list=d_weight_list)
saver = tf.train.Saver()
embedding_sample, vector_sample, image_sample = gan.samples_generator()
tf.global_variables_initializer().run()
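# Builds a batch of 64x64 images, each containing two 28x28 MNIST digits, together with
# the concatenated one-hot labels of both digits scaled by 0.5.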
def generate(batch_size):
batch1, batch1_labels = mnist.train.next_batch(batch_size)
batch1 = batch1.reshape([batch_size, 28, 28, 1])
batch2, batch2_labels = mnist.train.next_batch(batch_size)
batch2 = batch2.reshape([batch_size, 28, 28, 1])
batch = np.zeros([batch_size,64,64,1])
batch[:,2:30,2:30,:] = batch1
batch[:,34:62,34:62,:] = batch2
return (batch, np.concatenate([batch1_labels,batch2_labels],axis=1)/2)
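# Tiles the generated samples into an nh x nw grid and saves it both as .npy and as an image.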
def save_visualization(X, nh_nw, save_path='../%s/dcgan_deep/sample.jpg'%(folder_name)):
h,w = X.shape[1], X.shape[2]
img = np.zeros((h * nh_nw[0], w * nh_nw[1], 3))
for n,x in enumerate(X):
j = n // nh_nw[1]
i = n % nh_nw[1]
img[j*h:j*h+h, i*w:i*w+w, :] = x
np.save("%s.%s"%(save_path.split(".")[0],".npy"), img)
scipy.misc.imsave(save_path, img)
embedding_sample = np.random.uniform(-1,1,size=[batch_size,embedding_size])
vector_sample = np.zeros([batch_size,num_class])
rand = np.random.randint(0,num_class-1,batch_size)
for t in range(batch_size):
vector_sample[t][rand[t]] = 1
sample_ = generate(batch_size)
save_visualization(sample_[0], (8,8))
vector_sample = sample_[1]
embedding_,vector_,image_sample = gan.samples_generator()
print('mnistsamples/sample_%d.jpg'%(batch_size))
for ep in range(epoch):
average_loss = [0,0]
start_time = time.time()
for t in range(64000 // batch_size):
# print(t+1)
batch = generate(batch_size)
random = np.random.uniform(-1,1,size=[batch_size,embedding_size]).astype(np.float32)
feed_dict_1 = {
real_image : batch[0],
embedding : random,
vector : batch[1]
}
feed_dict_2 = {
real_image : batch[0],
embedding : random,
vector : batch[1]
}
# g_loss_val = 0
_,g_loss_val = session.run([g_optimizer,g_loss],feed_dict=feed_dict_2)
_,d_loss_val = session.run([d_optimizer,d_loss],feed_dict=feed_dict_1)
if t%10 == 0 and t>0:
print("Done with batches: " + str(t*batch_size) + "Losses :: Generator: " + str(g_loss_val) + " and Discriminator: " + str(d_loss_val) + " = " + str(d_loss_val + g_loss_val))
print("Saving sample images and data for later testing for epoch: %d"%(ep+1))
feed_dict = {
# real_image : batch[0],
embedding_ : embedding_sample,
vector_ : vector_sample
}
gen_samples = session.run(image_sample,feed_dict=feed_dict)
save_visualization(gen_samples,(8,8),save_path=('../%s/dcgan_deep/sample_%d.jpg'%(folder_name,ep)))
saver.save(session,'./dcgan.ckpt')
print(time.time() - start_time)
print("Saved session")
|
py | b409429c210fe21ff8b565653a5d0c1dcc475a84 | __author__ = 'davidnovogrodsky_wrk'
import socket
# making a TCP connection
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# print out the connection information
print(s)
# act like a browser
# where to connect to
server = 'pythonprogramming.net'
port = 80
server_ip = socket.gethostbyname(server)
print(server_ip)
# make a http request
request = "GET / HTTP/1.1\nHOST: "+server+"\n\n"
# connect to server
s.connect((server,port))
# in python 3 we use strings, not byte strings
# so we need to encode the string before sending
s.send(request.encode())
# size of buffer
# this also does decoding
result = s.recv(4000)
# this prints the entire buffer at once
# and only the contents of the buffer
print(result)
print("\n\n\n")
# prints out the result
# in 1024 chunks
while (len(result) > 0):
print(result)
result= s.recv(1024)
|
py | b40942a62d72a1137661a523373b6f0c4992e3a2 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
from numpy import *
from matplotlib import rcParams
from matplotlib.colors import LogNorm
from math import *
import argparse
import os
import sys
from nema_common import *
SLICE_WIDTH = 2. # in cm
BINS_DISPLACEMENTS = 100
BINS_ANGLES = 90
SUFFIX_COINCIDENCES = "_NECR_COINCIDENCES_short"
SUFFIX_REALTIME = "_NECR_REALTIME_short"
OUTPUT_FORMAT = ""
CALCULATE_SF = False
TEXT_OUTPUT_FILE = "necr_dependency.txt"
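# Container for one (oblique) sinogram: LOR displacements, angles and the resulting 2D histogram.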
class sinogram:
def __init__(self):
self.displacements = []
self.angles = []
self.H = []
self.dedges = []
self.aedges = []
## The function discretizes the position of the hit along the strip.
#
# @param z position of the hit along the strip [cm]
# @param L legnth of the strip [cm]
#
# @return i index of the virtual slice (along the strip)
def z2i (z, L):
N = int(L/2.)
SINOGRAM_WIDTH = L/N # in cm
i = int(floor((z+L/2.)/SINOGRAM_WIDTH))
if i==N:
i=N-1 # TODO: correct the way pf calculating the index at the edge
return i
## The function performs rebinning using the SSRB algorithm.
#
# @param sinograms dictionary of all oblique sinograms
# @param N number of virtual slices
# @param H_shape shape of the hisotgrammed sinogram
#
# @return val dictionary of all rebinned sinograms
def perform_rebinning(sinograms, N, H_shape):
sinograms_rebinned = dict()
indices = linspace(0,2*N-2,2*N-1)/2. # indices of the rebinned sinogram, they may be partial: 0, 0.5, 1, ...
for i in indices:
s = sinogram()
Di = min(i, indices[-1]-i)
H_sum = zeros(H_shape)
for k in xrange(int(Di)+1):
current_slice = int(indices[int(i)]*2.)
couple = []
if Di == int(Di):
couple = sort([current_slice-k, current_slice+k])
else:
couple = sort([current_slice-k, current_slice+k+1])
if ((couple[0],couple[1]) in sinograms.keys()): # checking if oblique sinogram exists in the set of all sinograms
H_sum = H_sum + sinograms[(couple[0],couple[1])].H
s.H = H_sum
sinograms_rebinned[i] = s
return sinograms_rebinned
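# Builds sinograms from the coincidences of one activity point, rebins them (SSRB),
# estimates SF and NECR, and writes the diagnostic plots and text output.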
def perform_analysis(activity, filepath, workdir):
#===========================================
# Matplotlib plotting parameters
#===========================================
rcParams['font.size'] = 24
rcParams['legend.fontsize'] = 18
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
#===========================================
# Length of the scintillators
#===========================================
L = 0.
if "L020" in filepath:
L = 20.
elif "L050" in filepath:
L = 50.
elif "L100" in filepath:
L = 100.
elif "L200" in filepath:
L = 200.
else:
L = 50.
#===========================================
# Number of slices
#===========================================
N = int(L/SLICE_WIDTH)
#===========================================
# Loading coincidences from the file
#===========================================
coincidences = loadtxt(filepath + SUFFIX_COINCIDENCES)
posX1 = coincidences[:,0]
posY1 = coincidences[:,1]
posZ1 = coincidences[:,2]
times1 = coincidences[:,3]
posX2 = coincidences[:,4]
posY2 = coincidences[:,5]
posZ2 = coincidences[:,6]
times2 = coincidences[:,7]
toc = coincidences[:,12]
#===========================================
# Setting the time of the full simulation
#===========================================
time = 0.
realtime_path = filepath + SUFFIX_REALTIME
if os.path.exists(realtime_path):
time = loadtxt(realtime_path)
else:
time = max(max(times1),max(times2))/1e12
#===========================================
# Creating and filling the dictionary of sinograms
#===========================================
sinograms = dict()
# Counters initialization
N_true = 0
N_psca = 0
N_dsca = 0
N_acci = 0
N_all = 0
for i in xrange(len(coincidences)):
# Displacement - distance between LOR and (0,0,0) in XY crossection
displacement = 0.
denominator = sqrt((posY2[i]-posY1[i])**2 + (posX2[i]-posX1[i])**2)
if denominator != 0:
displacement = (posX2[i]*posY1[i]-posY2[i]*posX1[i]) / denominator
# Angle
angle = 0.
if (posX2[i]-posX1[i])!=0:
angle = atan((posY1[i]-posY2[i])/(posX2[i]-posX1[i]))
if displacement>0:
angle=angle+pi/2.
else:
angle=angle+3.*pi/2.
if angle > pi:
angle = angle - pi
displacement = -displacement
cond1 = displacement != 0. or angle != 0.
cond2 = (not isnan(displacement)) and (not isnan(angle))
if cond1 and cond2 and displacement<NEMA_DISPLACEMENT_CUT:
# Counters incrementation
if toc[i]==1: N_true = N_true + 1
elif toc[i]==2: N_psca = N_psca + 1
elif toc[i]==3: N_dsca = N_dsca + 1
elif toc[i]==4: N_acci = N_acci + 1
N_all = N_all + 1
indices = [z2i(posZ1[i],L), z2i(posZ2[i],L)]
indices.sort()
# the dictionary is filled using sorted pair of slice indexes, the event is attached to its sinogram
if (indices[0],indices[1]) in sinograms:
sinograms[(indices[0],indices[1])].displacements.append(displacement)
sinograms[(indices[0],indices[1])].angles.append(angle)
else:
# if there is no such a sinogram, it is created
s = sinogram()
sinograms[(indices[0],indices[1])] = s
sinograms[(indices[0],indices[1])].displacements.append(displacement)
sinograms[(indices[0],indices[1])].angles.append(angle)
#===========================================
# Converting sinograms into 2D histograms
#===========================================
BINS = [BINS_DISPLACEMENTS, BINS_ANGLES]
RANGE = [[0, 12], [0, pi]]
for s in sinograms:
H, dedges, aedges = histogram2d(sinograms[s].displacements, sinograms[s].angles, bins=BINS, range=RANGE)
sinograms[s].H = H
sinograms[s].dedges = dedges
sinograms[s].aedges = aedges
#===========================================
# Rebinning (SSRB)
#===========================================
sinograms_rebinned = perform_rebinning(sinograms, N, H.shape)
#===========================================
# Summing rebinned sinograms
#===========================================
H_final = zeros(H.shape)
for key in sinograms_rebinned:
H_final = H_final + sinograms_rebinned[key].H
#===========================================
# Plotting rebinned sinograms
#===========================================
plt.subplots_adjust(left=0.15, right=0.96, top=0.97, bottom=0.15)
plt.imshow(H_final.T, interpolation='nearest', origin='low', extent=[dedges[0], dedges[-1], aedges[0], aedges[-1]], aspect='auto')
plt.colorbar()
plt.xlabel("Displacement [cm]")
plt.ylabel("Angle [rad.]")
plt.savefig(workdir + "sinogram_final_" + activity + OUTPUT_FORMAT)
plt.clf()
#===========================================
# Summing lines of rebinned final sinogram
#===========================================
step = dedges[1]-dedges[0]
vecsum = zeros(2*BINS_DISPLACEMENTS-1)
for i in xrange(BINS_ANGLES):
vec = H_final[:,i].T
vec = list(vec)
maxind = vec.index(max(vec))
startindex = BINS_DISPLACEMENTS-maxind-1
for j in xrange(len(vec)):
vecsum[startindex+j] = vecsum[startindex+j]+vec[j]
veci = linspace(-BINS_DISPLACEMENTS+1, BINS_DISPLACEMENTS-1, 2*BINS_DISPLACEMENTS-1)
vecd = step*veci
#===========================================
ind_m20mm = BINS_DISPLACEMENTS - int(2./step) - 1
ind_p20mm = BINS_DISPLACEMENTS + int(2./step) - 1
val_m20mm = vecsum[ind_m20mm]
val_p20mm = vecsum[ind_p20mm]
# y = a*x + b
a = float(val_m20mm-val_p20mm)/float(ind_m20mm-ind_p20mm)
b = val_m20mm - (float(val_m20mm-val_p20mm))/(float(ind_m20mm-ind_p20mm))*float(ind_m20mm)
thres = a*(veci+BINS_DISPLACEMENTS-1)+b
# T - above the threshold line (treated as true)
# S - below the threshold line (treated as scattered but includes also the accidental coincidences)
T = 0 # true
S = 0 # scattered (and accidental)
for i in xrange(len(vecsum)):
if vecd[i]<-2 or vecd[i]>2:
S = S + vecsum[i]
else:
if vecsum[i]<thres[i]:
S = S + vecsum[i]
else:
S = S + thres[i]
T = T + vecsum[i] - thres[i]
    # use one denominator so that T + S still sums to N_all after rescaling
    total = S + T
    T = T/total*N_all
    S = S/total*N_all
#===========================================
plt.subplots_adjust(left=0.15, right=0.96, top=0.97, bottom=0.15)
plt.plot(vecd,vecsum/1000.)
plt.plot([vecd[ind_m20mm],vecd[ind_p20mm]],[thres[ind_m20mm]/1000.,thres[ind_p20mm]/1000.],color='r')
plt.plot([-2,-2],[0,2*thres[ind_m20mm]/1000.],color='k')
plt.plot([2,2],[0,2*thres[ind_p20mm]/1000.],color='k')
plt.xlabel("Displacement [cm]")
plt.ylabel("Kilo counts")
plt.xlim(-12,12) # from NEMA cut
plt.ylim(0,1.1*max(vecsum/1000.))
plt.savefig(workdir + "sumhist_" + activity + OUTPUT_FORMAT)
plt.clf()
plt.close()
#===========================================
if not CALCULATE_SF:
float_activity = float(activity)/22000.*1000 # in kBq/cc (activity is in MBq, volume in cc)
else:
float_activity = 0.001/22000.*1000 # in kBq/cc (activity is in MBq, volume in cc)
SF_sin = S/(S+T)*100. # sin - sinogram
SF_ctr = float(N_dsca+N_psca)/(N_dsca+N_psca+N_true)*100. # ctr - counter
NECR_sin = T*T/(S+T)/time
NECR_ctr = N_true*N_true/N_all/time
NECR_sin = NECR_sin/1000. # cps -> kcps
NECR_ctr = NECR_ctr/1000. # cps -> kcps
ratio_acci = N_acci/float(N_all)
# Printing calculated values
data_for_single_activity = str(float_activity) + "\t"
data_for_single_activity += str(SF_sin) + "\t" + str(SF_ctr) + "\t"
data_for_single_activity += str(NECR_sin) + "\t" + str(NECR_ctr) + "\t"
data_for_single_activity += str(T) + "\t" + str(S) + "\t"
data_for_single_activity += str(N_true) + "\t" + str(N_dsca) + "\t" + str(N_psca) + "\t" + str(N_acci) + "\t"
data_for_single_activity += str(time) + "\t"
data_for_single_activity += str(ratio_acci)
print data_for_single_activity
with open(workdir + TEXT_OUTPUT_FILE, "a") as necr_dependency:
necr_dependency.write(data_for_single_activity + '\n')
#===========================================
tim_diffs = []
ang_diffs = []
for i in xrange(len(coincidences)):
        tdiff = abs(times1[i]-times2[i])/1e3  # in ns; TODO: verify that abs() is the intended behaviour
tim_diffs.append(tdiff)
#v=[vx,vy] = posX1, posY1
#u=[ux,uy] = posX2, posY2
vx = posX1[i]
vy = posY1[i]
ux = posX2[i]
uy = posY2[i]
vu = posX1[i]*posX2[i] + posY1[i]*posY2[i]
modv = sqrt(posX1[i]*posX1[i] + posY1[i]*posY1[i])
modu = sqrt(posX2[i]*posX2[i] + posY2[i]*posY2[i])
try:
a = acos(vu/(modv*modu))/pi*180.
adiff = a
        except:
            # acos may fail (zero-length vector or rounding outside [-1, 1]); fall back to 0
            adiff = 0.
ang_diffs.append(adiff)
#===========================================
# Saving 2D histogram into the image
H, xedges, yedges = histogram2d(tim_diffs, ang_diffs, bins=(BINS_DISPLACEMENTS,BINS_ANGLES), range=[[0, 3],[0, 180]])
fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(111)
plt.subplots_adjust(left=0.20, right=0.9, top=0.9, bottom=0.1)
VMAX = H.max()
plt.imshow(H.T, interpolation='None', origin='low',
extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]],
aspect='auto', norm=LogNorm(vmin=1, vmax=VMAX))
plt.colorbar()
xxx = linspace(0,ELLIPSE_PARAM,100)
yyy = []
# Ellipse curve
for i in xrange(len(xxx)):
yyy.append(ellipsoid_threshold(xxx[i]))
plt.plot(xxx,yyy,color='red')
plt.xlabel("Time difference [ns]")
plt.ylabel("Angle difference [deg.]")
#plt.show()
plt.ylim(90,180)
plt.savefig(workdir + "2D_differences_" + activity + OUTPUT_FORMAT, bbox_inches='tight')
plt.clf()
plt.close()
def is_coincidences_directory_valid(coincidences_directory):
#TODO
return True
if __name__ == "__main__":
#===========================================
# The script assumes that the files with next activities are in the "directory"
# and they have name matching the pattern: geometry_NECR_activity, for example
# D85_1lay_L050_7mm_NECR_1000 (GOJA format)
#===========================================
parser = argparse.ArgumentParser(description='Calculate sensitivity and sensitivity profiles using the GOJA results.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-cd', '--coincidences-directory',
dest='coincidences_directory',
type=str,
default="/media/pkowalski/TOSHIBA EXT/NCBJ/GATE/NEMA/4_NECR/N0_1000_short/",
help='path to dir with the GOJA sensitivity results')
parser.add_argument('-sw', '--slice-width',
dest='slice_width',
type=float,
default=SLICE_WIDTH,
help='width of the virtual slice')
parser.add_argument('-bd', '--bins-displacements',
dest='bins_displacements',
type=float,
default=BINS_DISPLACEMENTS,
help='nr of bins for displacements')
parser.add_argument('-ba' ,'--bins-angles',
dest='bins_angles',
type=float,
default=BINS_ANGLES,
help='nr of bins for angles')
parser.add_argument('--scatter-fraction',
dest='scatter_fraction',
action='store_true',
help='use to calculate the SF using the set of 1 kBq simulations')
parser.add_argument('-of', '--outputformat',
dest='outputformat',
type=str,
default="png",
help='output format of images')
args = parser.parse_args()
OUTPUT_FORMAT = "." + args.outputformat
if not args.coincidences_directory:
print "No directory with coincidences provided. Analysis cannot be performed. Check --help option."
sys.exit()
elif not os.path.isdir(args.coincidences_directory):
print "Directory " + args.coincidences_directory + " is unavailable. Check --help option."
sys.exit()
elif not is_coincidences_directory_valid(args.coincidences_directory):
print "Directory " + args.coincidences_directory + " is not valid. It should contain coincidences files with proper names. Check --help option."
sys.exit()
if args.scatter_fraction:
CALCULATE_SF = True
TEXT_OUTPUT_FILE = "sf.txt"
SUFFIX_COINCIDENCES = "SF_COINCIDENCES_short"
SUFFIX_REALTIME = "SF_REALTIME_short"
activities_NECR = [""]
SLICE_WIDTH = args.slice_width
BINS_DISPLACEMENTS = int(args.bins_displacements)
BINS_ANGLES = int(args.bins_angles)
directory = args.coincidences_directory
create_work_directories()
for geometry in geometries_NECR:
print geometry
workdir = workdir_NECR + geometry + "/"
if (not os.path.isdir(workdir)):
os.system("mkdir " + workdir)
if (os.path.isfile(workdir + TEXT_OUTPUT_FILE)):
os.system("rm " + workdir + TEXT_OUTPUT_FILE)
for i in xrange(len(activities_NECR)):
activity = activities_NECR[i]
filepath = directory + geometry + "_" + activity
perform_analysis(activity, filepath, workdir)
|
py | b40942bf3d72d67a14bc2717c8f3f4e9f208e6c9 | import zlib
from ._json import _CompactJSON
from .encoding import base64_decode
from .encoding import base64_encode
from .exc import BadPayload
from .serializer import Serializer
from .timed import TimedSerializer
class URLSafeSerializerMixin(object):
"""Mixed in with a regular serializer it will attempt to zlib
compress the string to make it shorter if necessary. It will also
base64 encode the string so that it can safely be placed in a URL.
"""
default_serializer = _CompactJSON
def load_payload(self, payload, *args, **kwargs):
decompress = False
if payload.startswith(b"."):
payload = payload[1:]
decompress = True
try:
json = base64_decode(payload)
except Exception as e:
raise BadPayload(
"Could not base64 decode the payload because of an exception",
original_error=e,
)
if decompress:
try:
json = zlib.decompress(json)
except Exception as e:
raise BadPayload(
"Could not zlib decompress the payload before decoding the payload",
original_error=e,
)
return super(URLSafeSerializerMixin, self).load_payload(json, *args, **kwargs)
def dump_payload(self, obj):
json = super(URLSafeSerializerMixin, self).dump_payload(obj)
is_compressed = False
compressed = zlib.compress(json)
if len(compressed) < (len(json) - 1):
json = compressed
is_compressed = True
base64d = base64_encode(json)
if is_compressed:
base64d = b"." + base64d
return base64d
class URLSafeSerializer(URLSafeSerializerMixin, Serializer):
"""Works like :class:`.Serializer` but dumps and loads into a URL
safe string consisting of the upper and lowercase character of the
alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
"""
class URLSafeTimedSerializer(URLSafeSerializerMixin, TimedSerializer):
"""Works like :class:`.TimedSerializer` but dumps and loads into a
URL safe string consisting of the upper and lowercase character of
the alphabet as well as ``'_'``, ``'-'`` and ``'.'``.
"""
|
py | b40943036d0df706183b42995c94308bf74f4347 | from django.test import TestCase
from bot.exceptions import HelpfulError
class HelpfulErrorTest(TestCase):
"""
Checks if HelpfulError is raised correctly.
"""
def test_correct_format(self):
"""
Translation works correctly.
"""
msg = "\nAn error ocurred\n\tProblem: Test Error Msg\n\n\tSolution: Just keep going.\n\n"
ex = HelpfulError(issue="Test Error Msg", solution="Just keep going.")
self.assertEqual(msg, ex.message, "Message has incorrect format.")
|
py | b409433b55c0e78161ce1f47733f01b9fdaddd24 | from stage import *
class Reshape(Stage):
def __init__(self, reshapeFn, inputNames=None, outputDim=0, name=None, outputdEdX=True):
Stage.__init__(self, name=name, inputNames=inputNames, outputDim=outputDim, outputdEdX=outputdEdX)
self.reshapeFn = eval('lambda x: ' + reshapeFn)
self.Xshape = 0
def forward(self, X):
self.Xshape = X.shape
return np.reshape(X, self.reshapeFn(X.shape))
def backward(self, dEdY):
if self.outputdEdX:
return np.reshape(dEdY, self.Xshape)
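# Flattens the time axis: (N, T, D) -> (N * T, D).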
class TimeUnfold(Reshape):
def __init__(self, inputNames=None, name=None, outputdEdX=True):
Reshape.__init__(self,
name=name,
inputNames=inputNames,
reshapeFn='(x[0] * x[1], x[2])',
outputdEdX=outputdEdX)
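# Restores the time axis for a fixed timespan T: (N * T, D) -> (N, T, D).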
class TimeFold(Reshape):
def __init__(self, timespan, inputNames=None, name=None, outputdEdX=True):
self.timespan = timespan
t = str(self.timespan)
Reshape.__init__(self,
name=name,
inputNames=inputNames,
reshapeFn='(x[0] / '+t+','+t+', x[1])',
outputdEdX=outputdEdX)
class TimeReverse(Stage):
def __init__(self, inputNames, outputDim=0, name=None, outputdEdX=True):
Stage.__init__(self,
name=name,
inputNames=inputNames,
outputDim=outputDim,
outputdEdX=outputdEdX)
def forward(self, X):
#print self.name, X.shape
N = X.shape[0]
self.Xend = np.zeros(N, dtype=int) + X.shape[1]
reachedEnd = np.sum(X, axis=-1) == 0.0
Y = np.zeros(X.shape)
# Scan for the end of the sequence.
for n in range(N):
found = False
for t in range(X.shape[1]):
if reachedEnd[n, t]:
self.Xend[n] = t
if t > 0:
found = True
Y[n, 0:t, :] = X[n, t-1::-1, :]
break
if found == False:
self.Xend[n] = X.shape[1]
Y[n, :, :] = X[n, ::-1, :]
return Y
def backward(self, dEdY):
if self.outputdEdX:
dEdX = np.zeros(dEdY.shape)
for n in range(dEdY.shape[0]):
t = self.Xend[n]
if t > 0:
dEdX[n, 0:t, :] = dEdY[n, t-1::-1, :]
return dEdX
else:
return None
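# Repeats each example numRepeats times along the time axis: (N, D) or (N, 1, D) -> (N, numRepeats, D).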
class TimeRepeat(Stage):
def __init__(self, numRepeats, inputNames=None, outputDim=0, name=None, outputdEdX=True):
Stage.__init__(self, name=name, inputNames=inputNames, outputDim=outputDim, outputdEdX=outputdEdX)
self.numRepeats = numRepeats
def forward(self, X):
self.Xshape = X.shape
if len(X.shape) == 2:
X = X.reshape(X.shape[0], 1, X.shape[1])
return np.tile(X, (1, self.numRepeats, 1))
def backward(self, dEdY):
if self.outputdEdX:
dEdY = dEdY.reshape(
dEdY.shape[0], self.numRepeats, dEdY.shape[1] / self.numRepeats, dEdY.shape[2])
dEdX = np.sum(dEdY, axis=1)
if len(self.Xshape) == 2:
dEdX = dEdX.reshape(dEdX.shape[0], dEdX.shape[-1])
return dEdX
class TimeFinal(Stage):
"""
Scans and selects the last timestep.
"""
def __init__(self, inputNames, outputDim=0, name=None, outputdEdX=True):
Stage.__init__(self,
name=name,
inputNames=inputNames,
outputDim=outputDim,
outputdEdX=outputdEdX)
self.Xend = 0.0
def forward(self, X):
N = X.shape[0]
self.X = X
self.Xend = np.zeros(N, dtype=int) + X.shape[1]
reachedEnd = np.sum(X, axis=-1) == 0.0
Y = np.zeros((N, X.shape[-1]))
# Scan for the end of the sequence.
for n in range(N):
for t in range(X.shape[1]):
if reachedEnd[n, t]:
self.Xend[n] = t
break
for n in range(N):
if self.Xend[n] > 0:
Y[n] = X[n, self.Xend[n] - 1]
return Y
def backward(self, dEdY):
if self.outputdEdX:
dEdX = np.zeros(self.X.shape)
for n in range(dEdY.shape[0]):
if self.Xend[n] > 0:
dEdX[n, self.Xend[n] - 1, :] = dEdY[n]
return dEdX
else:
return None
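# Concatenates the outputs of several input stages along a given axis and splits the
# incoming error signal back along the same axis.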
class Concat(Stage):
def __init__(self, inputNames, axis, name=None):
Stage.__init__(self, name=name, inputNames=inputNames, outputDim=0)
self.axis = axis
def getInput(self):
if len(self.inputs) > 1:
self.splX = []
for stage in self.inputs:
X = stage.Y
self.splX.append(X)
return np.concatenate(self.splX, axis=self.axis)
else:
return self.inputs[0].Y
def sendError(self, dEdX):
"""
Iterates over input list and sends dEdX.
"""
if len(self.inputs) > 1:
s = 0
for stage in self.inputs:
s2 = s + stage.Y.shape[self.axis]
if self.axis == 0:
stage.dEdY += dEdX[s : s2]
elif self.axis == 1:
stage.dEdY += dEdX[:, s : s2]
elif self.axis == 2:
stage.dEdY += dEdX[:, :, s : s2]
s = s2
stage.receivedError = True
else:
self.inputs[0].dEdY += dEdX
self.inputs[0].receivedError = True
def forward(self, X):
return X
def backward(self, dEdY):
return dEdY
|
py | b40944468ad06883d371b3f282ece40a51851199 | #! /usr/bin/env python
if 0:
fl1="adam_svnlogger.log"
fl2="adam_notes-svn_not_under_version_control_files.log"
## this particular example is meant to take all of the lines in fl1 that are NOT in fl2 and put them in fl_out
fl_out="adam_notes-svn_useful.log"
fo1=open(fl1,'r')
fo2=open(fl2,'r')
lines1=fo1.readlines()
lines2=fo2.readlines()
lines_out=[]
for l1 in lines1:
if not l1 in lines2:
lines_out.append(l1)
fo_out=open(fl_out,'w')
fo_out.writelines(lines_out)
fo_out.close()
import commands
import string
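# Scan the list of potentially useful files, collect those mentioning photoz/bpz,
# and report line counts via "wc -l".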
fl1="adam_notes-potentially_useful_files.log"
fo1=open(fl1,'r')
lines1=fo1.readlines()
lines1=map(string.strip,lines1)
photoz_or_bpz_files=[]
for l1 in lines1:
if "photoz" in l1 or "bpz" in l1:
photoz_or_bpz_files.append(l1)
command="wc -l "+l1
out = commands.getoutput(command)
print l1,out
print photoz_or_bpz_files
fl1="APER0-1_files_quick.log"
fl2="APER-files_quick.log"
## this particular example is meant to take all of the lines in fl1 and fl2 and put them in fl_out
fl_out="adam_notes-APER0-1_and_APER.log"
fo1=open(fl1,'r')
fo2=open(fl2,'r')
lines1=fo1.readlines()
lines2=fo2.readlines()
lines_out=[]
for l1 in lines1:
if l1 in lines2:
lines_out.append(l1)
fo_out=open(fl_out,'w')
fo_out.writelines(lines_out)
fo_out.close()
|
py | b40945438d39ab5c97601ebc6e489c426f0d5c3b | from everything import *
import sys, time, requests, string
DL_TOKEN = { 'Authorization' : 'DirectLogin token=' }
CONTENT_JSON = { 'content-type' : 'application/json' }
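# Thin wrapper around the Open Bank Project (OBP) REST API using DirectLogin token authentication.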
class BankingApi:
def __init__(self, cfg):
self.cfg = cfg
def mergeHeaders(self, x, y):
z = x.copy()
z.update(y)
return z
def log(self, m):
print(m)
def setToken(self, t):
global DL_TOKEN
DL_TOKEN = { 'Authorization' : 'DirectLogin token=%s' % t}
def login(self, username, password):
login_url = '{0}/my/logins/direct'.format(self.cfg['bank']['base_url'])
auth_body = 'DirectLogin username={0},password={1},consumer_key={2}'\
.format(username, password,\
self.cfg['bank']['consumer_key'])
login_header = { 'Authorization' : auth_body}
self.log('Login as {0} to {1}'.format(login_header, login_url))
r = requests.get(login_url, headers=login_header)
if (r.status_code != 200):
self.log("error: could not login")
self.log("text: " + r.text)
return r.text
t = r.json()['token']
self.log("Received token: {0}".format(t))
self.setToken(t)
return t
def getBanks(self):
# Prepare headers
response = requests.get(u"{0}/obp/{1}/banks"\
.format(self.cfg['bank']['base_url'],\
self.cfg['bank']['api_version']),\
headers=self.mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()['banks']
def getAccounts(self):
list_of_accounts = []
response = requests.get(u"{0}/obp/{1}/accounts"\
.format(self.cfg['bank']['base_url'],\
self.cfg['bank']['api_version']),\
headers=self.mergeHeaders(DL_TOKEN,CONTENT_JSON))
for i in response.json():
for j in i['views_available']:
if type(j) == dict:
if j['is_public'] == False:
list_of_accounts.append(i)
result = "Your accounts are "
for i in list_of_accounts:
result += 'bank ' + i['bank_id'] + ' '
result += 'with id ' + i['id'] + ' '
return result
#Gets all private accounts and their information
def getPrivateAccounts(self, bank):
# Prepare headers
response = requests.get(u"{0}/obp/{1}/banks/{2}/accounts/private"\
.format(self.cfg['bank']['base_url'],\
self.cfg['bank']['api_version'],\
bank),\
headers=self.mergeHeaders(DL_TOKEN, CONTENT_JSON))
return response.json()['accounts']
#Goest through all private accounts and just pulls out their names
def getPrivateAccountNames(self, bank):
accounts = []
result = "Your accounts are "
for i in self.getPrivateAccounts(bank):
accounts.append(i['id'])
for i in accounts:
result += i + ' '
return result
#Gets the amount of the balance in an account
def getBalance(self, bank, account):
response = requests.get(u"{0}/obp/{1}/my/banks/{2}/accounts/{3}/account"\
.format(self.cfg['bank']['base_url'],\
self.cfg['bank']['api_version'],\
bank,\
account),\
headers=self.mergeHeaders(DL_TOKEN, CONTENT_JSON))
balance = response.json()['balance']['amount']
        result = 'The balance for your {0} account is ${1}'.format(account, balance)
return result
#Gets all transactions made on an account
def getNumberOfTransactions(self, bank, account):
response = requests.get(u"{0}/obp/{1}/banks/{2}/accounts/{3}/owner/transactions"\
.format(self.cfg['bank']['base_url'],\
self.cfg['bank']['api_version'],\
bank,\
account),\
headers=self.mergeHeaders(DL_TOKEN, CONTENT_JSON))
result = 'You have had ' + str(len(response.json()['transactions'])) + ' transactions'
return result
def getTransactionNumber(self, bank, account, transaction_number):
transaction_number -= 1
response = requests.get(u"{0}/obp/{1}/banks/{2}/accounts/{3}/owner/transactions"\
.format(self.cfg['bank']['base_url'],\
self.cfg['bank']['api_version'],\
bank,\
account),\
headers=self.mergeHeaders(DL_TOKEN, CONTENT_JSON))
        result = 'Transaction ' + str(transaction_number + 1) + " was made on "\
+ response.json()['transactions'][transaction_number]['details']['completed']\
.split('T')[0] + ' with an amount of $'\
+ response.json()\
['transactions'][transaction_number]['details']['value']['amount']
return result
def getMostRecentTransaction(self,bank,account):
response = requests.get(u"{0}/obp/{1}/banks/{2}/accounts/{3}/owner/transactions"\
.format(self.cfg['bank']['base_url'],\
self.cfg['bank']['api_version'],\
bank,\
account),\
headers=self.mergeHeaders(DL_TOKEN, CONTENT_JSON))
result = 'Your most recent transaction was made on ' + response.json()['transactions'][0]['details']['completed'].split('T')[0] + ' with an amount of ' + response.json()['transactions'][0]['details']['value']['amount']
return result
def makePayment(self, mybank, myaccount, otheraccount, amount):
#print self.getPrivateAccountNames(self.cfg['bank']['bank_id'])
if otheraccount in self.getPrivateAccounts(self.cfg['bank']['bank_id']):
post_data = {
"account_id" : '%s' % otheraccount,\
"bank_id" : '%s' % mybank,\
"amount" : '%s' % amount
}
res = requests.post(u"{0}/obp/{1}/banks/{2}/accounts/{3}/owner/transactions"\
.format(self.cfg['bank']['base_url'],\
self.cfg['bank']['api_version'],\
mybank, myaccount),\
json=post_data,\
headers=self.mergeHeaders(DL_TOKEN,CONTENT_JSON))
print res.content
result = "You have made a payment to bank " + mybank + " account "\
+ " with an amount of $" + amount
else:
result = "Sorry you do not have an account named {}".format(otheraccount)
return result
def changeAccounts(self, new_account):
self.cfg['bank']['account'] = new_account
return "Account switched to {}. What do you want to know".format(new_account)
|
py | b409479563fc8f4aea512e6909f18957de124e4f | """
For permutation cycles [[0, 1], [0, 2]]
"""
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 -- registers the '3d' projection on older matplotlib
LEVEL = 4
level_1_lambdas = [-2. + 0.j, 4. + 0.j, 2. + 0.j]
level_2_lambdas = [-2. + 0.j, -0.73205081 + 0.j, 4. + 0.j, 2.73205081 + 0.j,
-1.64575131 + 0.j, 3.64575131 + 0.j, 2. + 0.j, 2. + 0.j, 2. + 0.j]
level_3_lambdas = [-2. + 0.j, -1.78065654 + 0.j, -1.06590154 + 0.j, -0.73205081 + 0.j,
2.73205081 + 0.j, 3.06590154 + 0.j, 4. + 0.j, 3.78065654 + 0.j,
-1.94036585 + 0.j, -0.83146081 + 0.j, 3.94036585 + 0.j, 2.83146081 + 0.j,
-1.64575131 + 0.j, -1.64575131 + 0.j, 3.64575131 + 0.j, -1.64575131 + 0.j,
3.64575131 + 0.j, 3.64575131 + 0.j, 2. + 0.j, 2. + 0.j,
2. + 0.j, 2. + 0.j, 2. + 0.j, 2. + 0.j,
2. + 0.j, 2. + 0.j, 2. + 0.j]
level_4_lambdas = [3.78065654 + 0.j, 3.84005309 + 0.j, 4. + 0.j, 3.96321726 + 0.j,
3.06590154 + 0.j, 2.73205081 + 0.j, -1.06590154 + 0.j, -0.98345619 + 0.j,
-0.73205081 + 0.j, 3.99004446 + 0.j, 3.94036585 + 0.j, -2. + 0.j,
-1.84005309 + 0.j, 2.98345619 + 0.j, 2.7942529 + 0.j, -0.7942529 + 0.j,
-1.78065654 + 0.j, -1.96321726 + 0.j, 3.79847473 + 0.j, -1.04170007 + 0.j,
3.04170007 + 0.j, 2.749181 + 0.j, -0.749181 + 0.j, -1.99004446 + 0.j,
-1.79847473 + 0.j, 2.83146081 + 0.j, 3.64575131 + 0.j, -0.83146081 + 0.j,
3.94036585 + 0.j, 2. + 0.j, -0.83146081 + 0.j, 3.64575131 + 0.j,
-1.94036585 + 0.j, 2. + 0.j, 2.83146081 + 0.j, -1.64575131 + 0.j,
-1.94036585 + 0.j, 3.94036585 + 0.j, 2. + 0.j, -1.64575131 + 0.j,
-1.94036585 + 0.j, -1.64575131 + 0.j, -1.64575131 + 0.j, 3.64575131 + 0.j,
-0.83146081 + 0.j, 2. + 0.j, 2.83146081 + 0.j, 3.64575131 + 0.j,
2. + 0.j, 3.64575131 + 0.j, 3.64575131 + 0.j, 2. + 0.j,
2. + 0.j, -1.64575131 + 0.j, -1.64575131 + 0.j, -1.64575131 + 0.j,
3.64575131 + 0.j, 3.64575131 + 0.j, 2. + 0.j, 2. + 0.j,
-1.64575131 + 0.j, -1.64575131 + 0.j, 2. + 0.j, 2. + 0.j,
2. + 0.j, 3.64575131 + 0.j, 2. + 0.j, 2. + 0.j,
2. + 0.j, 2. + 0.j, 2. + 0.j, 2. + 0.j,
2. + 0.j, 2. + 0.j, 2. + 0.j, 2. + 0.j,
2. + 0.j, 2. + 0.j, 2. + 0.j, 2. + 0.j,
2. + 0.j]
###############################################################################
# For listing out elements in given sets - for testing purposes only
def lister(lis):
for i in range(len(lis)):
print(lis[i].real)
return
# For converting eigenvalue output sets to more usable format (only for undirected graphs)
def list_maker(lis):
made_list = []
for i in range(len(lis)):
made_list.append(lis[i].real)
return made_list
# Counts eigenvalue multiplicities and labels each element w multiplicity and schreier graph level
def element_counter_and_labeler(lis, level):
count_list = []
full_list = []
total_multiplicity = len(lis)
for i in range(len(lis)):
count_list.append(0)
for j in range(len(lis)):
if lis[i] == lis[j]:
count_list[i] += 1
else:
continue
for i in range(len(lis)):
full_list.append((lis[i], count_list[i]/total_multiplicity, level))
list_final = list(set(full_list))
return list_final
###############################################################################
level_1_list = element_counter_and_labeler(list_maker(level_1_lambdas), 1)
level_2_list = element_counter_and_labeler(list_maker(level_2_lambdas), 2)
level_3_list = element_counter_and_labeler(list_maker(level_3_lambdas), 3)
level_4_list = element_counter_and_labeler(list_maker(level_4_lambdas), 4)
super_list = [level_1_list, level_2_list, level_3_list, level_4_list]
###############################################################################
# for picking apart tuple elements from element_counter_and_labeler to use for plotting
def element_label(lis, num):
x = []
for i in range(len(lis)):
x.append(lis[i][num])
return x
# for ordering eigenvalue lists by their level
def level_sort_key(lst):
return lst[2]
# for finding which eigenvalues generate at which level of the graph
def eigen_level(lis):
diff_lvl_same_eigen = []
for i in range(LEVEL):
for j in range(LEVEL):
for k in range(len(lis[i])):
for l in range(len(lis[j])):
if lis[i][k][0] == lis[j][l][0]:
a = lis[i][k]
b = lis[j][l]
diff_lvl_same_eigen.append(a)
diff_lvl_same_eigen.append(b)
else:
continue
no_dups = list(set(diff_lvl_same_eigen))
combined_list = []
for i in range(len(no_dups)):
eigen = no_dups[i][0]
same_eigen = []
for j in range(len(no_dups)):
if eigen == no_dups[j][0]:
same_eigen.append(no_dups[j])
same_eigen.sort(key=level_sort_key)
combined_list.append(tuple(same_eigen))
final_list = list(set(combined_list))
for i in range(len(final_list)):
final_list[i] = list(final_list[i])
return final_list
#removes eigenvalues from eigen_level list that appear at the last level of Schreier graph to make plotting better
def shortener(lis):
shortened_list = []
for i in range(len(lis)):
if len(lis[i]) == 1:
continue
else:
shortened_list.append(lis[i])
return shortened_list
continuity_lines_list = shortener(eigen_level(super_list))
# holdover from v1 - basically here to set axis bounds for now
x_4 = element_label(level_4_list, 0)
y_4 = element_label(level_4_list, 1)
# syntax for 3-D projection
ax = plt.axes(projection='3d')
# axes sizes (adjusted for best viewing experience)
ax.set_xlim3d(min(x_4), max(x_4))
ax.set_ylim3d(1, 4)
ax.set_zlim3d(0, max(y_4))
# listing each set of level eigenvalues
for i in range(len(super_list)):
x = element_label(super_list[i], 0)
y = element_label(super_list[i], 1)
z = element_label(super_list[i], 2)
ax.scatter(x, y, z, zdir='y')
# plotting lines connecting same eigenvalues throughout levels
for i in range(len(continuity_lines_list)):
x = element_label(continuity_lines_list[i], 0)
y = element_label(continuity_lines_list[i], 1)
z = element_label(continuity_lines_list[i], 2)
ax.plot(x,y,z, zdir='y')
# graph labeling
ax.set_title('Spectrum Distribution')
ax.set_xlabel('Eigenvalue')
ax.set_ylabel('Level of Schreier Graph')
ax.set_zlabel('Relative Multiplicity')
plt.show()
|
py | b4094796bbe2671c978213867b383aa77e509e4a | from abc import abstractmethod
from typing import Dict, Optional, Union
from bui.specific.base import *
class SpecificWindow(SpecificWidget):
"""Parent class for a specific window object."""
widget_name = "window"
@property
@abstractmethod
def usable_surface(self):
"""
Return the screen size that can be used, in pixels.
This size is returned in a tuple: (width, height), so that
(x, y) follow the exact same pattern. Both components are integers.
Note that this is the screen surface being "free", that is,
not counting the taskbar on some operating systems, since
we cannot draw on that. Therefore, the usable surface tends
to be somewhat narrower than the screen resolution.
"""
pass
@property
@abstractmethod
def title(self):
"""Return the current title, override in child class."""
pass
@title.setter
@abstractmethod
def title(self, new_title):
"""Set the window's title, override in child class."""
pass
def _init(self):
"""Initialize the specific widget."""
self.title = self.generic.leaf.title
@abstractmethod
def _start(self, loop):
"""
Start the window, watch events and allow async loop.
Args:
loop (AsyncLoop): the asynchronous event loop (see asyncio).
"""
pass
    @abstractmethod
def close(self):
"""Close this window, terminate loop if appropriate."""
pass
@abstractmethod
def add_widget(self, widget: SpecificWidget):
"""
Add a widget on the window.
Args:
widget (SpecificWidget): the specific widget to add.
"""
pass
@abstractmethod
def pop_dialog(self, dialog: SpecificWidget, **kwargs):
"""Pop up a dialog."""
pass
@abstractmethod
def pop_menu(self, context: SpecificWidget):
"""Pop a context menu, blocks until the menu is closed."""
pass
@abstractmethod
def pop_alert(self, title: str, message: str,
danger: str, buttons: Dict[str, Union[bool, str]],
default: str):
"""
Display an alert message.
Args:
title (str): the alert title.
message (str): the alert message.
danger (str): the alert danger (dialog type).
buttons (dict): the buttons of this dialog.
default (str): the default button for this dialog.
"""
pass
|
py | b4094829ba8e0d2a0fd11565459133b05e68bb1d | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import uuid
from dataclasses import dataclass, field
from typing import Dict, Iterable, List
import requests
from redash_toolbelt import Redash
from metadata.generated.schema.entity.data.chart import Chart
from metadata.generated.schema.entity.services.dashboardService import (
DashboardServiceType,
)
from metadata.generated.schema.type.entityReference import EntityReference
from metadata.ingestion.api.common import ConfigModel, Entity, WorkflowContext
from metadata.ingestion.api.source import Source, SourceStatus
from metadata.ingestion.models.table_metadata import Chart as ModelChart
from metadata.ingestion.models.table_metadata import Dashboard
from metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig
from metadata.utils.helpers import get_dashboard_service_or_create
class RedashSourceConfig(ConfigModel):
uri: str = "http://localhost:5000"
username: str = ""
api_key: str
service_name: str
service_type: str = DashboardServiceType.Redash.value
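# Illustrative only (the values below are placeholders, not taken from this file):
# the dict handed to RedashSourceConfig.parse_obj() in create() is expected to
# look roughly like
#
#   RedashSourceConfig.parse_obj({
#       "uri": "http://localhost:5000",
#       "username": "admin",
#       "api_key": "<redash-api-key>",
#       "service_name": "local_redash",
#   })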
@dataclass
class RedashSourceStatus(SourceStatus):
items_scanned: int = 0
filtered: List[str] = field(default_factory=list)
def item_scanned_status(self) -> None:
self.items_scanned += 1
def item_dropped_status(self, item: str) -> None:
self.filtered.append(item)
class RedashSource(Source[Entity]):
config: RedashSourceConfig
metadata_config: MetadataServerConfig
status: RedashSourceStatus
platform = "redash"
dashboards_to_charts: Dict[str, List[str]]
def __init__(
self,
config: RedashSourceConfig,
metadata_config: MetadataServerConfig,
ctx: WorkflowContext,
):
super().__init__(ctx)
self.config = config
self.metadata_config = metadata_config
self.status = RedashSourceStatus()
self.client = Redash(self.config.uri, self.config.api_key)
self.service = get_dashboard_service_or_create(
service_name=config.service_name,
dashboard_service_type=DashboardServiceType.Redash.name,
username=config.username,
password=config.api_key,
dashboard_url=config.uri,
metadata_config=metadata_config,
)
self.dashboards_to_charts = {}
@classmethod
def create(
cls, config_dict: dict, metadata_config_dict: dict, ctx: WorkflowContext
):
config = RedashSourceConfig.parse_obj(config_dict)
metadata_config = MetadataServerConfig.parse_obj(metadata_config_dict)
return cls(config, metadata_config, ctx)
def prepare(self):
pass
def next_record(self) -> Iterable[Entity]:
yield from self.get_redash_charts()
dashboard_info = self.client.dashboards()
yield from self.get_redash_dashboard_charts(dashboard_info)
yield from self.get_redash_dashboard(dashboard_info)
def get_redash_charts(self) -> Chart:
query_info = self.client.queries()
for query_info in query_info["results"]:
query_id = query_info["id"]
query_name = query_info["name"]
query_data = requests.get(
f"{self.config.uri}/api/queries/{query_id}"
).json()
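            # NOTE: the Redash REST API commonly returns this field as lowercase
            # "visualizations"; if no charts are emitted, check the key casing
            # used below against the Redash version in use.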
for visualization in query_data.get("Visualizations", []):
chart_type = visualization.get("type", "")
chart_description = (
visualization.get("description", "")
if visualization.get("description", "")
else ""
)
yield Chart(
id=uuid.uuid4(),
name=query_id,
displayName=query_name,
chartType=chart_type,
service=EntityReference(
id=self.service.id, type="dashboardService"
),
description=chart_description,
)
def get_redash_dashboard_charts(self, dashboard_info) -> Chart:
for dashboard_info in dashboard_info["results"]:
dashboard_id = dashboard_info["id"]
if dashboard_id is not None:
dashboard_data = self.client.dashboard(dashboard_info["slug"])
self.dashboards_to_charts[dashboard_id] = []
for widgets in dashboard_data.get("widgets", []):
visualization = widgets.get("visualization")
self.dashboards_to_charts[dashboard_id].append(widgets["id"])
yield ModelChart(
name=widgets["id"],
displayName=visualization["query"]["name"],
chart_type=visualization["type"],
service=EntityReference(
id=self.service.id, type="dashboardService"
),
url=(
f"{self.config.uri}/dashboard/{dashboard_data.get('slug', '')}"
),
description=visualization["description"],
)
def get_redash_dashboard(self, dashboard_info) -> Dashboard:
for dashboard_info in dashboard_info["results"]:
dashboard_id = dashboard_info["id"]
if dashboard_id is not None:
self.status.item_scanned_status()
dashboard_data = self.client.dashboard(dashboard_info["slug"])
dashboard_url = (
f"{self.config.uri}/dashboard/{dashboard_data.get('slug', '')}"
)
                dashboard_description = None
                for widgets in dashboard_data.get("widgets", []):
                    dashboard_description = widgets.get("text")
yield Dashboard(
id=uuid.uuid4(),
name=dashboard_id,
displayName=dashboard_info["name"],
                    description=dashboard_description if dashboard_description else "",
charts=self.dashboards_to_charts[dashboard_id],
usageSummary=None,
service=EntityReference(
id=self.service.id, type="dashboardService"
),
url=dashboard_url,
)
def get_status(self) -> SourceStatus:
return self.status
def close(self):
self.client.session.close()
|
py | b4094a569f2234ff70fa24c3238c81245627b492 | # Copyright (c) 2012-2014, Michael DeHaan <[email protected]>
# Copyright (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
inventory: script
version_added: "2.4"
short_description: Executes an inventory script that returns JSON
options:
cache:
deprecated:
why: This option has never been in use. External scripts must implement their own caching.
version: "2.12"
description:
- This option has no effect. The plugin will not cache results because external inventory scripts
are responsible for their own caching. This option will be removed in 2.12.
ini:
- section: inventory_plugin_script
key: cache
env:
- name: ANSIBLE_INVENTORY_PLUGIN_SCRIPT_CACHE
always_show_stderr:
description: Toggle display of stderr even when script was successful
version_added: "2.5.1"
default: True
type: boolean
ini:
- section: inventory_plugin_script
key: always_show_stderr
env:
- name: ANSIBLE_INVENTORY_PLUGIN_SCRIPT_STDERR
description:
- The source provided must be an executable that returns Ansible inventory JSON
- The source must accept C(--list) and C(--host <hostname>) as arguments.
C(--host) will only be used if no C(_meta) key is present.
This is a performance optimization as the script would be called per host otherwise.
notes:
- Whitelisted in configuration by default.
- The plugin does not cache results because external inventory scripts are responsible for their own caching.
'''
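# Illustrative example (hostnames and values are placeholders): a script whose
# ``--list`` output looks like the JSON below is parsed by this plugin into
# groups, group vars, children and (optionally) per-host vars via "_meta":
#
#   {
#       "web": {
#           "hosts": ["web1.example.com", "web2.example.com"],
#           "vars": {"http_port": 80},
#           "children": ["atlanta"]
#       },
#       "_meta": {
#           "hostvars": {
#               "web1.example.com": {"ansible_host": "10.0.0.1"}
#           }
#       }
#   }
#
# When "_meta.hostvars" is present, the plugin does not invoke the script with
# ``--host <hostname>`` for each host (see parse() and get_host_variables()).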
import os
import subprocess
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.module_utils.basic import json_dict_bytes_to_unicode
from ansible.module_utils.six import iteritems
from ansible.module_utils._text import to_native, to_text
from ansible.module_utils.common._collections_compat import Mapping
from ansible.plugins.inventory import BaseInventoryPlugin, Cacheable
from ansible.utils.display import Display
display = Display()
class InventoryModule(BaseInventoryPlugin, Cacheable):
''' Host inventory parser for ansible using external inventory scripts. '''
NAME = 'script'
def __init__(self):
super(InventoryModule, self).__init__()
self._hosts = set()
def verify_file(self, path):
''' Verify if file is usable by this plugin, base does minimal accessibility check '''
valid = super(InventoryModule, self).verify_file(path)
if valid:
# not only accessible, file must be executable and/or have shebang
shebang_present = False
try:
with open(path, 'rb') as inv_file:
initial_chars = inv_file.read(2)
if initial_chars.startswith(b'#!'):
shebang_present = True
except Exception:
pass
if not os.access(path, os.X_OK) and not shebang_present:
valid = False
return valid
def parse(self, inventory, loader, path, cache=None):
super(InventoryModule, self).parse(inventory, loader, path)
self.set_options()
if self.get_option('cache') is not None:
display.deprecated(
msg="The 'cache' option is deprecated for the script inventory plugin. "
"External scripts implement their own caching and this option has never been used",
version="2.12", collection_name='ansible.builtin'
)
# Support inventory scripts that are not prefixed with some
# path information but happen to be in the current working
# directory when '.' is not in PATH.
cmd = [path, "--list"]
try:
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
raise AnsibleParserError("problem running %s (%s)" % (' '.join(cmd), to_native(e)))
(stdout, stderr) = sp.communicate()
path = to_native(path)
err = to_native(stderr or "")
if err and not err.endswith('\n'):
err += '\n'
if sp.returncode != 0:
raise AnsibleError("Inventory script (%s) had an execution error: %s " % (path, err))
# make sure script output is unicode so that json loader will output unicode strings itself
try:
data = to_text(stdout, errors="strict")
except Exception as e:
raise AnsibleError("Inventory {0} contained characters that cannot be interpreted as UTF-8: {1}".format(path, to_native(e)))
try:
processed = self.loader.load(data, json_only=True)
except Exception as e:
raise AnsibleError("failed to parse executable inventory script results from {0}: {1}\n{2}".format(path, to_native(e), err))
# if no other errors happened and you want to force displaying stderr, do so now
if stderr and self.get_option('always_show_stderr'):
self.display.error(msg=to_text(err))
if not isinstance(processed, Mapping):
raise AnsibleError("failed to parse executable inventory script results from {0}: needs to be a json dict\n{1}".format(path, err))
group = None
data_from_meta = None
# A "_meta" subelement may contain a variable "hostvars" which contains a hash for each host
# if this "hostvars" exists at all then do not call --host for each # host.
# This is for efficiency and scripts should still return data
# if called with --host for backwards compat with 1.2 and earlier.
for (group, gdata) in processed.items():
if group == '_meta':
if 'hostvars' in gdata:
data_from_meta = gdata['hostvars']
else:
self._parse_group(group, gdata)
for host in self._hosts:
got = {}
if data_from_meta is None:
got = self.get_host_variables(path, host)
else:
try:
got = data_from_meta.get(host, {})
except AttributeError as e:
raise AnsibleError("Improperly formatted host information for %s: %s" % (host, to_native(e)), orig_exc=e)
self._populate_host_vars([host], got)
except Exception as e:
raise AnsibleParserError(to_native(e))
def _parse_group(self, group, data):
group = self.inventory.add_group(group)
if not isinstance(data, dict):
data = {'hosts': data}
        # if data has none of those subkeys, it is the simplified syntax: a host with vars
elif not any(k in data for k in ('hosts', 'vars', 'children')):
data = {'hosts': [group], 'vars': data}
if 'hosts' in data:
if not isinstance(data['hosts'], list):
raise AnsibleError("You defined a group '%s' with bad data for the host list:\n %s" % (group, data))
for hostname in data['hosts']:
self._hosts.add(hostname)
self.inventory.add_host(hostname, group)
if 'vars' in data:
if not isinstance(data['vars'], dict):
raise AnsibleError("You defined a group '%s' with bad data for variables:\n %s" % (group, data))
for k, v in iteritems(data['vars']):
self.inventory.set_variable(group, k, v)
if group != '_meta' and isinstance(data, dict) and 'children' in data:
for child_name in data['children']:
child_name = self.inventory.add_group(child_name)
self.inventory.add_child(group, child_name)
def get_host_variables(self, path, host):
""" Runs <script> --host <hostname>, to determine additional host variables """
cmd = [path, "--host", host]
try:
sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
raise AnsibleError("problem running %s (%s)" % (' '.join(cmd), e))
(out, err) = sp.communicate()
if out.strip() == '':
return {}
try:
return json_dict_bytes_to_unicode(self.loader.load(out, file_name=path))
except ValueError:
raise AnsibleError("could not parse post variable response: %s, %s" % (cmd, out))
|
py | b4094b50c418bc760ff0b99e316aef68e801a906 | import random
import numpy as np
import numpy.linalg as LA
import scipy as spy
import time
from itertools import *
import sys
import cvxpy as cvx
from random import randint
import numpy as np
import random
from scipy.sparse import csc_matrix
from scipy import sparse as sp
import networkx as nx
class ADMM_fully_distributed:
def __init__(self, X, y, b,pos_node ,temp, Lambda, Rho):
self.X = X
self.y = y
self.dim = X.shape[1]
self.Lambda = Lambda
self.Rho = Rho
self.temp = temp
self.num_nodes = nx.number_of_nodes(self.temp)
self.Z = csc_matrix((self.num_nodes, self.num_nodes), dtype=np.float).toarray()
self.U = csc_matrix((self.num_nodes, self.num_nodes), dtype=np.float).toarray()
for EI in self.temp.edges_iter():
self.Z[EI[0],EI[1]] = np.random.rand()
self.U[EI[0],EI[1]] = np.random.rand()
self.W = np.zeros((self.dim))
self.b = b
self.pos_node = pos_node
self.g = np.random.random((self.num_nodes,self.dim))
self.h = np.random.random((self.num_nodes,self.dim))
def update_W(self):
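        # Consensus step: with scaled duals h_i for the constraints W = g_i,
        # minimizing sum_i (Rho/2) * ||W - g_i + h_i||^2 over W gives the
        # average of (g_i - h_i) across nodes, which is what the loop computes.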
loss = 0
self.W = 0
for i in range(self.num_nodes):
self.W += (self.g[i,:] - self.h[i,:])/self.num_nodes
def update_b(self):
B = []
for i in range(self.num_nodes):
bi = cvx.Variable(1)
loss = cvx.logistic(-cvx.mul_elemwise(self.y[i], self.X[i].dot(self.g[i,:])+bi))*self.temp.node[i]['pos_node_prob']
for Id in self.temp.neighbors(i):
loss = loss+(bi-self.Z[i,Id]+ self.U[i,Id])**2*self.Rho/2
problem = cvx.Problem(cvx.Minimize(loss))
problem.solve(verbose=False)
self.b[i] = bi.value
def update_g(self):
for i in range(self.num_nodes):
gt = cvx.Variable(self.dim)
loss = cvx.logistic(-cvx.mul_elemwise(self.y[i], self.X[i]*gt+self.b[i]))*self.temp.node[i]['pos_node_prob']
loss += cvx.norm(self.W - gt + self.h[i,:])**2*self.Rho/2
problem = cvx.Problem(cvx.Minimize(loss))
problem.solve(verbose=False)
self.g[i,:] = gt.value.ravel()
def update_Z(self):
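        # Edge consensus step: for each ordered pair (k, j), minimizing
        #   Lambda * p_jk * (z_jk - z_kj)^2
        #       + (Rho / 2) * ((A - z_jk)^2 + (B - z_kj)^2)
        # jointly over (z_jk, z_kj), with A = b_j + u_jk and B = b_k + u_kj,
        # yields the closed form used below:
        #   z_kj = (2*Lambda*p*A + (2*Lambda*p + Rho)*B) / (4*Lambda*p + Rho)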
for k in self.temp.nodes_iter():
for j in self.temp.neighbors(k):
A = self.b[j] + self.U[j,k]
B = self.b[k] + self.U[k,j]
self.Z[k,j] = (2*self.Lambda*self.temp[j][k]['pos_edge_prob']*A + (2*self.Lambda*self.temp[j][k]['pos_edge_prob']+self.Rho)*B)/(self.Lambda*4*self.temp[j][k]['pos_edge_prob']+self.Rho)
def update_U(self):
for i in self.temp.nodes_iter():
for Id in self.temp.neighbors(i):
self.U[i,Id] = self.U[i,Id] + self.b[i] - self.Z[i,Id]
def update_h(self):
for i in range(self.num_nodes):
self.h[i,:] = self.h[i,:] + (self.W -self.g[i,:])
def runADMM_Grid(self,iterations):
for i in range(iterations):
W_old = self.W
b_old = self.b
self.update_W()
self.update_b()
self.update_Z()
self.update_g()
self.update_h()
self.update_U()
if i%1 == 0:
print 'iteration = ',i, 'objective = ', self.cal_LL()
def cal_LL(self):
W = np.array(self.W).flatten()
b = np.array(self.b).flatten()
loss = np.sum(np.multiply(np.array(self.pos_node),np.log( (1+np.exp(-np.multiply(self.y,np.dot(self.X,W)+b))))))
for EI in self.temp.edges_iter():
loss += self.Lambda*(self.b[EI[0]]-self.b[EI[1]])**2*self.temp[EI[0]][EI[1]]['pos_edge_prob']
return loss
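# Example usage (a sketch with made-up names, not part of the original file):
# ``G`` must be a networkx graph (networkx 1.x, given edges_iter/nodes_iter)
# whose nodes carry a 'pos_node_prob' attribute and whose edges carry a
# 'pos_edge_prob' attribute; X is an (n x d) feature matrix, y holds labels in
# {-1, +1}, and b / pos_node are per-node arrays aligned to nodes 0..n-1.
#
#   model = ADMM_fully_distributed(X, y, b, pos_node, G, Lambda=1.0, Rho=1.0)
#   model.runADMM_Grid(iterations=20)
#   print model.W, model.b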
|