ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3-1.04M) |
---|---|---|
py | 1a35a136003e6581b47ac8e9a5c8ec2f47d28f3f | import torch
from torch.nn import Parameter
from torch_scatter import scatter_add
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import add_remaining_self_loops
from torch_geometric.nn.inits import glorot, zeros
class GCNConv(MessagePassing):
r"""The graph convolutional operator from the `"Semi-supervised
Classification with Graph Convolutional Networks"
<https://arxiv.org/abs/1609.02907>`_ paper
.. math::
\mathbf{X}^{\prime} = \mathbf{\hat{D}}^{-1/2} \mathbf{\hat{A}}
\mathbf{\hat{D}}^{-1/2} \mathbf{X} \mathbf{\Theta},
where :math:`\mathbf{\hat{A}} = \mathbf{A} + \mathbf{I}` denotes the
adjacency matrix with inserted self-loops and
:math:`\hat{D}_{ii} = \sum_{j=0} \hat{A}_{ij}` its diagonal degree matrix.
Args:
in_channels (int): Size of each input sample.
out_channels (int): Size of each output sample.
improved (bool, optional): If set to :obj:`True`, the layer computes
:math:`\mathbf{\hat{A}}` as :math:`\mathbf{A} + 2\mathbf{I}`.
(default: :obj:`False`)
cached (bool, optional): If set to :obj:`True`, the layer will cache
the computation of :math:`\mathbf{\hat{D}}^{-1/2} \mathbf{\hat{A}}
\mathbf{\hat{D}}^{-1/2}` on first execution, and will use the
cached version for further executions.
This parameter should only be set to :obj:`True` in transductive
learning scenarios. (default: :obj:`False`)
bias (bool, optional): If set to :obj:`False`, the layer will not learn
an additive bias. (default: :obj:`True`)
**kwargs (optional): Additional arguments of
:class:`torch_geometric.nn.conv.MessagePassing`.
"""
def __init__(self, in_channels, out_channels, improved=False, cached=False,
bias=True, **kwargs):
super(GCNConv, self).__init__(aggr='add', **kwargs)
self.in_channels = in_channels
self.out_channels = out_channels
self.improved = improved
self.cached = cached
self.weight = Parameter(torch.Tensor(in_channels, out_channels))
if bias:
self.bias = Parameter(torch.Tensor(out_channels))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
glorot(self.weight)
zeros(self.bias)
self.cached_result = None
self.cached_num_edges = None
@staticmethod
def norm(edge_index, num_nodes, edge_weight=None, improved=False,
dtype=None):
if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1), ), dtype=dtype,
device=edge_index.device)
fill_value = 1 if not improved else 2
edge_index, edge_weight = add_remaining_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
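        # Nodes with degree 0 produce inf after pow(-0.5); the next line zeros them out
        # so isolated nodes contribute nothing to the normalization.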
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
def forward(self, x, edge_index, edge_weight=None):
""""""
x = torch.matmul(x, self.weight)
if self.cached and self.cached_result is not None:
if edge_index.size(1) != self.cached_num_edges:
raise RuntimeError(
'Cached {} number of edges, but found {}. Please '
'disable the caching behavior of this layer by removing '
'the `cached=True` argument in its constructor.'.format(
self.cached_num_edges, edge_index.size(1)))
if not self.cached or self.cached_result is None:
self.cached_num_edges = edge_index.size(1)
edge_index, norm = self.norm(edge_index, x.size(0), edge_weight,
self.improved, x.dtype)
self.cached_result = edge_index, norm
edge_index, norm = self.cached_result
return self.propagate(edge_index, x=x, norm=norm)
def message(self, x_j, norm, edge_index_j):
return norm.view(-1, 1) * x_j
def update(self, aggr_out):
if self.bias is not None:
aggr_out = aggr_out + self.bias
return aggr_out
def __repr__(self):
return '{}({}, {})'.format(self.__class__.__name__, self.in_channels,
self.out_channels)
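# --- Hypothetical usage sketch (added for illustration, not part of the original file) ---
# A minimal example of applying the layer above to a toy graph, assuming torch,
# torch_scatter and torch_geometric are installed; all shapes and values are made up.
if __name__ == '__main__':
    conv = GCNConv(in_channels=16, out_channels=32)
    x = torch.randn(4, 16)                     # 4 nodes with 16 features each
    edge_index = torch.tensor([[0, 1, 2, 3],   # source nodes
                               [1, 0, 3, 2]])  # target nodes
    out = conv(x, edge_index)                  # -> shape [4, 32]
    print(out.shape)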
|
py | 1a35a223ab8aa54c59ade29968502e9aee589639 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from airflow.contrib.operators.gcs_to_s3 import GoogleCloudStorageToS3Operator
from airflow.hooks.S3_hook import S3Hook
try:
from unittest import mock
except ImportError:
try:
import mock
except ImportError:
mock = None
try:
from moto import mock_s3
except ImportError:
mock_s3 = None
TASK_ID = 'test-gcs-list-operator'
GCS_BUCKET = 'test-bucket'
DELIMITER = '.csv'
PREFIX = 'TEST'
S3_BUCKET = 's3://bucket/'
MOCK_FILES = ["TEST1.csv", "TEST2.csv", "TEST3.csv"]
class GoogleCloudStorageToS3OperatorTest(unittest.TestCase):
@mock_s3
@mock.patch('airflow.contrib.operators.gcs_list_operator.GoogleCloudStorageHook')
@mock.patch('airflow.contrib.operators.gcs_to_s3.GoogleCloudStorageHook')
def test_execute(self, mock_hook, mock_hook2):
mock_hook.return_value.list.return_value = MOCK_FILES
mock_hook.return_value.download.return_value = b"testing"
mock_hook2.return_value.list.return_value = MOCK_FILES
operator = GoogleCloudStorageToS3Operator(task_id=TASK_ID,
bucket=GCS_BUCKET,
prefix=PREFIX,
delimiter=DELIMITER,
dest_aws_conn_id=None,
dest_s3_key=S3_BUCKET)
# create dest bucket
hook = S3Hook(aws_conn_id=None)
b = hook.get_bucket('bucket')
b.create()
b.put_object(Key=MOCK_FILES[0], Body=b'testing')
# we expect MOCK_FILES[1:] to be uploaded
# and all MOCK_FILES to be present at the S3 bucket
uploaded_files = operator.execute(None)
self.assertEqual(sorted(MOCK_FILES[1:]),
sorted(uploaded_files))
self.assertEqual(sorted(MOCK_FILES),
sorted(hook.list_keys('bucket', delimiter='/')))
|
py | 1a35a4110607f29ecd0cdf2bd5fb8bcfe1dee622 | import pandas as pd
from data.dataset import Metric
wrap_cpu = Metric.CPU_TIME.value
wrap_wc = Metric.WALL_TIME.value
core_count = Metric.USED_CORES.value
cpu_time_per_core = Metric.CPU_TIME_PER_CORE
def cpu_efficiency(df, include_zero_cpu=False):
"""Compute the CPU efficiency from a data frame containing job monitoring information."""
df_filtered = filter_cpu_efficiency(df, include_zero=include_zero_cpu)
df_filtered['max_cpu_time'] = df_filtered[wrap_wc] * df_filtered[core_count]
# Do not count NaN values here
total_walltime = df_filtered['max_cpu_time'].sum()
total_cpu_time = df_filtered[wrap_cpu].sum()
return total_cpu_time / total_walltime
def filter_cpu_efficiency(df, cols=None, include_zero=False):
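    # Keep only rows where each of the given columns is positive
    # (or merely non-negative when include_zero is set).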
if not cols:
cols = [Metric.WALL_TIME.value, Metric.CPU_TIME.value]
df_filtered = df.copy()
for col in cols:
if include_zero:
mask = df_filtered[col] >= 0
else:
mask = df_filtered[col] > 0
df_filtered = df_filtered[mask]
return df_filtered
def calculate_efficiencies(jobs: pd.DataFrame, freq='D'):
df = jobs[[Metric.STOP_TIME.value, Metric.WALL_TIME.value, Metric.CPU_TIME.value, Metric.USED_CORES.value]].copy()
df = filter_cpu_efficiency(df, include_zero=False)
df['MaxCPUTime'] = df[Metric.WALL_TIME.value] * df[Metric.USED_CORES.value]
df['day'] = df[Metric.STOP_TIME.value].dt.round(freq)
timeseries = df.groupby('day').apply(lambda x: x[Metric.CPU_TIME.value].sum() / x['MaxCPUTime'].sum())
overall_efficiency = cpu_efficiency(jobs, include_zero_cpu=False)
return timeseries, overall_efficiency
def cpu_efficiency_scaled_by_jobslots(df, include_zero_cpu=False, physical=False):
"""Compute the CPU efficiency from a data frame containing job monitoring information,
    but scale the result by the number of job slots available on the node, using either physical or logical cores.
"""
df_filtered = filter_cpu_efficiency(df, include_zero=include_zero_cpu)
if physical:
core_col = 'cores'
else:
core_col = 'coresLogical'
total_walltime = \
(df_filtered[wrap_wc] * df_filtered[core_count] * df_filtered[core_col] / df_filtered['jobslots']).sum()
total_cpu_time = df_filtered[wrap_cpu].sum()
return total_cpu_time / total_walltime
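# --- Hypothetical usage sketch (added for illustration, not part of the original module) ---
# Demonstrates the efficiency formula (total CPU time / sum of wall time * cores) on a toy
# frame. It reuses the column names resolved from the Metric enum above, so no literal
# column names are assumed; running it requires the project's data.dataset module.
if __name__ == '__main__':
    toy = pd.DataFrame({
        wrap_cpu: [3600.0, 7200.0],   # CPU seconds actually consumed
        wrap_wc: [3600.0, 3600.0],    # wall-clock seconds per job
        core_count: [2, 4],           # cores reserved by each job
    })
    # (3600 + 7200) / (3600 * 2 + 3600 * 4) = 0.5
    print(cpu_efficiency(toy))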
|
py | 1a35a4351b04fae04089dbf6e4886a910ba66dcf | class Solution:
def removeElement(self, nums, val):
"""
:type nums: List[int]
:type val: int
:rtype: int
"""
if not nums:
return 0
i = 0
n = len(nums)
while i < n:
if nums[i] == val:
nums[i] = nums[n - 1]
n-=1
else:
i+=1
        return n
|
py | 1a35a578026f0bcdeb4ba3f7d7dcb00b5094fde2 | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test multiple RPC users."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_equal,
get_datadir_path,
str_to_b64str,
)
import os
import http.client
import urllib.parse
import subprocess
from random import SystemRandom
import string
import configparser
import sys
def call_with_auth(node, user, password):
url = urllib.parse.urlparse(node.url)
headers = {"Authorization": "Basic " + str_to_b64str('{}:{}'.format(user, password))}
conn = http.client.HTTPConnection(url.hostname, url.port)
conn.connect()
conn.request('POST', '/', '{"method": "getbestblockhash"}', headers)
resp = conn.getresponse()
conn.close()
return resp
class HTTPBasicsTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.supports_cli = False
def setup_chain(self):
super().setup_chain()
#Append rpcauth to bitcoin.conf before initialization
self.rtpassword = "cA773lm788buwYe4g4WT+05pKyNruVKjQ25x3n0DQcM="
rpcauth = "rpcauth=rt:93648e835a54c573682c2eb19f882535$7681e9c5b74bdd85e78166031d2058e1069b3ed7ed967c93fc63abba06f31144"
self.rpcuser = "rpcuser💻"
self.rpcpassword = "rpcpassword🔑"
config = configparser.ConfigParser()
config.read_file(open(self.options.configfile))
gen_rpcauth = config['environment']['RPCAUTH']
# Generate RPCAUTH with specified password
self.rt2password = "8/F3uMDw4KSEbw96U3CA1C4X05dkHDN2BPFjTgZW4KI="
p = subprocess.Popen([sys.executable, gen_rpcauth, 'rt2', self.rt2password], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
rpcauth2 = lines[1]
# Generate RPCAUTH without specifying password
self.user = ''.join(SystemRandom().choice(string.ascii_letters + string.digits) for _ in range(10))
p = subprocess.Popen([sys.executable, gen_rpcauth, self.user], stdout=subprocess.PIPE, universal_newlines=True)
lines = p.stdout.read().splitlines()
rpcauth3 = lines[1]
self.password = lines[3]
with open(os.path.join(get_datadir_path(self.options.tmpdir, 0), "bitcoin.conf"), 'a', encoding='utf8') as f:
f.write(rpcauth + "\n")
f.write(rpcauth2 + "\n")
f.write(rpcauth3 + "\n")
with open(os.path.join(get_datadir_path(self.options.tmpdir, 1), "bitcoin.conf"), 'a', encoding='utf8') as f:
f.write("rpcuser={}\n".format(self.rpcuser))
f.write("rpcpassword={}\n".format(self.rpcpassword))
def test_auth(self, node, user, password):
self.log.info('Correct...')
assert_equal(200, call_with_auth(node, user, password).status)
self.log.info('Wrong...')
assert_equal(401, call_with_auth(node, user, password + 'wrong').status)
self.log.info('Wrong...')
assert_equal(401, call_with_auth(node, user + 'wrong', password).status)
self.log.info('Wrong...')
assert_equal(401, call_with_auth(node, user + 'wrong', password + 'wrong').status)
def run_test(self):
self.log.info('Check correctness of the rpcauth config option')
url = urllib.parse.urlparse(self.nodes[0].url)
self.test_auth(self.nodes[0], url.username, url.password)
self.test_auth(self.nodes[0], 'rt', self.rtpassword)
self.test_auth(self.nodes[0], 'rt2', self.rt2password)
self.test_auth(self.nodes[0], self.user, self.password)
self.log.info('Check correctness of the rpcuser/rpcpassword config options')
url = urllib.parse.urlparse(self.nodes[1].url)
self.test_auth(self.nodes[1], self.rpcuser, self.rpcpassword)
init_error = 'Error: Unable to start HTTP server. See debug log for details.'
self.log.info('Check -rpcauth are validated')
# Empty -rpcauth= are ignored
self.restart_node(0, extra_args=['-rpcauth='])
self.stop_node(0)
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth=foo'])
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth=foo:bar'])
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth=foo:bar:baz'])
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth=foo$bar:baz'])
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error, extra_args=['-rpcauth=foo$bar$baz'])
self.log.info('Check that failure to write cookie file will abort the node gracefully')
cookie_file = os.path.join(get_datadir_path(self.options.tmpdir, 0), self.chain, '.cookie.tmp')
os.mkdir(cookie_file)
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error)
if __name__ == '__main__':
HTTPBasicsTest().main()
|
py | 1a35a6c09c07c3d73773aa06f35f5fb497355b4a | #!/usr/bin/env python
#
# Copyright (c) 2016 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os, util
from string import Template
import jni_gen
def is_manually_generated(f_name, plugin_name):
return f_name in {'control_ping_reply'}
class_reference_template = Template("""jclass ${ref_name}Class;
""")
find_class_invocation_template = Template("""
${ref_name}Class = (jclass)(*env)->NewGlobalRef(env, (*env)->FindClass(env, "io/fd/vpp/jvpp/${plugin_name}/dto/${class_name}"));
if ((*env)->ExceptionCheck(env)) {
(*env)->ExceptionDescribe(env);
return JNI_ERR;
}""")
find_class_template = Template("""
${ref_name}Class = (jclass)(*env)->NewGlobalRef(env, (*env)->FindClass(env, "${class_name}"));
if ((*env)->ExceptionCheck(env)) {
(*env)->ExceptionDescribe(env);
return JNI_ERR;
}""")
delete_class_invocation_template = Template("""
if (${ref_name}Class) {
(*env)->DeleteGlobalRef(env, ${ref_name}Class);
}""")
class_cache_template = Template("""
$class_references
static int cache_class_references(JNIEnv* env) {
$find_class_invocations
return 0;
}
static void delete_class_references(JNIEnv* env) {
$delete_class_invocations
}""")
def generate_class_cache(func_list, plugin_name):
class_references = []
find_class_invocations = []
delete_class_invocations = []
for f in func_list:
c_name = f['name']
class_name = util.underscore_to_camelcase_upper(c_name)
ref_name = util.underscore_to_camelcase(c_name)
if util.is_ignored(c_name) or util.is_control_ping(class_name):
continue
if util.is_reply(class_name):
class_references.append(class_reference_template.substitute(
ref_name=ref_name))
find_class_invocations.append(find_class_invocation_template.substitute(
plugin_name=plugin_name,
ref_name=ref_name,
class_name=class_name))
delete_class_invocations.append(delete_class_invocation_template.substitute(ref_name=ref_name))
elif util.is_notification(c_name):
class_references.append(class_reference_template.substitute(
ref_name=util.add_notification_suffix(ref_name)))
find_class_invocations.append(find_class_invocation_template.substitute(
plugin_name=plugin_name,
ref_name=util.add_notification_suffix(ref_name),
class_name=util.add_notification_suffix(class_name)))
delete_class_invocations.append(delete_class_invocation_template.substitute(
ref_name=util.add_notification_suffix(ref_name)))
# add exception class to class cache
ref_name = 'callbackException'
class_name = 'io/fd/vpp/jvpp/VppCallbackException'
class_references.append(class_reference_template.substitute(
ref_name=ref_name))
find_class_invocations.append(find_class_template.substitute(
ref_name=ref_name,
class_name=class_name))
delete_class_invocations.append(delete_class_invocation_template.substitute(ref_name=ref_name))
return class_cache_template.substitute(
class_references="".join(class_references), find_class_invocations="".join(find_class_invocations),
delete_class_invocations="".join(delete_class_invocations))
# TODO: cache method and field identifiers to achieve better performance
# https://jira.fd.io/browse/HONEYCOMB-42
request_class_template = Template("""
jclass requestClass = (*env)->FindClass(env, "io/fd/vpp/jvpp/${plugin_name}/dto/${java_name_upper}");""")
request_field_identifier_template = Template("""
jfieldID ${field_reference_name}FieldId = (*env)->GetFieldID(env, ${object_name}Class, "${field_name}", "${jni_signature}");
${jni_type} ${field_reference_name} = (*env)->Get${jni_getter}(env, ${object_name}, ${field_reference_name}FieldId);
""")
jni_impl_template = Template("""
/**
* JNI binding for sending ${c_name} message.
* Generated based on $inputfile preparsed data:
$api_data
*/
JNIEXPORT jint JNICALL Java_io_fd_vpp_jvpp_${plugin_name}_JVpp${java_plugin_name}Impl_${field_name}0
(JNIEnv * env, jclass clazz$args) {
${plugin_name}_main_t *plugin_main = &${plugin_name}_main;
vl_api_${c_name}_t * mp;
u32 my_context_id = vppjni_get_context_id (&jvpp_main);
$request_class
// create message:
mp = vl_msg_api_alloc(sizeof(*mp));
memset (mp, 0, sizeof (*mp));
mp->_vl_msg_id = ntohs (VL_API_${c_name_uppercase} + plugin_main->msg_id_base);
mp->client_index = plugin_main->my_client_index;
mp->context = clib_host_to_net_u32 (my_context_id);
$msg_initialization
// send message:
vl_msg_api_send_shmem (plugin_main->vl_input_queue, (u8 *)&mp);
if ((*env)->ExceptionCheck(env)) {
return JNI_ERR;
}
return my_context_id;
}""")
def generate_jni_impl(func_list, plugin_name, inputfile):
jni_impl = []
for f in func_list:
f_name = f['name']
camel_case_function_name = util.underscore_to_camelcase(f_name)
if is_manually_generated(f_name, plugin_name) or util.is_reply(camel_case_function_name) \
or util.is_ignored(f_name) or util.is_just_notification(f_name):
continue
arguments = ''
request_class = ''
msg_initialization = ''
f_name_uppercase = f_name.upper()
if f['args']:
arguments = ', jobject request'
camel_case_function_name_upper = util.underscore_to_camelcase_upper(f_name)
request_class = request_class_template.substitute(
java_name_upper=camel_case_function_name_upper,
plugin_name=plugin_name)
for t in zip(f['types'], f['args'], f['lengths']):
field_name = util.underscore_to_camelcase(t[1])
msg_initialization += jni_gen.jni_request_binding_for_type(field_type=t[0], c_name=t[1],
field_reference_name=field_name,
field_name=field_name,
field_length=t[2][0],
is_variable_len_array=t[2][1])
jni_impl.append(jni_impl_template.substitute(
inputfile=inputfile,
api_data=util.api_message_to_javadoc(f),
field_reference_name=camel_case_function_name,
field_name=camel_case_function_name,
c_name_uppercase=f_name_uppercase,
c_name=f_name,
plugin_name=plugin_name,
java_plugin_name=plugin_name.title(),
request_class=request_class,
msg_initialization=msg_initialization,
args=arguments))
return "\n".join(jni_impl)
# code fragment for checking result of the operation before sending request reply
callback_err_handler_template = Template("""
// for negative result don't send callback message but send error callback
if (mp->retval<0) {
call_on_error("${handler_name}", mp->context, mp->retval, plugin_main->callbackClass, plugin_main->callbackObject, callbackExceptionClass);
return;
}
if (mp->retval == VNET_API_ERROR_IN_PROGRESS) {
clib_warning("Result in progress");
return;
}
""")
msg_handler_template = Template("""
/**
* Handler for ${handler_name} message.
* Generated based on $inputfile preparsed data:
$api_data
*/
static void vl_api_${handler_name}_t_handler (vl_api_${handler_name}_t * mp)
{
${plugin_name}_main_t *plugin_main = &${plugin_name}_main;
JNIEnv *env = jvpp_main.jenv;
$err_handler
jmethodID constructor = (*env)->GetMethodID(env, ${class_ref_name}Class, "<init>", "()V");
jmethodID callbackMethod = (*env)->GetMethodID(env, plugin_main->callbackClass, "on${dto_name}", "(Lio/fd/vpp/jvpp/${plugin_name}/dto/${dto_name};)V");
jobject dto = (*env)->NewObject(env, ${class_ref_name}Class, constructor);
$dto_setters
(*env)->CallVoidMethod(env, plugin_main->callbackObject, callbackMethod, dto);
}""")
def generate_msg_handlers(func_list, plugin_name, inputfile):
handlers = []
for f in func_list:
handler_name = f['name']
dto_name = util.underscore_to_camelcase_upper(handler_name)
ref_name = util.underscore_to_camelcase(handler_name)
if is_manually_generated(handler_name, plugin_name) or util.is_ignored(handler_name):
continue
if not util.is_reply(dto_name) and not util.is_notification(handler_name):
continue
if util.is_notification(handler_name):
dto_name = util.add_notification_suffix(dto_name)
ref_name = util.add_notification_suffix(ref_name)
dto_setters = ''
err_handler = ''
# dto setters
for t in zip(f['types'], f['args'], f['lengths']):
c_name = t[1]
java_name = util.underscore_to_camelcase(c_name)
field_length = t[2][0]
is_variable_len_array = t[2][1]
length_field_type = None
if is_variable_len_array:
length_field_type = f['types'][f['args'].index(field_length)]
dto_setters += jni_gen.jni_reply_handler_for_type(handler_name=handler_name, ref_name=ref_name,
field_type=t[0], c_name=t[1],
field_reference_name=java_name,
field_name=java_name, field_length=field_length,
is_variable_len_array=is_variable_len_array,
length_field_type=length_field_type)
# for retval don't generate setters and generate retval check
if util.is_retval_field(c_name):
err_handler = callback_err_handler_template.substitute(
handler_name=handler_name
)
continue
handlers.append(msg_handler_template.substitute(
inputfile=inputfile,
api_data=util.api_message_to_javadoc(f),
handler_name=handler_name,
plugin_name=plugin_name,
dto_name=dto_name,
class_ref_name=ref_name,
dto_setters=dto_setters,
err_handler=err_handler))
return "\n".join(handlers)
handler_registration_template = Template("""_(${upercase_name}, ${name}) \\
""")
def generate_handler_registration(func_list):
handler_registration = ["#define foreach_api_reply_handler \\\n"]
for f in func_list:
name = f['name']
camelcase_name = util.underscore_to_camelcase(f['name'])
if (not util.is_reply(camelcase_name) and not util.is_notification(name)) or util.is_ignored(name) \
or util.is_control_ping(camelcase_name):
continue
handler_registration.append(handler_registration_template.substitute(
name=name,
upercase_name=name.upper()))
return "".join(handler_registration)
jvpp_c_template = Template("""/**
* This file contains JNI bindings for jvpp Java API.
* It was generated by jvpp_c_gen.py based on $inputfile
* (python representation of api file generated by vppapigen).
*/
// JAVA class reference cache
$class_cache
// JNI bindings
$jni_implementations
// Message handlers
$msg_handlers
// Registration of message handlers in vlib
$handler_registration
""")
def generate_jvpp(func_list, plugin_name, inputfile):
""" Generates jvpp C file """
print "Generating jvpp C"
class_cache = generate_class_cache(func_list, plugin_name)
jni_impl = generate_jni_impl(func_list, plugin_name, inputfile)
msg_handlers = generate_msg_handlers(func_list, plugin_name, inputfile)
handler_registration = generate_handler_registration(func_list)
jvpp_c_file = open("jvpp_%s_gen.h" % plugin_name, 'w')
jvpp_c_file.write(jvpp_c_template.substitute(
inputfile=inputfile,
class_cache=class_cache,
jni_implementations=jni_impl,
msg_handlers=msg_handlers,
handler_registration=handler_registration))
jvpp_c_file.flush()
jvpp_c_file.close()
|
py | 1a35a8d241a584d2042a95741d1da29c59139335 | from dataclasses import dataclass, field
from typing import Optional
from .t_base_element import TBaseElement
from .t_formal_expression import TFormalExpression
from .t_implicit_throw_event import TImplicitThrowEvent
__NAMESPACE__ = "http://www.omg.org/spec/BPMN/20100524/MODEL"
@dataclass
class TComplexBehaviorDefinition(TBaseElement):
class Meta:
name = "tComplexBehaviorDefinition"
condition: Optional[TFormalExpression] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://www.omg.org/spec/BPMN/20100524/MODEL",
"required": True,
}
)
event: Optional[TImplicitThrowEvent] = field(
default=None,
metadata={
"type": "Element",
"namespace": "http://www.omg.org/spec/BPMN/20100524/MODEL",
}
)
|
py | 1a35a9e3d526c4e6cc32e6da85b25095dfc6126d | # ===============================================================================
# Copyright 2012 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from __future__ import absolute_import
from traits.api import HasTraits, cached_property, Property, Tuple, Any, Float
# ============= standard library imports ========================
# from numpy import array
# ============= local library imports ==========================
# from pychron.core.geometry.convex_hull import convex_hull_area
# from pychron.core.geometry.centroid import calculate_centroid
# from pychron.core.codetools.simple_timeit import timethis
# from pychron.core.geometry.convex_hull import convex_hull_area
# from pychron.core.geometry.centroid.calculate_centroid import calculate_centroid
class Target:
poly_points = None
bounding_rect = None
# threshold = None
area = 0
convex_hull_area = 0
origin = None
centroid = None
min_enclose_area = 0
pactual = 0
pconvex_hull = 0
@property
def dev_centroid(self):
return ((self.origin[0] - self.centroid[0]),
(self.origin[1] - self.centroid[1]))
@property
def aspect_ratio(self):
return self.bounding_rect.width / float(self.bounding_rect.height)
@property
def convexity(self):
# return True
return self.area / self.min_enclose_area
@property
def perimeter_convexity(self):
return self.pconvex_hull / self.pactual
# return self.
# r = timethis(convex_hull_area, args=(self.poly_points,))
# return self.area / convex_hull_area()
# return self.area / convex_hull_area(self.poly_points)
# @cached_property
# def _get_centroid(self):
# # pts = array([(pt.x, pt.y) for pt in self.poly_points], dtype=float)
# # v = timethis(calculate_centroid, args=(self.poly_points,))
# return calculate_centroid(self.poly_points)
# # print v
# return v
# @property
# def dev_br(self):
# return ((self.origin[0] - self.bounding_rect[0]),
# (self.origin[1] - self.bounding_rect[1]))
# @property
# def bounding_area(self):
# return self.bounding_rect[1] ** 2 * 3.1415
# print self.bounding_rect
# return 1
# return self.bounding_rect[2] * self.bounding_rect[3]
# ============= EOF =============================================
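# --- Hypothetical usage sketch (added for illustration, not part of the original file) ---
# Fills a Target by hand to show the derived properties above; the numbers are invented
# and the traits import at the top is assumed to be available.
if __name__ == '__main__':
    t = Target()
    t.origin = (100.0, 100.0)
    t.centroid = (98.0, 103.0)
    t.area = 50.0
    t.min_enclose_area = 80.0
    print(t.dev_centroid)  # (2.0, -3.0)
    print(t.convexity)     # 0.625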
|
py | 1a35ab9d931d0aca47e9df1b2111ffa6fd19e0ee | import os
from threading import Thread
def threaded_io():
print 'Thread running'
print 'Thread exiting'
print 'PID: ', os.getpid()
t = Thread(target=threaded_io)
t.start()
|
py | 1a35abb99ad97016a88f81a1f6caee8f2105d7b2 | import time
from typing import Dict
from starlette.applications import Starlette
from core import grow_counter, transport
app = Starlette(debug=True)
# Send updates every 2 seconds
UPDATE_INTERVAL = 2
class ViewCounter:
def __init__(self):
self.last_update = time.time()
self.state = grow_counter.new()
def increment(self):
self.state = grow_counter.increment(self.state)
def merge(self, updates: Dict):
self.state = grow_counter.merge(self.state, updates)
def reset_time(self):
self.last_update = time.time()
@property
def count(self) -> int:
return grow_counter.value(self.state)
def may_be_update(view_counter: ViewCounter):
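    # Throttle outgoing traffic: only broadcast the local state once per UPDATE_INTERVAL seconds.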
if time.time() - view_counter.last_update > UPDATE_INTERVAL:
view_counter.reset_time()
transport.send_updates(view_counter.state)
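# --- Hypothetical usage sketch (added for illustration, not part of the original module) ---
# Shows how a request handler might drive the counter. It only uses names defined above
# and assumes the `core.grow_counter` / `core.transport` backends imported at the top exist.
def _example_record_page_view(view_counter: ViewCounter) -> int:
    view_counter.increment()     # count this page view in the local counter state
    may_be_update(view_counter)  # push the state out at most every UPDATE_INTERVAL seconds
    return view_counter.count    # current merged view count seen by this node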
|
py | 1a35ada10a1ac405842e0660f568d77aa46438f1 | # coding: utf-8
from __future__ import absolute_import
# import DcsClient
from huaweicloudsdkdcs.v2.dcs_client import DcsClient
from huaweicloudsdkdcs.v2.dcs_async_client import DcsAsyncClient
# import models into sdk package
from huaweicloudsdkdcs.v2.model.attrs_object import AttrsObject
from huaweicloudsdkdcs.v2.model.autoscan_config_request import AutoscanConfigRequest
from huaweicloudsdkdcs.v2.model.available_zones import AvailableZones
from huaweicloudsdkdcs.v2.model.backup_files_body import BackupFilesBody
from huaweicloudsdkdcs.v2.model.backup_instance_body import BackupInstanceBody
from huaweicloudsdkdcs.v2.model.backup_plan import BackupPlan
from huaweicloudsdkdcs.v2.model.backup_policy import BackupPolicy
from huaweicloudsdkdcs.v2.model.backup_record_response import BackupRecordResponse
from huaweicloudsdkdcs.v2.model.batch_create_or_delete_tags_request import BatchCreateOrDeleteTagsRequest
from huaweicloudsdkdcs.v2.model.batch_create_or_delete_tags_response import BatchCreateOrDeleteTagsResponse
from huaweicloudsdkdcs.v2.model.batch_delete_body import BatchDeleteBody
from huaweicloudsdkdcs.v2.model.batch_delete_instances_request import BatchDeleteInstancesRequest
from huaweicloudsdkdcs.v2.model.batch_delete_instances_response import BatchDeleteInstancesResponse
from huaweicloudsdkdcs.v2.model.batch_ops_result import BatchOpsResult
from huaweicloudsdkdcs.v2.model.bigkeys_body import BigkeysBody
from huaweicloudsdkdcs.v2.model.bss_param import BssParam
from huaweicloudsdkdcs.v2.model.change_instance_status_body import ChangeInstanceStatusBody
from huaweicloudsdkdcs.v2.model.change_master_standby_request import ChangeMasterStandbyRequest
from huaweicloudsdkdcs.v2.model.change_master_standby_response import ChangeMasterStandbyResponse
from huaweicloudsdkdcs.v2.model.cluster_redis_node_monitored_object import ClusterRedisNodeMonitoredObject
from huaweicloudsdkdcs.v2.model.copy_instance_request import CopyInstanceRequest
from huaweicloudsdkdcs.v2.model.copy_instance_response import CopyInstanceResponse
from huaweicloudsdkdcs.v2.model.create_bigkey_scan_task_request import CreateBigkeyScanTaskRequest
from huaweicloudsdkdcs.v2.model.create_bigkey_scan_task_response import CreateBigkeyScanTaskResponse
from huaweicloudsdkdcs.v2.model.create_hotkey_scan_task_request import CreateHotkeyScanTaskRequest
from huaweicloudsdkdcs.v2.model.create_hotkey_scan_task_response import CreateHotkeyScanTaskResponse
from huaweicloudsdkdcs.v2.model.create_instance_body import CreateInstanceBody
from huaweicloudsdkdcs.v2.model.create_instance_request import CreateInstanceRequest
from huaweicloudsdkdcs.v2.model.create_instance_response import CreateInstanceResponse
from huaweicloudsdkdcs.v2.model.create_migration_task_body import CreateMigrationTaskBody
from huaweicloudsdkdcs.v2.model.create_migration_task_request import CreateMigrationTaskRequest
from huaweicloudsdkdcs.v2.model.create_migration_task_response import CreateMigrationTaskResponse
from huaweicloudsdkdcs.v2.model.create_or_delete_instance_tags import CreateOrDeleteInstanceTags
from huaweicloudsdkdcs.v2.model.delete_background_task_request import DeleteBackgroundTaskRequest
from huaweicloudsdkdcs.v2.model.delete_background_task_response import DeleteBackgroundTaskResponse
from huaweicloudsdkdcs.v2.model.delete_backup_file_request import DeleteBackupFileRequest
from huaweicloudsdkdcs.v2.model.delete_backup_file_response import DeleteBackupFileResponse
from huaweicloudsdkdcs.v2.model.delete_bigkey_scan_task_request import DeleteBigkeyScanTaskRequest
from huaweicloudsdkdcs.v2.model.delete_bigkey_scan_task_response import DeleteBigkeyScanTaskResponse
from huaweicloudsdkdcs.v2.model.delete_hotkey_scan_task_request import DeleteHotkeyScanTaskRequest
from huaweicloudsdkdcs.v2.model.delete_hotkey_scan_task_response import DeleteHotkeyScanTaskResponse
from huaweicloudsdkdcs.v2.model.delete_ip_from_domain_name_request import DeleteIpFromDomainNameRequest
from huaweicloudsdkdcs.v2.model.delete_ip_from_domain_name_response import DeleteIpFromDomainNameResponse
from huaweicloudsdkdcs.v2.model.delete_migrate_task_request import DeleteMigrateTaskRequest
from huaweicloudsdkdcs.v2.model.delete_migration_task_request import DeleteMigrationTaskRequest
from huaweicloudsdkdcs.v2.model.delete_migration_task_response import DeleteMigrationTaskResponse
from huaweicloudsdkdcs.v2.model.delete_single_instance_request import DeleteSingleInstanceRequest
from huaweicloudsdkdcs.v2.model.delete_single_instance_response import DeleteSingleInstanceResponse
from huaweicloudsdkdcs.v2.model.details_body import DetailsBody
from huaweicloudsdkdcs.v2.model.dim_child import DimChild
from huaweicloudsdkdcs.v2.model.download_backup_files_req import DownloadBackupFilesReq
from huaweicloudsdkdcs.v2.model.files import Files
from huaweicloudsdkdcs.v2.model.flavor_az_object import FlavorAzObject
from huaweicloudsdkdcs.v2.model.flavors_items import FlavorsItems
from huaweicloudsdkdcs.v2.model.hotkeys_body import HotkeysBody
from huaweicloudsdkdcs.v2.model.instance_group_list_info import InstanceGroupListInfo
from huaweicloudsdkdcs.v2.model.instance_list_info import InstanceListInfo
from huaweicloudsdkdcs.v2.model.instance_replication_dimensions_info import InstanceReplicationDimensionsInfo
from huaweicloudsdkdcs.v2.model.instance_replication_list_info import InstanceReplicationListInfo
from huaweicloudsdkdcs.v2.model.instance_restore_info import InstanceRestoreInfo
from huaweicloudsdkdcs.v2.model.instance_statistic import InstanceStatistic
from huaweicloudsdkdcs.v2.model.instances import Instances
from huaweicloudsdkdcs.v2.model.instances_monitored_object import InstancesMonitoredObject
from huaweicloudsdkdcs.v2.model.links_item import LinksItem
from huaweicloudsdkdcs.v2.model.list_available_zones_request import ListAvailableZonesRequest
from huaweicloudsdkdcs.v2.model.list_available_zones_response import ListAvailableZonesResponse
from huaweicloudsdkdcs.v2.model.list_background_task_request import ListBackgroundTaskRequest
from huaweicloudsdkdcs.v2.model.list_background_task_response import ListBackgroundTaskResponse
from huaweicloudsdkdcs.v2.model.list_backup_file_links_request import ListBackupFileLinksRequest
from huaweicloudsdkdcs.v2.model.list_backup_file_links_response import ListBackupFileLinksResponse
from huaweicloudsdkdcs.v2.model.list_backup_records_request import ListBackupRecordsRequest
from huaweicloudsdkdcs.v2.model.list_backup_records_response import ListBackupRecordsResponse
from huaweicloudsdkdcs.v2.model.list_bigkey_scan_tasks_request import ListBigkeyScanTasksRequest
from huaweicloudsdkdcs.v2.model.list_bigkey_scan_tasks_response import ListBigkeyScanTasksResponse
from huaweicloudsdkdcs.v2.model.list_configurations_request import ListConfigurationsRequest
from huaweicloudsdkdcs.v2.model.list_configurations_response import ListConfigurationsResponse
from huaweicloudsdkdcs.v2.model.list_flavors_request import ListFlavorsRequest
from huaweicloudsdkdcs.v2.model.list_flavors_response import ListFlavorsResponse
from huaweicloudsdkdcs.v2.model.list_group_replication_info_request import ListGroupReplicationInfoRequest
from huaweicloudsdkdcs.v2.model.list_group_replication_info_response import ListGroupReplicationInfoResponse
from huaweicloudsdkdcs.v2.model.list_hot_key_scan_tasks_request import ListHotKeyScanTasksRequest
from huaweicloudsdkdcs.v2.model.list_hot_key_scan_tasks_response import ListHotKeyScanTasksResponse
from huaweicloudsdkdcs.v2.model.list_instances_request import ListInstancesRequest
from huaweicloudsdkdcs.v2.model.list_instances_response import ListInstancesResponse
from huaweicloudsdkdcs.v2.model.list_maintenance_windows_request import ListMaintenanceWindowsRequest
from huaweicloudsdkdcs.v2.model.list_maintenance_windows_response import ListMaintenanceWindowsResponse
from huaweicloudsdkdcs.v2.model.list_migration_task_request import ListMigrationTaskRequest
from huaweicloudsdkdcs.v2.model.list_migration_task_response import ListMigrationTaskResponse
from huaweicloudsdkdcs.v2.model.list_monitored_objects_of_instance_request import ListMonitoredObjectsOfInstanceRequest
from huaweicloudsdkdcs.v2.model.list_monitored_objects_of_instance_response import ListMonitoredObjectsOfInstanceResponse
from huaweicloudsdkdcs.v2.model.list_monitored_objects_request import ListMonitoredObjectsRequest
from huaweicloudsdkdcs.v2.model.list_monitored_objects_response import ListMonitoredObjectsResponse
from huaweicloudsdkdcs.v2.model.list_number_of_instances_in_different_status_request import ListNumberOfInstancesInDifferentStatusRequest
from huaweicloudsdkdcs.v2.model.list_number_of_instances_in_different_status_response import ListNumberOfInstancesInDifferentStatusResponse
from huaweicloudsdkdcs.v2.model.list_restore_records_request import ListRestoreRecordsRequest
from huaweicloudsdkdcs.v2.model.list_restore_records_response import ListRestoreRecordsResponse
from huaweicloudsdkdcs.v2.model.list_slowlog_request import ListSlowlogRequest
from huaweicloudsdkdcs.v2.model.list_slowlog_response import ListSlowlogResponse
from huaweicloudsdkdcs.v2.model.list_statistics_of_running_instances_request import ListStatisticsOfRunningInstancesRequest
from huaweicloudsdkdcs.v2.model.list_statistics_of_running_instances_response import ListStatisticsOfRunningInstancesResponse
from huaweicloudsdkdcs.v2.model.list_tags_of_tenant_request import ListTagsOfTenantRequest
from huaweicloudsdkdcs.v2.model.list_tags_of_tenant_response import ListTagsOfTenantResponse
from huaweicloudsdkdcs.v2.model.maintain_windows_entity import MaintainWindowsEntity
from huaweicloudsdkdcs.v2.model.migration_task_list import MigrationTaskList
from huaweicloudsdkdcs.v2.model.modify_instance_body import ModifyInstanceBody
from huaweicloudsdkdcs.v2.model.modify_instance_password_body import ModifyInstancePasswordBody
from huaweicloudsdkdcs.v2.model.modify_ip_whitelist_body import ModifyIpWhitelistBody
from huaweicloudsdkdcs.v2.model.modify_redis_config_body import ModifyRedisConfigBody
from huaweicloudsdkdcs.v2.model.priority_body import PriorityBody
from huaweicloudsdkdcs.v2.model.proxy_node_monitored_object import ProxyNodeMonitoredObject
from huaweicloudsdkdcs.v2.model.query_redis_config import QueryRedisConfig
from huaweicloudsdkdcs.v2.model.query_tenant_quota_resp_quotas import QueryTenantQuotaRespQuotas
from huaweicloudsdkdcs.v2.model.records_response import RecordsResponse
from huaweicloudsdkdcs.v2.model.redis_config import RedisConfig
from huaweicloudsdkdcs.v2.model.resource_tag import ResourceTag
from huaweicloudsdkdcs.v2.model.resources import Resources
from huaweicloudsdkdcs.v2.model.restart_or_flush_instances_request import RestartOrFlushInstancesRequest
from huaweicloudsdkdcs.v2.model.restart_or_flush_instances_response import RestartOrFlushInstancesResponse
from huaweicloudsdkdcs.v2.model.restore_instance_body import RestoreInstanceBody
from huaweicloudsdkdcs.v2.model.restore_instance_request import RestoreInstanceRequest
from huaweicloudsdkdcs.v2.model.restore_instance_response import RestoreInstanceResponse
from huaweicloudsdkdcs.v2.model.show_bigkey_autoscan_config_request import ShowBigkeyAutoscanConfigRequest
from huaweicloudsdkdcs.v2.model.show_bigkey_autoscan_config_response import ShowBigkeyAutoscanConfigResponse
from huaweicloudsdkdcs.v2.model.show_bigkey_scan_task_details_request import ShowBigkeyScanTaskDetailsRequest
from huaweicloudsdkdcs.v2.model.show_bigkey_scan_task_details_response import ShowBigkeyScanTaskDetailsResponse
from huaweicloudsdkdcs.v2.model.show_hotkey_autoscan_config_request import ShowHotkeyAutoscanConfigRequest
from huaweicloudsdkdcs.v2.model.show_hotkey_autoscan_config_response import ShowHotkeyAutoscanConfigResponse
from huaweicloudsdkdcs.v2.model.show_hotkey_task_details_request import ShowHotkeyTaskDetailsRequest
from huaweicloudsdkdcs.v2.model.show_hotkey_task_details_response import ShowHotkeyTaskDetailsResponse
from huaweicloudsdkdcs.v2.model.show_instance_request import ShowInstanceRequest
from huaweicloudsdkdcs.v2.model.show_instance_response import ShowInstanceResponse
from huaweicloudsdkdcs.v2.model.show_ip_whitelist_request import ShowIpWhitelistRequest
from huaweicloudsdkdcs.v2.model.show_ip_whitelist_response import ShowIpWhitelistResponse
from huaweicloudsdkdcs.v2.model.show_migration_task_request import ShowMigrationTaskRequest
from huaweicloudsdkdcs.v2.model.show_migration_task_response import ShowMigrationTaskResponse
from huaweicloudsdkdcs.v2.model.show_migration_task_stats_request import ShowMigrationTaskStatsRequest
from huaweicloudsdkdcs.v2.model.show_migration_task_stats_response import ShowMigrationTaskStatsResponse
from huaweicloudsdkdcs.v2.model.show_quota_of_tenant_request import ShowQuotaOfTenantRequest
from huaweicloudsdkdcs.v2.model.show_quota_of_tenant_response import ShowQuotaOfTenantResponse
from huaweicloudsdkdcs.v2.model.show_tags_request import ShowTagsRequest
from huaweicloudsdkdcs.v2.model.show_tags_response import ShowTagsResponse
from huaweicloudsdkdcs.v2.model.single_background_task import SingleBackgroundTask
from huaweicloudsdkdcs.v2.model.slowlog_item import SlowlogItem
from huaweicloudsdkdcs.v2.model.source_instance_body import SourceInstanceBody
from huaweicloudsdkdcs.v2.model.status_statistic import StatusStatistic
from huaweicloudsdkdcs.v2.model.stop_migration_task_request import StopMigrationTaskRequest
from huaweicloudsdkdcs.v2.model.stop_migration_task_response import StopMigrationTaskResponse
from huaweicloudsdkdcs.v2.model.tag import Tag
from huaweicloudsdkdcs.v2.model.target_instance_body import TargetInstanceBody
from huaweicloudsdkdcs.v2.model.update_bigkey_autoscan_config_request import UpdateBigkeyAutoscanConfigRequest
from huaweicloudsdkdcs.v2.model.update_bigkey_autoscan_config_response import UpdateBigkeyAutoscanConfigResponse
from huaweicloudsdkdcs.v2.model.update_configurations_request import UpdateConfigurationsRequest
from huaweicloudsdkdcs.v2.model.update_configurations_response import UpdateConfigurationsResponse
from huaweicloudsdkdcs.v2.model.update_hotkey_auto_scan_config_request import UpdateHotkeyAutoScanConfigRequest
from huaweicloudsdkdcs.v2.model.update_hotkey_auto_scan_config_response import UpdateHotkeyAutoScanConfigResponse
from huaweicloudsdkdcs.v2.model.update_instance_request import UpdateInstanceRequest
from huaweicloudsdkdcs.v2.model.update_instance_response import UpdateInstanceResponse
from huaweicloudsdkdcs.v2.model.update_ip_whitelist_request import UpdateIpWhitelistRequest
from huaweicloudsdkdcs.v2.model.update_ip_whitelist_response import UpdateIpWhitelistResponse
from huaweicloudsdkdcs.v2.model.update_password_request import UpdatePasswordRequest
from huaweicloudsdkdcs.v2.model.update_password_response import UpdatePasswordResponse
from huaweicloudsdkdcs.v2.model.update_slave_priority_request import UpdateSlavePriorityRequest
from huaweicloudsdkdcs.v2.model.update_slave_priority_response import UpdateSlavePriorityResponse
from huaweicloudsdkdcs.v2.model.whitelist import Whitelist
|
py | 1a35add60b7ed8664eef7e3ebdd5baf12eadc3ac | from __future__ import absolute_import, division, unicode_literals
from collections import OrderedDict
import re
from pip._vendor.six import string_types
from . import base
from .._utils import moduleFactoryFactory
tag_regexp = re.compile("{([^}]*)}(.*)")
def getETreeBuilder(ElementTreeImplementation):
ElementTree = ElementTreeImplementation
ElementTreeCommentType = ElementTree.Comment("asd").tag
class TreeWalker(base.NonRecursiveTreeWalker): # pylint:disable=unused-variable
"""Given the particular ElementTree representation, this implementation,
to avoid using recursion, returns "nodes" as tuples with the following
content:
1. The current element
2. The index of the element relative to its parent
3. A stack of ancestor elements
4. A flag "text", "tail" or None to indicate if the current node is a
text node; either the text or tail of the current element (1)
"""
def getNodeDetails(self, node):
if isinstance(node, tuple): # It might be the root Element
elt, _, _, flag = node
if flag in ("text", "tail"):
return base.TEXT, getattr(elt, flag)
else:
node = elt
if not(hasattr(node, "tag")):
node = node.getroot()
if node.tag in ("DOCUMENT_ROOT", "DOCUMENT_FRAGMENT"):
return (base.DOCUMENT,)
elif node.tag == "<!DOCTYPE>":
return (base.DOCTYPE, node.text,
node.get("publicId"), node.get("systemId"))
elif node.tag == ElementTreeCommentType:
return base.COMMENT, node.text
else:
assert isinstance(node.tag, string_types), type(node.tag)
# This is assumed to be an ordinary element
match = tag_regexp.match(node.tag)
if match:
namespace, tag = match.groups()
else:
namespace = None
tag = node.tag
attrs = OrderedDict()
for name, value in list(node.attrib.items()):
match = tag_regexp.match(name)
if match:
attrs[(match.group(1), match.group(2))] = value
else:
attrs[(None, name)] = value
return (base.ELEMENT, namespace, tag,
attrs, len(node) or node.text)
def getFirstChild(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
element, key, parents, flag = node, None, [], None
if flag in ("text", "tail"):
return None
else:
if element.text:
return element, key, parents, "text"
elif len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
def getNextSibling(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if len(element):
parents.append(element)
return element[0], 0, parents, None
else:
return None
else:
if element.tail and flag != "tail":
return element, key, parents, "tail"
elif key < len(parents[-1]) - 1:
return parents[-1][key + 1], key + 1, parents, None
else:
return None
def getParentNode(self, node):
if isinstance(node, tuple):
element, key, parents, flag = node
else:
return None
if flag == "text":
if not parents:
return element
else:
return element, key, parents, None
else:
parent = parents.pop()
if not parents:
return parent
else:
assert list(parents[-1]).count(parent) == 1
return parent, list(parents[-1]).index(parent), parents, None
return locals()
getETreeModule = moduleFactoryFactory(getETreeBuilder)
|
py | 1a35af048b7c97a2b523556b587a1c11fdbc7946 | #!/usr/bin/env python3
import argparse
import json
import urllib.parse
from collections import defaultdict
from oic.oic import Client, RegistrationResponse
from oic.oic.message import AuthorizationResponse
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
from oic import rndstr
from http.server import HTTPServer, BaseHTTPRequestHandler
from http import HTTPStatus
class RequestHandler(BaseHTTPRequestHandler):
def do_GET(self):
url = urllib.parse.urlparse(self.path)
if url.path == "/":
self._handle_initial()
elif url.path.startswith("/callback/"):
self._handle_callback()
else:
self.return_redirect("/")
def _handle_initial(self):
global session
# setup oic code flow
session["state"] = rndstr()
session["nonce"] = rndstr()
auth_req = client.construct_AuthorizationRequest(request_args={
"response_type": "code",
"scope": cmd_args.scope,
"state": session["state"],
"nonce": session["nonce"],
"redirect_uri": f"http://{self.server.server_address[0]}:{self.server.server_address[1]}/callback/"
})
login_url = auth_req.request(client.authorization_endpoint)
# send response
self.return_redirect(login_url)
def _handle_callback(self):
global session
# parse callback
auth_response = client.parse_response(AuthorizationResponse, info=self.path, sformat="urlencoded")
if auth_response["state"] != session["state"]:
self.send_error(HTTPStatus.BAD_REQUEST, "invalid state", explain="The state of the callback does not match in-memory state")
return
# exchange received code for proper access and refresh tokens
token_response = client.do_access_token_request(scope=cmd_args.scope, state=session["state"],
request_args={"code": auth_response["code"]})
# retrieve user information with newly received access token
userinfo = client.do_user_info_request(state=session["state"], scope=cmd_args.scope)
# output data
self.return_json_response({
"token_response": token_response.to_dict(),
"userinfo": userinfo.to_dict(),
})
print("===============================================================")
print(f"token_type: {token_response.get('token_type')}")
print("access_token:")
print(token_response.get("access_token"))
print("===============================================================")
def return_redirect(self, to: str, code: int = HTTPStatus.FOUND):
self.send_response(code)
self.send_header("location", to)
self.end_headers()
def return_json_response(self, content: dict):
self.send_response(HTTPStatus.OK)
self.send_header("Content-Type", "application/json")
self.end_headers()
self.wfile.write(json.dumps(content).encode("UTF-8"))
if __name__ == "__main__":
parser = argparse.ArgumentParser("get_tokens", description="Retrieves access and id tokens from Mafiasi Identity")
parser.add_argument("--issuer", help="OpenId Connect issuer. Defaults to Mafiasi",
default="https://identity.mafiasi.de/auth/realms/mafiasi")
parser.add_argument("--client-id", help="OpenId Connect client id. Defaults to dev-client",
default="dev-client")
parser.add_argument("--client-secret", help="OpenId Connect client secret. Defaults to dev-client's secret",
default="bb0c83bc-1dd9-4946-a074-d452bc1fb830")
parser.add_argument("--scope", help="OpenID scopes to request",
action="append", default=["openid"])
cmd_args = parser.parse_args()
# initialize openid client
client = Client(client_id=cmd_args.client_id, client_authn_method=CLIENT_AUTHN_METHOD)
client.provider_config(cmd_args.issuer)
client.store_registration_info(RegistrationResponse(client_id=cmd_args.client_id, client_secret=cmd_args.client_secret))
# initialize a session object (which is very primitive but works)
session = defaultdict(lambda: "")
# serve a basic http server so that authorization code flow can be used
with HTTPServer(("127.0.0.1", 8080), RequestHandler) as server:
print(f"Open http://{server.server_name}:{server.server_port}")
try:
server.serve_forever()
except KeyboardInterrupt:
pass
|
py | 1a35af65d4b694dd7146d9af1e9fc9b4b75d4bc6 | import uuid
from datetime import datetime, timedelta
from app import db, encryption
from app.models import ApiKey
from app.dao.dao_utils import (
transactional,
version_class
)
from sqlalchemy import or_, func
from sqlalchemy.orm import joinedload
@transactional
@version_class(ApiKey)
def save_model_api_key(api_key):
if not api_key.id:
api_key.id = uuid.uuid4() # must be set now so version history model can use same id
api_key.secret = uuid.uuid4()
db.session.add(api_key)
@transactional
@version_class(ApiKey)
def expire_api_key(service_id, api_key_id):
api_key = ApiKey.query.filter_by(id=api_key_id, service_id=service_id).one()
api_key.expiry_date = datetime.utcnow()
db.session.add(api_key)
def get_api_key_by_secret(secret):
return db.on_reader().query(ApiKey).filter_by(
_secret=encryption.encrypt(str(secret))
).options(joinedload('service')).one()
def get_model_api_keys(service_id, id=None):
if id:
return ApiKey.query.filter_by(id=id, service_id=service_id, expiry_date=None).one()
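    # Otherwise return keys that are still active or that expired within the last seven days.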
seven_days_ago = datetime.utcnow() - timedelta(days=7)
return ApiKey.query.filter(
or_(ApiKey.expiry_date == None, func.date(ApiKey.expiry_date) > seven_days_ago), # noqa
ApiKey.service_id == service_id
).all()
def get_unsigned_secrets(service_id):
"""
    This method should only be exposed to the authentication layer for API calls.
"""
api_keys = ApiKey.query.filter_by(service_id=service_id, expiry_date=None).all()
keys = [x.secret for x in api_keys]
return keys
def get_unsigned_secret(key_id):
"""
    This method should only be exposed to the authentication layer for API calls.
"""
api_key = ApiKey.query.filter_by(id=key_id, expiry_date=None).one()
return api_key.secret
|
py | 1a35b0c5d04c8693bb0287ad2ca52b554f547915 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from rapidsms.models import Contact, Connection
@python_2_unicode_compatible
class Message(models.Model):
INCOMING = "I"
OUTGOING = "O"
DIRECTION_CHOICES = (
(INCOMING, "Incoming"),
(OUTGOING, "Outgoing"),
)
contact = models.ForeignKey(Contact, blank=True, null=True)
connection = models.ForeignKey(Connection, blank=True, null=True)
direction = models.CharField(max_length=1, choices=DIRECTION_CHOICES)
date = models.DateTimeField()
text = models.TextField()
class Meta:
app_label = 'messagelog'
def save(self, *args, **kwargs):
"""
Verifies that one (not both) of the contact or connection fields
have been populated (raising ValidationError if not), and saves
the object as usual.
"""
if self.contact is None and self.connection is None:
raise ValidationError("A valid (not null) contact or connection "
"(but not both) must be provided to save the object.")
elif self.connection and self.contact and \
(self.contact != self.connection.contact):
raise ValidationError("The connection and contact you tried to "
"save did not match! You need to pick one or the other.")
if self.connection and self.connection.contact is not None:
# set the contact here as well, even if they didn't
# do it explicitly. If the contact's number changes
# we still might want to know who it originally came
# in from.
self.contact = self.connection.contact
super(Message, self).save(*args, **kwargs)
@property
def who(self):
"""Returns the Contact or Connection linked to this object."""
return self.contact or self.connection
def __str__(self):
# crop the text (to avoid exploding the admin)
text = self.text if len(self.text) < 60 else "%s..." % self.text[0:57]
direction = "to" if self.direction == self.INCOMING else "from"
return "%s (%s %s)" % (text, direction, self.who)
|
py | 1a35b0fa894d2c9f824c09ab4fe5be45dc30b264 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.providers.apache.druid.operators.druid`."""
import warnings
# pylint: disable=unused-import
from airflow.providers.apache.druid.operators.druid import DruidOperator # noqa
warnings.warn(
"This module is deprecated. Please use `airflow.providers.apache.druid.operators.druid`.",
DeprecationWarning,
stacklevel=2,
)
|
py | 1a35b1fed8504297eeb357d34da72d1a1e66d57f | from typing import FrozenSet, Tuple
import pysmt.typing as types
from pysmt.environment import Environment as PysmtEnv
from pysmt.fnode import FNode
from utils import symb_to_next
from hint import Hint, Location
def transition_system(env: PysmtEnv) -> Tuple[FrozenSet[FNode], FNode, FNode,
FNode]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
symbols = frozenset([pc, x, y, z])
n_locs = 5
int_bound = n_locs
pcs = []
x_pcs = []
ints = [mgr.Int(i) for i in range(int_bound)]
for l in range(n_locs):
n = ints[l]
pcs.append(mgr.Equals(pc, n))
x_pcs.append(mgr.Equals(x_pc, n))
m_1 = mgr.Int(-1)
pcend = mgr.Equals(pc, m_1)
x_pcend = mgr.Equals(x_pc, m_1)
# initial location.
init = pcs[0]
# control flow graph.
cfg = mgr.And(
# pc = -1 : -1,
mgr.Implies(pcend, x_pcend),
# pc = 0 & !(y >= 1) : -1,
mgr.Implies(mgr.And(pcs[0], mgr.Not(mgr.GE(y, ints[1]))), x_pcend),
# pc = 0 & y >= 1 : 1,
mgr.Implies(mgr.And(pcs[0], mgr.GE(y, ints[1])), x_pcs[1]),
# pc = 1 & !(z >= 1) : -1,
mgr.Implies(mgr.And(pcs[1], mgr.Not(mgr.GE(z, ints[1]))), x_pcend),
# pc = 1 & z >= 1 : 2,
mgr.Implies(mgr.And(pcs[1], mgr.GE(z, ints[1])), x_pcs[2]),
# pc = 2 & !(x >= 0) : -1,
mgr.Implies(mgr.And(pcs[2], mgr.Not(mgr.GE(x, ints[0]))), x_pcend),
# pc = 2 & x >= 0 : 3,
mgr.Implies(mgr.And(pcs[2], mgr.GE(x, ints[0])), x_pcs[3]),
# pc = 3 : 4,
mgr.Implies(pcs[3], x_pcs[4]),
# pc = 4 : 2,
mgr.Implies(pcs[4], x_pcs[2]))
# transition labels.
labels = mgr.And(
# (pc = -1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcend, x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 0 & pc' = 1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[0], x_pcs[1]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 1 & pc' = 2) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[1], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = -1) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcend),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 2 & pc' = 3) -> (x' = x & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[2], x_pcs[3]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, y),
mgr.Equals(x_z, z))),
# (pc = 3 & pc' = 4) -> (x' = y*z - 1 & y' = y & z' = z),
mgr.Implies(
mgr.And(pcs[3], x_pcs[4]),
mgr.And(mgr.Equals(x_x, mgr.Minus(mgr.Times(y, z), ints[1])),
mgr.Equals(x_y, y), mgr.Equals(x_z, z))),
# (pc = 4 & pc' = 2) -> (x' = x & y' = y+1 & z' = z),
mgr.Implies(
mgr.And(pcs[4], x_pcs[2]),
mgr.And(mgr.Equals(x_x, x), mgr.Equals(x_y, mgr.Plus(y, ints[1])),
mgr.Equals(x_z, z))))
# transition relation.
trans = mgr.And(cfg, labels)
# fairness.
fairness = mgr.Not(pcend)
return symbols, init, trans, fairness
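# Usage sketch (illustrative, not part of the benchmark): build the transition system in a
# fresh environment and print its pieces; it relies only on the imports at the top of this
# file, and the helper name is hypothetical.
def _transition_system_example():
    env = PysmtEnv()
    symbols, init, trans, fairness = transition_system(env)
    print(sorted(s.symbol_name() for s in symbols))  # ['pc', 'x', 'y', 'z']
    print(init.serialize())                          # the pc = 0 initial condition
    print(fairness.serialize())                      # negation of the pc = -1 sink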
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
assert isinstance(env, PysmtEnv)
mgr = env.formula_manager
pc = mgr.Symbol("pc", types.INT)
x = mgr.Symbol("x", types.INT)
y = mgr.Symbol("y", types.INT)
z = mgr.Symbol("z", types.INT)
symbs = frozenset([pc, x, y, z])
x_pc = symb_to_next(mgr, pc)
x_x = symb_to_next(mgr, x)
x_y = symb_to_next(mgr, y)
x_z = symb_to_next(mgr, z)
res = []
i_0 = mgr.Int(0)
i_1 = mgr.Int(1)
i_2 = mgr.Int(2)
i_3 = mgr.Int(3)
loc0 = Location(env, mgr.GE(z, i_3), mgr.GE(y, i_0))
loc0.set_progress(1, mgr.Equals(x_z, y))
loc1 = Location(env, mgr.GE(z, i_0), mgr.GE(x, i_3))
loc1.set_progress(0, mgr.GE(x_z, mgr.Plus(z, x)))
h_z = Hint("h_z3", env, frozenset([z]), symbs)
h_z.set_locs([loc0, loc1])
res.append(h_z)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GT(x, i_0), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)),
stutterT=stutter)
loc.set_progress(0, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_2)))
h_x = Hint("h_x1", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.GT(x, i_3), mgr.And(mgr.GT(y, i_1), mgr.GT(z, i_1)))
loc0.set_progress(1, mgr.GE(x_x, mgr.Minus(mgr.Times(y, z), i_1)))
loc1 = Location(env, mgr.GT(x, i_0), mgr.GE(y, i_1))
loc1.set_progress(2, mgr.Equals(x_x, mgr.Plus(x, y)))
loc2 = Location(env, mgr.GT(x, i_3))
loc2.set_progress(2, mgr.Equals(x_x, x))
h_x = Hint("h_x4", env, frozenset([x]), symbs)
h_x.set_locs([loc0, loc1, loc2])
res.append(h_x)
loc0 = Location(env, mgr.Equals(pc, i_2))
loc0.set_progress(1, mgr.GT(x_pc, i_2))
loc1 = Location(env, mgr.GE(pc, i_3))
loc1.set_progress(2, mgr.GE(x_pc, i_3))
loc2 = Location(env, mgr.GE(pc, i_3))
loc2.set_progress(0, mgr.Equals(x_pc, i_2))
h_pc = Hint("h_pc4", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1, loc2])
res.append(h_pc)
stutter = mgr.Equals(x_x, x)
loc = Location(env, mgr.GT(x, i_0), mgr.GT(y, i_0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_x, mgr.Times(x, y)))
h_x = Hint("h_x0", env, frozenset([x]), symbs)
h_x.set_locs([loc])
res.append(h_x)
loc0 = Location(env, mgr.Equals(pc, i_1))
loc0.set_progress(1, mgr.Equals(x_pc, i_2))
loc1 = Location(env, mgr.Equals(pc, i_2))
loc1.set_progress(2, mgr.Equals(x_pc, i_3))
loc2 = Location(env, mgr.Equals(pc, i_3))
loc2.set_progress(0, mgr.Equals(x_pc, i_1))
h_pc = Hint("h_pc0", env, frozenset([pc]), symbs)
h_pc.set_locs([loc0, loc1, loc2])
res.append(h_pc)
stutter = mgr.Equals(x_y, y)
loc = Location(env, mgr.TRUE(), mgr.LE(x, i_0), stutterT=stutter)
loc.set_progress(0, mgr.Equals(x_y, mgr.Minus(y, i_1)))
h_y = Hint("h_y0", env, frozenset([y]), symbs)
h_y.set_locs([loc])
res.append(h_y)
return frozenset(res)
|
py | 1a35b3f6de9d6ea2e2425efbf97f147353bd5e11 | import re
from streamlink.plugin import Plugin, pluginmatcher
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream
@pluginmatcher(re.compile(
r"https?://live\.line\.me/channels/(?P<channel>\d+)/broadcast/(?P<broadcast>\d+)"
))
class LineLive(Plugin):
_api_url = "https://live-api.line-apps.com/app/v3.2/channel/{0}/broadcast/{1}/player_status"
_player_status_schema = validate.Schema(
{
"liveStatus": validate.text,
"liveHLSURLs": validate.any(None, {
"720": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"480": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"360": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"240": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"144": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
}),
"archivedHLSURLs": validate.any(None, {
"720": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"480": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"360": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"240": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
"144": validate.any(None, validate.url(scheme="http", path=validate.endswith(".m3u8"))),
}),
})
def _get_live_streams(self, json):
for stream in json["liveHLSURLs"]:
url = json["liveHLSURLs"][stream]
if url is not None:
yield "{0}p.".format(stream), HLSStream(self.session, url)
def _get_vod_streams(self, json):
for stream in json["archivedHLSURLs"]:
url = json["archivedHLSURLs"][stream]
if url is not None:
yield "{0}p.".format(stream), HLSStream(self.session, url)
def _get_streams(self):
channel = self.match.group("channel")
broadcast = self.match.group("broadcast")
res = self.session.http.get(self._api_url.format(channel, broadcast))
json = self.session.http.json(res, schema=self._player_status_schema)
if json["liveStatus"] == "LIVE":
return self._get_live_streams(json)
elif json["liveStatus"] == "FINISHED":
return self._get_vod_streams(json)
return
__plugin__ = LineLive
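# Standalone sketch (illustrative, not used by the plugin): the quality-name scheme applied
# in _get_live_streams/_get_vod_streams above, i.e. "720" becomes "720p." and None entries
# are skipped; the helper name and sample URL are hypothetical.
def _quality_names_example(hls_urls):
    return ["{0}p.".format(q) for q, url in hls_urls.items() if url is not None]
# _quality_names_example({"720": "https://example.invalid/a.m3u8", "480": None}) == ["720p."]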
|
py | 1a35b4358fe5ef267ddba8555c45722ce861e035 | import os
import re
from bentoml.service import BentoServiceArtifact
JSON_ARTIFACT_EXTENSION = ".json"
class JSONArtifact(BentoServiceArtifact):
"""Abstraction for saving/loading objects to/from JSON files.
Args:
name (str): Name of the artifact
        encoding (:obj:`str`, optional): The encoding used for saving/loading
            text. Defaults to "utf-8".
json_module (module|object, optional): Namespace/object implementing `loads()`
and `dumps()` methods for serializing/deserializing to/from JSON string.
Defaults to stdlib's json module.
"""
def __init__(self, name, encoding="utf-8", json_module=None):
super().__init__(name)
self._content = None
self._json_dumps_kwargs = None
self._encoding = encoding
if json_module:
self.json_module = json_module
else:
import json
self.json_module = json
def _file_path(self, base_path):
return os.path.join(
base_path,
re.sub("[^-a-zA-Z0-9_.() ]+", "", self.name) + JSON_ARTIFACT_EXTENSION,
)
def load(self, path):
with open(self._file_path(path), "rt", encoding=self._encoding) as fp:
content = self.json_module.loads(fp.read())
return self.pack(content)
def pack(
self, content, metadata=None, **json_dumps_kwargs
): # pylint:disable=arguments-renamed
self._content = content
self._json_dumps_kwargs = json_dumps_kwargs
return self
def get(self):
return self._content
def save(self, dst):
with open(self._file_path(dst), "wt", encoding=self._encoding) as fp:
fp.write(self.json_module.dumps(self._content, **self._json_dumps_kwargs))
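# Usage sketch (illustrative, not part of the artifact class): a pack/save/load round trip
# with the default stdlib json module; `tmp_dir` is any existing directory supplied by the
# caller and the helper name is hypothetical.
def _json_artifact_roundtrip_example(tmp_dir):
    JSONArtifact("config").pack({"threshold": 0.5}, indent=2).save(tmp_dir)
    return JSONArtifact("config").load(tmp_dir).get()  # -> {"threshold": 0.5}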
|
py | 1a35b45965c2abaa53e31404f8a345a6c4b9d4ec | import pytest
from skiski.ski import S, K, I
from skiski.lib import B, R
def test_composite_function():
a = lambda x: x * 5
b = lambda x: x - 3
assert B(a).dot(b).dot(5).w() == 10
def test_sksk_is_b():
a = lambda x: x * 5
b = lambda x: x - 3
b_comb = B(a).dot(b).dot(5).w()
sksk = B.to_ski().dot(a).w().dot(b).dot(5).w()
assert b_comb == sksk
def test_reverse_composite_function():
a = lambda x: x * 5
assert R(5).dot(a).w() == 25
def test_sksik_is_r():
a = lambda x: x * 5
r_comb = R(5).dot(a).w()
sksik = S(K(S(I))).dot(K).dot(5).w().dot(a).w()
assert r_comb == sksik
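def test_plain_python_equivalents():
    # Illustrative cross-check (added, not part of the original suite) using nothing but
    # plain Python: B behaves like function composition and R like reversed application,
    # matching the combinator assertions above.
    a = lambda x: x * 5
    b = lambda x: x - 3
    assert a(b(5)) == 10  # same value as B(a).dot(b).dot(5).w()
    assert a(5) == 25     # same value as R(5).dot(a).w()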
|
py | 1a35b596e0a530c30a9c7d159be6b5060fc4b221 | #!/usr/bin/env python3
# Day 15: Maximum Sum Circular Subarray
#
# Given a circular array C of integers represented by A, find the maximum
# possible sum of a non-empty subarray of C.
# Here, a circular array means the end of the array connects to the beginning
# of the array. (Formally, C[i] = A[i] when 0 <= i < A.length, and
# C[i+A.length] = C[i] when i >= 0.)
# Also, a subarray may only include each element of the fixed buffer A at most
# once. (Formally, for a subarray C[i], C[i+1], ..., C[j], there does not
# exist i <= k1, k2 <= j with k1 % A.length = k2 % A.length.)
#
# Notes:
# - -30000 <= A[i] <= 30000
# - 1 <= A.length <= 30000
class Solution:
def maxSubarraySum(self, numbers: [int]) -> int:
best = float("-inf")
current = 0
for number in numbers:
current += number
if best < current:
best = current
if current < 0:
current = 0
return best
def maxSubarraySumCircular(self, A: [int]) -> int:
total = sum(A)
inverted = [-number for number in A]
best_contiguous = self.maxSubarraySum(A)
best_inverted = self.maxSubarraySum(inverted)
if best_inverted == -total:
return best_contiguous
else:
return max(best_contiguous, total + best_inverted)
# Tests
assert Solution().maxSubarraySumCircular([1,-2,3,-2]) == 3
assert Solution().maxSubarraySumCircular([5,-3,5]) == 10
assert Solution().maxSubarraySumCircular([3,-1,2,-1]) == 4
assert Solution().maxSubarraySumCircular([3,-2,2,-3]) == 3
assert Solution().maxSubarraySumCircular([-2,-3,-1]) == -1
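# Worked trace of the inversion trick (illustrative, added for clarity): for A = [5,-3,5],
# total = 7 and the best subarray of the negated array [-5,3,-5] sums to 3, so the best
# wrapping sum is 7 + 3 = 10, which beats the plain Kadane result of 7.
assert Solution().maxSubarraySum([-5,3,-5]) == 3
assert max(Solution().maxSubarraySum([5,-3,5]), sum([5,-3,5]) + 3) == 10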
|
py | 1a35b5cb5d46d7e02c3b570412d4fb94d1b60034 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.KoubeiMarketingCampaignMemberAuthApplyModel import KoubeiMarketingCampaignMemberAuthApplyModel
class KoubeiMarketingCampaignMemberAuthApplyRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._biz_content = None
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def biz_content(self):
return self._biz_content
@biz_content.setter
def biz_content(self, value):
if isinstance(value, KoubeiMarketingCampaignMemberAuthApplyModel):
self._biz_content = value
else:
self._biz_content = KoubeiMarketingCampaignMemberAuthApplyModel.from_alipay_dict(value)
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'koubei.marketing.campaign.member.auth.apply'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.biz_content:
if hasattr(self.biz_content, 'to_alipay_dict'):
params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
else:
params['biz_content'] = self.biz_content
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
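# Usage sketch (illustrative, not part of the SDK): only fields that have been set end up in
# the parameter map, so a minimal call site looks like the function below (signing and the
# HTTP call are handled elsewhere in the SDK; the helper name and URL are hypothetical).
def _build_params_example():
    req = KoubeiMarketingCampaignMemberAuthApplyRequest()
    req.notify_url = "https://example.invalid/notify"
    return req.get_params()  # method/version keys plus 'notify_url'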
|
py | 1a35b5ebb98d8d364a73b1e4d86ee676cd0db9cc | #!/bin/python3
import math
import os
import random
import re
import sys
from collections import Counter
#
# Complete the 'missingNumbers' function below.
#
# The function is expected to return an INTEGER_ARRAY.
# The function accepts following parameters:
# 1. INTEGER_ARRAY arr
# 2. INTEGER_ARRAY brr
#
def missingNumbers(arr, brr):
# Write your code here
acount = Counter(arr)
bcount = Counter(brr)
for el in acount.items():
get = bcount.get(el[0])
if get:
bcount[el[0]] -= el[1]
bcount = list(map(lambda x: x[0], (filter(lambda x: x[1] > 0, bcount.items()))))
bcount = sorted(bcount)
return bcount
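# Worked example (illustrative, added for clarity): brr carries one extra 204 and a 206 that
# arr lacks, so those are the "missing" numbers, returned in ascending order.
assert missingNumbers([203, 204, 205], [203, 204, 204, 205, 206]) == [204, 206]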
if __name__ == '__main__':
fptr = open(os.environ['OUTPUT_PATH'], 'w')
n = int(input().strip())
arr = list(map(int, input().rstrip().split()))
m = int(input().strip())
brr = list(map(int, input().rstrip().split()))
result = missingNumbers(arr, brr)
fptr.write(' '.join(map(str, result)))
fptr.write('\n')
fptr.close()
|
py | 1a35b61f4a938cf8bc06f53f4f9619aaa1d33712 | # -*- coding: utf-8 -*-
from captcha.conf import settings
from captcha.fields import CaptchaField, CaptchaTextInput
from captcha.models import CaptchaStore, get_safe_now
from django.conf import settings as django_settings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.utils.translation import ugettext_lazy
import datetime
import json
import re
import six
import os
class CaptchaCase(TestCase):
urls = 'captcha.tests.urls'
def setUp(self):
self.stores = {}
self.__current_settings_output_format = settings.CAPTCHA_OUTPUT_FORMAT
self.__current_settings_dictionary = settings.CAPTCHA_WORDS_DICTIONARY
self.__current_settings_punctuation = settings.CAPTCHA_PUNCTUATION
tested_helpers = ['captcha.helpers.math_challenge', 'captcha.helpers.random_char_challenge', 'captcha.helpers.unicode_challenge']
if os.path.exists('/usr/share/dict/words'):
settings.CAPTCHA_WORDS_DICTIONARY = '/usr/share/dict/words'
settings.CAPTCHA_PUNCTUATION = ';-,.'
tested_helpers.append('captcha.helpers.word_challenge')
tested_helpers.append('captcha.helpers.huge_words_and_punctuation_challenge')
for helper in tested_helpers:
challenge, response = settings._callable_from_string(helper)()
self.stores[helper.rsplit('.', 1)[-1].replace('_challenge', '_store')], _ = CaptchaStore.objects.get_or_create(challenge=challenge, response=response)
challenge, response = settings.get_challenge()()
self.stores['default_store'], _ = CaptchaStore.objects.get_or_create(challenge=challenge, response=response)
self.default_store = self.stores['default_store']
def tearDown(self):
settings.CAPTCHA_OUTPUT_FORMAT = self.__current_settings_output_format
settings.CAPTCHA_WORDS_DICTIONARY = self.__current_settings_dictionary
settings.CAPTCHA_PUNCTUATION = self.__current_settings_punctuation
def __extract_hash_and_response(self, r):
hash_ = re.findall(r'value="([0-9a-f]+)"', str(r.content))[0]
response = CaptchaStore.objects.get(hashkey=hash_).response
return hash_, response
def testImages(self):
for key in [store.hashkey for store in six.itervalues(self.stores)]:
response = self.client.get(reverse('captcha-image', kwargs=dict(key=key)))
self.assertEqual(response.status_code, 200)
self.assertTrue(response.has_header('content-type'))
self.assertEqual(response._headers.get('content-type'), ('Content-Type', 'image/png'))
def testAudio(self):
if not settings.CAPTCHA_FLITE_PATH:
return
for key in (self.stores.get('math_store').hashkey, self.stores.get('math_store').hashkey, self.default_store.hashkey):
response = self.client.get(reverse('captcha-audio', kwargs=dict(key=key)))
self.assertEqual(response.status_code, 200)
self.assertTrue(len(response.content) > 1024)
self.assertTrue(response.has_header('content-type'))
self.assertEqual(response._headers.get('content-type'), ('Content-Type', 'audio/x-wav'))
def testFormSubmit(self):
r = self.client.get(reverse('captcha-test'))
self.assertEqual(r.status_code, 200)
hash_, response = self.__extract_hash_and_response(r)
r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertTrue(str(r.content).find('Form validated') > 0)
r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertFalse(str(r.content).find('Form validated') > 0)
def testFormModelForm(self):
r = self.client.get(reverse('captcha-test-model-form'))
self.assertEqual(r.status_code, 200)
hash_, response = self.__extract_hash_and_response(r)
r = self.client.post(reverse('captcha-test-model-form'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertTrue(str(r.content).find('Form validated') > 0)
r = self.client.post(reverse('captcha-test-model-form'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertFalse(str(r.content).find('Form validated') > 0)
def testWrongSubmit(self):
for urlname in ('captcha-test', 'captcha-test-model-form'):
r = self.client.get(reverse(urlname))
self.assertEqual(r.status_code, 200)
r = self.client.post(reverse(urlname), dict(captcha_0='abc', captcha_1='wrong response', subject='xxx', sender='[email protected]'))
self.assertFormError(r, 'form', 'captcha', ugettext_lazy('Invalid CAPTCHA'))
def testDeleteExpired(self):
self.default_store.expiration = get_safe_now() - datetime.timedelta(minutes=5)
self.default_store.save()
hash_ = self.default_store.hashkey
r = self.client.post(reverse('captcha-test'), dict(captcha_0=hash_, captcha_1=self.default_store.response, subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertFalse('Form validated' in str(r.content))
# expired -> deleted
try:
CaptchaStore.objects.get(hashkey=hash_)
self.fail()
except:
pass
def testCustomErrorMessage(self):
r = self.client.get(reverse('captcha-test-custom-error-message'))
self.assertEqual(r.status_code, 200)
# Wrong answer
r = self.client.post(reverse('captcha-test-custom-error-message'), dict(captcha_0='abc', captcha_1='wrong response'))
self.assertFormError(r, 'form', 'captcha', 'TEST CUSTOM ERROR MESSAGE')
# empty answer
r = self.client.post(reverse('captcha-test-custom-error-message'), dict(captcha_0='abc', captcha_1=''))
self.assertFormError(r, 'form', 'captcha', ugettext_lazy('This field is required.'))
def testRepeatedChallenge(self):
CaptchaStore.objects.create(challenge='xxx', response='xxx')
try:
CaptchaStore.objects.create(challenge='xxx', response='xxx')
except Exception:
self.fail()
def testRepeatedChallengeFormSubmit(self):
__current_challange_function = settings.CAPTCHA_CHALLENGE_FUNCT
for urlname in ('captcha-test', 'captcha-test-model-form'):
settings.CAPTCHA_CHALLENGE_FUNCT = 'captcha.tests.trivial_challenge'
r1 = self.client.get(reverse(urlname))
r2 = self.client.get(reverse(urlname))
self.assertEqual(r1.status_code, 200)
self.assertEqual(r2.status_code, 200)
if re.findall(r'value="([0-9a-f]+)"', str(r1.content)):
hash_1 = re.findall(r'value="([0-9a-f]+)"', str(r1.content))[0]
else:
self.fail()
if re.findall(r'value="([0-9a-f]+)"', str(r2.content)):
hash_2 = re.findall(r'value="([0-9a-f]+)"', str(r2.content))[0]
else:
self.fail()
try:
store_1 = CaptchaStore.objects.get(hashkey=hash_1)
store_2 = CaptchaStore.objects.get(hashkey=hash_2)
except:
self.fail()
self.assertTrue(store_1.pk != store_2.pk)
self.assertTrue(store_1.response == store_2.response)
self.assertTrue(hash_1 != hash_2)
r1 = self.client.post(reverse(urlname), dict(captcha_0=hash_1, captcha_1=store_1.response, subject='xxx', sender='[email protected]'))
self.assertEqual(r1.status_code, 200)
self.assertTrue(str(r1.content).find('Form validated') > 0)
try:
store_2 = CaptchaStore.objects.get(hashkey=hash_2)
except:
self.fail()
r2 = self.client.post(reverse(urlname), dict(captcha_0=hash_2, captcha_1=store_2.response, subject='xxx', sender='[email protected]'))
self.assertEqual(r2.status_code, 200)
self.assertTrue(str(r2.content).find('Form validated') > 0)
settings.CAPTCHA_CHALLENGE_FUNCT = __current_challange_function
def testOutputFormat(self):
for urlname in ('captcha-test', 'captcha-test-model-form'):
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s<p>Hello, captcha world</p>%(hidden_field)s%(text_field)s'
r = self.client.get(reverse(urlname))
self.assertEqual(r.status_code, 200)
self.assertTrue('<p>Hello, captcha world</p>' in str(r.content))
def testInvalidOutputFormat(self):
__current_settings_debug = django_settings.DEBUG
for urlname in ('captcha-test', 'captcha-test-model-form'):
            # we turn on DEBUG because CAPTCHA_OUTPUT_FORMAT is only checked in DEBUG mode
django_settings.DEBUG = True
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s'
try:
self.client.get(reverse(urlname))
self.fail()
except ImproperlyConfigured as e:
self.assertTrue('CAPTCHA_OUTPUT_FORMAT' in str(e))
django_settings.DEBUG = __current_settings_debug
def testPerFormFormat(self):
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s testCustomFormatString %(hidden_field)s %(text_field)s'
r = self.client.get(reverse('captcha-test'))
self.assertTrue('testCustomFormatString' in str(r.content))
r = self.client.get(reverse('test_per_form_format'))
self.assertTrue('testPerFieldCustomFormatString' in str(r.content))
def testIssue31ProperLabel(self):
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s %(hidden_field)s %(text_field)s'
r = self.client.get(reverse('captcha-test'))
self.assertTrue('<label for="id_captcha_1"' in str(r.content))
def testRefreshView(self):
r = self.client.get(reverse('captcha-refresh'), HTTP_X_REQUESTED_WITH='XMLHttpRequest')
try:
new_data = json.loads(six.text_type(r.content, encoding='ascii'))
self.assertTrue('image_url' in new_data)
except:
self.fail()
def testContentLength(self):
for key in [store.hashkey for store in six.itervalues(self.stores)]:
response = self.client.get(reverse('captcha-image', kwargs=dict(key=key)))
self.assertTrue(response.has_header('content-length'))
self.assertTrue(response['content-length'].isdigit())
self.assertTrue(int(response['content-length']))
def testIssue12ProperInstantiation(self):
"""
        This test covers default Django field and widget behavior.
        It does not assert anything; if something is wrong it will raise an error.
"""
settings.CAPTCHA_OUTPUT_FORMAT = u'%(image)s %(hidden_field)s %(text_field)s'
widget = CaptchaTextInput(attrs={'class': 'required'})
CaptchaField(widget=widget)
def testTestMode_Issue15(self):
__current_test_mode_setting = settings.CAPTCHA_TEST_MODE
settings.CAPTCHA_TEST_MODE = False
r = self.client.get(reverse('captcha-test'))
self.assertEqual(r.status_code, 200)
r = self.client.post(reverse('captcha-test'), dict(captcha_0='abc', captcha_1='wrong response', subject='xxx', sender='[email protected]'))
self.assertFormError(r, 'form', 'captcha', ugettext_lazy('Invalid CAPTCHA'))
settings.CAPTCHA_TEST_MODE = True
# Test mode, only 'PASSED' is accepted
r = self.client.get(reverse('captcha-test'))
self.assertEqual(r.status_code, 200)
r = self.client.post(reverse('captcha-test'), dict(captcha_0='abc', captcha_1='wrong response', subject='xxx', sender='[email protected]'))
self.assertFormError(r, 'form', 'captcha', ugettext_lazy('Invalid CAPTCHA'))
r = self.client.get(reverse('captcha-test'))
self.assertEqual(r.status_code, 200)
r = self.client.post(reverse('captcha-test'), dict(captcha_0='abc', captcha_1='passed', subject='xxx', sender='[email protected]'))
self.assertTrue(str(r.content).find('Form validated') > 0)
settings.CAPTCHA_TEST_MODE = __current_test_mode_setting
def test_get_version(self):
import captcha
captcha.get_version(True)
def test_missing_value(self):
r = self.client.get(reverse('captcha-test-non-required'))
self.assertEqual(r.status_code, 200)
hash_, response = self.__extract_hash_and_response(r)
# Empty response is okay when required is False
r = self.client.post(reverse('captcha-test-non-required'), dict(subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertTrue(str(r.content).find('Form validated') > 0)
# But a valid response is okay, too
r = self.client.get(reverse('captcha-test-non-required'))
self.assertEqual(r.status_code, 200)
hash_, response = self.__extract_hash_and_response(r)
r = self.client.post(reverse('captcha-test-non-required'), dict(captcha_0=hash_, captcha_1=response, subject='xxx', sender='[email protected]'))
self.assertEqual(r.status_code, 200)
self.assertTrue(str(r.content).find('Form validated') > 0)
def test_autocomplete_off(self):
r = self.client.get(reverse('captcha-test'))
self.assertTrue('autocomplete="off"' in six.text_type(r.content))
def trivial_challenge():
return 'trivial', 'trivial'
|
py | 1a35b67cc292c5940ad16e2d6b2493a45281cd12 | import asyncio
import aiosqlite
import copy
from typing import Dict
from aiosqlite.core import Connection
class DBWrapper:
"""
This object handles HeaderBlocks and Blocks stored in DB used by wallet.
"""
db: Dict[str,aiosqlite.Connection]
lock: asyncio.Lock
def __init__(self, connection: Dict[str,aiosqlite.Connection]):
self.db = dict()
self.db = connection
self.lock = asyncio.Lock()
async def begin_transaction(self, str="chia"):
cursor = await self.db[str].execute("BEGIN TRANSACTION")
await cursor.close()
async def rollback_transaction(self,str="chia"):
# Also rolls back the coin store, since both stores must be updated at once
if self.db[str].in_transaction:
cursor = await self.db[str].execute("ROLLBACK")
await cursor.close()
async def commit_transaction(self,str="chia"):
if isinstance(self.db[str], aiosqlite.Connection):
await self.db[str].commit()
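# Usage sketch (illustrative, not part of the wrapper): wire one named in-memory connection
# and run a guarded begin/commit pair; the "chia" key mirrors the default used above and the
# coroutine name is hypothetical.
async def _db_wrapper_example():
    wrapper = DBWrapper({"chia": await aiosqlite.connect(":memory:")})
    async with wrapper.lock:
        await wrapper.begin_transaction("chia")
        await wrapper.commit_transaction("chia")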
|
py | 1a35b80351aef6f04f693b86bffe493c7a7b9203 | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
import os.path as osp
import warnings
from argparse import ArgumentParser
import cv2
import mmcv
import numpy as np
from mmpose.apis import (collect_multi_frames, extract_pose_sequence,
get_track_id, inference_pose_lifter_model,
inference_top_down_pose_model, init_pose_model,
process_mmdet_results, vis_3d_pose_result)
from mmpose.core import Smoother
from mmpose.datasets import DatasetInfo
from mmpose.models import PoseLifter, TopDown
try:
from mmdet.apis import inference_detector, init_detector
has_mmdet = True
except (ImportError, ModuleNotFoundError):
has_mmdet = False
def convert_keypoint_definition(keypoints, pose_det_dataset,
pose_lift_dataset):
"""Convert pose det dataset keypoints definition to pose lifter dataset
keypoints definition.
Args:
keypoints (ndarray[K, 2 or 3]): 2D keypoints to be transformed.
        pose_det_dataset (str): Name of the dataset for 2D pose detector.
pose_lift_dataset (str): Name of the dataset for pose lifter model.
"""
coco_style_datasets = [
'TopDownCocoDataset', 'TopDownPoseTrack18Dataset',
'TopDownPoseTrack18VideoDataset'
]
if pose_det_dataset == 'TopDownH36MDataset' and \
pose_lift_dataset == 'Body3DH36MDataset':
return keypoints
elif pose_det_dataset in coco_style_datasets and \
pose_lift_dataset == 'Body3DH36MDataset':
keypoints_new = np.zeros((17, keypoints.shape[1]))
# pelvis is in the middle of l_hip and r_hip
keypoints_new[0] = (keypoints[11] + keypoints[12]) / 2
# thorax is in the middle of l_shoulder and r_shoulder
keypoints_new[8] = (keypoints[5] + keypoints[6]) / 2
# in COCO, head is in the middle of l_eye and r_eye
# in PoseTrack18, head is in the middle of head_bottom and head_top
keypoints_new[10] = (keypoints[1] + keypoints[2]) / 2
# spine is in the middle of thorax and pelvis
keypoints_new[7] = (keypoints_new[0] + keypoints_new[8]) / 2
# rearrange other keypoints
keypoints_new[[1, 2, 3, 4, 5, 6, 9, 11, 12, 13, 14, 15, 16]] = \
keypoints[[12, 14, 16, 11, 13, 15, 0, 5, 7, 9, 6, 8, 10]]
return keypoints_new
else:
raise NotImplementedError
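# Shape sketch (illustrative, not part of the demo): COCO-style 2D keypoints arrive as a
# (17, K) array of (x, y[, score]) rows; the conversion keeps that shape while re-ordering
# joints into the H36M convention and synthesising pelvis/thorax/spine/head as midpoints.
# The helper name is hypothetical.
def _convert_example():
    fake_coco_kpts = np.zeros((17, 3), dtype=np.float32)
    return convert_keypoint_definition(
        fake_coco_kpts, 'TopDownCocoDataset', 'Body3DH36MDataset').shape  # (17, 3)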
def main():
parser = ArgumentParser()
parser.add_argument('det_config', help='Config file for detection')
parser.add_argument('det_checkpoint', help='Checkpoint file for detection')
parser.add_argument(
'pose_detector_config',
type=str,
default=None,
help='Config file for the 1st stage 2D pose detector')
parser.add_argument(
'pose_detector_checkpoint',
type=str,
default=None,
help='Checkpoint file for the 1st stage 2D pose detector')
parser.add_argument(
'pose_lifter_config',
help='Config file for the 2nd stage pose lifter model')
parser.add_argument(
'pose_lifter_checkpoint',
help='Checkpoint file for the 2nd stage pose lifter model')
parser.add_argument(
'--video-path', type=str, default='', help='Video path')
parser.add_argument(
'--rebase-keypoint-height',
action='store_true',
help='Rebase the predicted 3D pose so its lowest keypoint has a '
'height of 0 (landing on the ground). This is useful for '
        'visualization when the model does not predict the global position '
'of the 3D pose.')
parser.add_argument(
'--norm-pose-2d',
action='store_true',
help='Scale the bbox (along with the 2D pose) to the average bbox '
'scale of the dataset, and move the bbox (along with the 2D pose) to '
'the average bbox center of the dataset. This is useful when bbox '
'is small, especially in multi-person scenarios.')
parser.add_argument(
'--num-instances',
type=int,
default=-1,
help='The number of 3D poses to be visualized in every frame. If '
'less than 0, it will be set to the number of pose results in the '
'first frame.')
parser.add_argument(
'--show',
action='store_true',
default=False,
help='whether to show visualizations.')
parser.add_argument(
'--out-video-root',
type=str,
default='vis_results',
help='Root of the output video file. '
'Default not saving the visualization video.')
parser.add_argument(
'--device', default='cuda:0', help='Device for inference')
parser.add_argument(
'--det-cat-id',
type=int,
default=1,
help='Category id for bounding box detection model')
parser.add_argument(
'--bbox-thr',
type=float,
default=0.9,
help='Bounding box score threshold')
parser.add_argument('--kpt-thr', type=float, default=0.3)
parser.add_argument(
'--use-oks-tracking', action='store_true', help='Using OKS tracking')
parser.add_argument(
'--tracking-thr', type=float, default=0.3, help='Tracking threshold')
parser.add_argument(
'--radius',
type=int,
default=8,
help='Keypoint radius for visualization')
parser.add_argument(
'--thickness',
type=int,
default=2,
help='Link thickness for visualization')
parser.add_argument(
'--smooth',
action='store_true',
help='Apply a temporal filter to smooth the pose estimation results. '
'See also --smooth-filter-cfg.')
parser.add_argument(
'--smooth-filter-cfg',
type=str,
default='configs/_base_/filters/one_euro.py',
help='Config file of the filter to smooth the pose estimation '
'results. See also --smooth.')
parser.add_argument(
'--use-multi-frames',
action='store_true',
default=False,
help='whether to use multi frames for inference in the 2D pose'
'detection stage. Default: False.')
parser.add_argument(
'--online',
action='store_true',
default=False,
help='inference mode. If set to True, can not use future frame'
'information when using multi frames for inference in the 2D pose'
'detection stage. Default: False.')
assert has_mmdet, 'Please install mmdet to run the demo.'
args = parser.parse_args()
assert args.show or (args.out_video_root != '')
assert args.det_config is not None
assert args.det_checkpoint is not None
video = mmcv.VideoReader(args.video_path)
assert video.opened, f'Failed to load video file {args.video_path}'
# First stage: 2D pose detection
print('Stage 1: 2D pose detection.')
print('Initializing model...')
person_det_model = init_detector(
args.det_config, args.det_checkpoint, device=args.device.lower())
pose_det_model = init_pose_model(
args.pose_detector_config,
args.pose_detector_checkpoint,
device=args.device.lower())
assert isinstance(pose_det_model, TopDown), 'Only "TopDown"' \
'model is supported for the 1st stage (2D pose detection)'
# frame index offsets for inference, used in multi-frame inference setting
if args.use_multi_frames:
assert 'frame_indices_test' in pose_det_model.cfg.data.test.data_cfg
indices = pose_det_model.cfg.data.test.data_cfg['frame_indices_test']
pose_det_dataset = pose_det_model.cfg.data['test']['type']
# get datasetinfo
dataset_info = pose_det_model.cfg.data['test'].get('dataset_info', None)
if dataset_info is None:
warnings.warn(
'Please set `dataset_info` in the config.'
'Check https://github.com/open-mmlab/mmpose/pull/663 for details.',
DeprecationWarning)
else:
dataset_info = DatasetInfo(dataset_info)
pose_det_results_list = []
next_id = 0
pose_det_results = []
# whether to return heatmap, optional
return_heatmap = False
# return the output of some desired layers,
# e.g. use ('backbone', ) to return backbone feature
output_layer_names = None
print('Running 2D pose detection inference...')
for frame_id, cur_frame in enumerate(mmcv.track_iter_progress(video)):
pose_det_results_last = pose_det_results
# test a single image, the resulting box is (x1, y1, x2, y2)
mmdet_results = inference_detector(person_det_model, cur_frame)
# keep the person class bounding boxes.
person_det_results = process_mmdet_results(mmdet_results,
args.det_cat_id)
if args.use_multi_frames:
frames = collect_multi_frames(video, frame_id, indices,
args.online)
# make person results for current image
pose_det_results, _ = inference_top_down_pose_model(
pose_det_model,
frames if args.use_multi_frames else cur_frame,
person_det_results,
bbox_thr=args.bbox_thr,
format='xyxy',
dataset=pose_det_dataset,
dataset_info=dataset_info,
return_heatmap=return_heatmap,
outputs=output_layer_names)
# get track id for each person instance
pose_det_results, next_id = get_track_id(
pose_det_results,
pose_det_results_last,
next_id,
use_oks=args.use_oks_tracking,
tracking_thr=args.tracking_thr)
pose_det_results_list.append(copy.deepcopy(pose_det_results))
# Second stage: Pose lifting
print('Stage 2: 2D-to-3D pose lifting.')
print('Initializing model...')
pose_lift_model = init_pose_model(
args.pose_lifter_config,
args.pose_lifter_checkpoint,
device=args.device.lower())
assert isinstance(pose_lift_model, PoseLifter), \
'Only "PoseLifter" model is supported for the 2nd stage ' \
'(2D-to-3D lifting)'
pose_lift_dataset = pose_lift_model.cfg.data['test']['type']
if args.out_video_root == '':
save_out_video = False
else:
os.makedirs(args.out_video_root, exist_ok=True)
save_out_video = True
if save_out_video:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
fps = video.fps
writer = None
# convert keypoint definition
for pose_det_results in pose_det_results_list:
for res in pose_det_results:
keypoints = res['keypoints']
res['keypoints'] = convert_keypoint_definition(
keypoints, pose_det_dataset, pose_lift_dataset)
# load temporal padding config from model.data_cfg
if hasattr(pose_lift_model.cfg, 'test_data_cfg'):
data_cfg = pose_lift_model.cfg.test_data_cfg
else:
data_cfg = pose_lift_model.cfg.data_cfg
# build pose smoother for temporal refinement
if args.smooth:
smoother = Smoother(filter_cfg=args.smooth_filter_cfg, keypoint_dim=3)
else:
smoother = None
num_instances = args.num_instances
print('Running 2D-to-3D pose lifting inference...')
for i, pose_det_results in enumerate(
mmcv.track_iter_progress(pose_det_results_list)):
# extract and pad input pose2d sequence
pose_results_2d = extract_pose_sequence(
pose_det_results_list,
frame_idx=i,
causal=data_cfg.causal,
seq_len=data_cfg.seq_len,
step=data_cfg.seq_frame_interval)
# 2D-to-3D pose lifting
pose_lift_results = inference_pose_lifter_model(
pose_lift_model,
pose_results_2d=pose_results_2d,
dataset=pose_lift_dataset,
with_track_id=True,
image_size=video.resolution,
norm_pose_2d=args.norm_pose_2d)
# Pose processing
pose_lift_results_vis = []
for idx, res in enumerate(pose_lift_results):
keypoints_3d = res['keypoints_3d']
# exchange y,z-axis, and then reverse the direction of x,z-axis
keypoints_3d = keypoints_3d[..., [0, 2, 1]]
keypoints_3d[..., 0] = -keypoints_3d[..., 0]
keypoints_3d[..., 2] = -keypoints_3d[..., 2]
# rebase height (z-axis)
if args.rebase_keypoint_height:
keypoints_3d[..., 2] -= np.min(
keypoints_3d[..., 2], axis=-1, keepdims=True)
res['keypoints_3d'] = keypoints_3d
# add title
det_res = pose_det_results[idx]
instance_id = det_res['track_id']
res['title'] = f'Prediction ({instance_id})'
# only visualize the target frame
res['keypoints'] = det_res['keypoints']
res['bbox'] = det_res['bbox']
res['track_id'] = instance_id
pose_lift_results_vis.append(res)
# Smoothing
if smoother:
pose_lift_results = smoother.smooth(pose_lift_results)
# Visualization
if num_instances < 0:
num_instances = len(pose_lift_results_vis)
img_vis = vis_3d_pose_result(
pose_lift_model,
result=pose_lift_results_vis,
img=video[i],
out_file=None,
radius=args.radius,
thickness=args.thickness,
num_instances=num_instances)
if save_out_video:
if writer is None:
writer = cv2.VideoWriter(
osp.join(args.out_video_root,
f'vis_{osp.basename(args.video_path)}'), fourcc,
fps, (img_vis.shape[1], img_vis.shape[0]))
writer.write(img_vis)
if save_out_video:
writer.release()
if __name__ == '__main__':
main()
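# Illustrative invocation (the config/checkpoint paths and script name are placeholders, not
# verified against any particular release; the flags match the argparse definitions above):
#   python body3d_two_stage_demo.py DET_CFG.py DET_CKPT.pth POSE2D_CFG.py POSE2D_CKPT.pth \
#       POSE3D_CFG.py POSE3D_CKPT.pth --video-path input.mp4 --out-video-root vis_results \
#       --rebase-keypoint-height --smooth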
|
py | 1a35b85da04796235b9966df2c0530c36aff5858 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from __future__ import print_function
import binascii
import datetime
import errno
import json
import os
import os.path
import platform
import random
import re
import shutil
import ssl
import stat
import string
import subprocess
import sys
import tempfile
import threading
import time
import uuid
import webbrowser
from distutils.version import StrictVersion
from math import isnan
from six.moves.urllib.request import urlopen # pylint: disable=import-error
from six.moves.urllib.error import URLError # pylint: disable=import-error
# pylint: disable=import-error
import yaml
import dateutil.parser
from dateutil.relativedelta import relativedelta
from knack.log import get_logger
from knack.util import CLIError
from knack.prompting import prompt_pass, NoTTYException, prompt_y_n
from msrestazure.azure_exceptions import CloudError
import requests
# pylint: disable=no-name-in-module,import-error
from azure.cli.command_modules.acs import acs_client, proxy
from azure.cli.command_modules.acs._params import regions_in_preview, regions_in_prod
from azure.cli.core.api import get_config_dir
from azure.cli.core.azclierror import (ResourceNotFoundError,
ArgumentUsageError,
ClientRequestError,
InvalidArgumentValueError)
from azure.cli.core._profile import Profile
from azure.cli.core.commands.client_factory import get_mgmt_service_client, get_subscription_id
from azure.cli.core.keys import is_valid_ssh_rsa_public_key
from azure.cli.core.util import in_cloud_console, shell_safe_json_parse, truncate_text, sdk_no_wait
from azure.cli.core.commands import LongRunningOperation
from azure.graphrbac.models import (ApplicationCreateParameters,
ApplicationUpdateParameters,
PasswordCredential,
KeyCredential,
ServicePrincipalCreateParameters,
GetObjectsParameters,
ResourceAccess, RequiredResourceAccess)
from azure.mgmt.containerservice.models import ContainerServiceOrchestratorTypes
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceNetworkProfile
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceLinuxProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterServicePrincipalProfile
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceSshConfiguration
from azure.mgmt.containerservice.v2020_09_01.models import ContainerServiceSshPublicKey
from azure.mgmt.containerservice.v2020_09_01.models import ManagedCluster
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAADProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAddonProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterIdentity
from azure.mgmt.containerservice.v2020_09_01.models import AgentPool
from azure.mgmt.containerservice.v2020_09_01.models import AgentPoolUpgradeSettings
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterSKU
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterWindowsProfile
from azure.mgmt.containerservice.v2020_09_01.models import ManagedClusterIdentityUserAssignedIdentitiesValue
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAgentPoolProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftAgentPoolProfileRole
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAADIdentityProvider
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedCluster
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftRouterProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterAuthProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import NetworkProfile
from azure.mgmt.containerservice.v2019_09_30_preview.models import OpenShiftManagedClusterMonitorProfile
from ._client_factory import cf_container_services
from ._client_factory import cf_resource_groups
from ._client_factory import get_auth_management_client
from ._client_factory import get_graph_rbac_management_client
from ._client_factory import cf_resources
from ._client_factory import get_resource_by_name
from ._client_factory import cf_container_registry_service
from ._client_factory import cf_managed_clusters
from ._client_factory import get_msi_client
from ._helpers import (_populate_api_server_access_profile, _set_vm_set_type, _set_outbound_type,
_parse_comma_separated_list)
from ._loadbalancer import (set_load_balancer_sku, is_load_balancer_profile_provided,
update_load_balancer_profile, create_load_balancer_profile)
from ._consts import CONST_SCALE_SET_PRIORITY_REGULAR, CONST_SCALE_SET_PRIORITY_SPOT, CONST_SPOT_EVICTION_POLICY_DELETE
from ._consts import CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME
from ._consts import CONST_MONITORING_ADDON_NAME
from ._consts import CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID
from ._consts import CONST_VIRTUAL_NODE_ADDON_NAME
from ._consts import CONST_VIRTUAL_NODE_SUBNET_NAME
from ._consts import CONST_KUBE_DASHBOARD_ADDON_NAME
from ._consts import CONST_AZURE_POLICY_ADDON_NAME
from ._consts import ADDONS
logger = get_logger(__name__)
# pylint:disable=too-many-lines,unused-argument
def which(binary):
path_var = os.getenv('PATH')
if platform.system() == 'Windows':
binary = binary + '.exe'
parts = path_var.split(';')
else:
parts = path_var.split(':')
for part in parts:
bin_path = os.path.join(part, binary)
if os.path.exists(bin_path) and os.path.isfile(bin_path) and os.access(bin_path, os.X_OK):
return bin_path
return None
def wait_then_open(url):
"""
    Waits for a bit, then opens a URL. Useful for waiting for a proxy to come up before opening the URL.
"""
    for _ in range(1, 10):
        try:
            urlopen(url, context=_ssl_context())
            break
        except URLError:
            time.sleep(1)
webbrowser.open_new_tab(url)
def wait_then_open_async(url):
"""
Spawns a thread that waits for a bit then opens a URL.
"""
    t = threading.Thread(target=wait_then_open, args=(url,))
t.daemon = True
t.start()
def acs_browse(cmd, client, resource_group_name, name, disable_browser=False, ssh_key_file=None):
"""
Opens a browser to the web interface for the cluster orchestrator
:param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: If set a path to an SSH key to use, only applies to DCOS
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file)
def _acs_browse_internal(cmd, client, acs_info, resource_group_name, name, disable_browser, ssh_key_file):
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
if str(orchestrator_type).lower() == 'kubernetes' or \
orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes or \
(acs_info.custom_profile and acs_info.custom_profile.orchestrator == 'kubernetes'): # pylint: disable=no-member
return k8s_browse(cmd, client, name, resource_group_name, disable_browser, ssh_key_file=ssh_key_file)
if str(orchestrator_type).lower() == 'dcos' or orchestrator_type == ContainerServiceOrchestratorTypes.dcos:
return _dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
raise CLIError('Unsupported orchestrator type {} for browse'.format(orchestrator_type))
def k8s_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Launch a proxy and browse the Kubernetes web UI.
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file)
def _k8s_browse_internal(name, acs_info, disable_browser, ssh_key_file):
if not which('kubectl'):
raise CLIError('Can not find kubectl executable in PATH')
browse_path = os.path.join(get_config_dir(), 'acsBrowseConfig.yaml')
if os.path.exists(browse_path):
os.remove(browse_path)
_k8s_get_credentials_internal(name, acs_info, browse_path, ssh_key_file, False)
logger.warning('Proxy running on 127.0.0.1:8001/ui')
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1:8001/ui')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy"])
def dcos_browse(cmd, client, name, resource_group_name, disable_browser=False, ssh_key_file=None):
"""
Creates an SSH tunnel to the Azure container service, and opens the Mesosphere DC/OS dashboard in the browser.
    :param name: Name of the target Azure container service instance.
:type name: String
:param resource_group_name: Name of Azure container service's resource group.
:type resource_group_name: String
    :param disable_browser: If true, don't launch a web browser after establishing the proxy
:type disable_browser: bool
:param ssh_key_file: Path to the SSH key to use
:type ssh_key_file: string
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_dcos_browse_internal(acs_info, disable_browser, ssh_key_file)
def _dcos_browse_internal(acs_info, disable_browser, ssh_key_file):
if not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
acs = acs_client.ACSClient()
if not acs.connect(_get_host_name(acs_info), _get_username(acs_info),
key_filename=ssh_key_file):
raise CLIError('Error connecting to ACS: {}'.format(_get_host_name(acs_info)))
octarine_bin = '/opt/mesosphere/bin/octarine'
if not acs.file_exists(octarine_bin):
raise CLIError('Proxy server ({}) does not exist on the cluster.'.format(octarine_bin))
proxy_id = _rand_str(16)
proxy_cmd = '{} {}'.format(octarine_bin, proxy_id)
acs.run(proxy_cmd, background=True)
# Parse the output to get the remote PORT
proxy_client_cmd = '{} --client --port {}'.format(octarine_bin, proxy_id)
stdout, _ = acs.run(proxy_client_cmd)
remote_port = int(stdout.read().decode().strip())
local_port = acs.get_available_local_port()
# Set the proxy
proxy.set_http_proxy('127.0.0.1', local_port)
logger.warning('Proxy running on 127.0.0.1:%s', local_port)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async('http://127.0.0.1')
try:
acs.create_tunnel(
remote_host='127.0.0.1',
remote_port=remote_port,
local_port=local_port)
finally:
proxy.disable_http_proxy()
def acs_install_cli(cmd, client, resource_group_name, name, install_location=None, client_version=None):
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
orchestrator_type = acs_info.orchestrator_profile.orchestrator_type # pylint: disable=no-member
kwargs = {'install_location': install_location}
if client_version:
kwargs['client_version'] = client_version
if orchestrator_type == 'kubernetes':
return k8s_install_cli(**kwargs)
if orchestrator_type == 'dcos':
return dcos_install_cli(**kwargs)
raise CLIError('Unsupported orchestrator type {} for install-cli'.format(orchestrator_type))
def _ssl_context():
if sys.version_info < (3, 4) or (in_cloud_console() and platform.system() == 'Windows'):
try:
return ssl.SSLContext(ssl.PROTOCOL_TLS) # added in python 2.7.13 and 3.6
except AttributeError:
return ssl.SSLContext(ssl.PROTOCOL_TLSv1)
return ssl.create_default_context()
def _urlretrieve(url, filename):
req = urlopen(url, context=_ssl_context())
with open(filename, "wb") as f:
f.write(req.read())
def _unzip(src, dest):
logger.debug('Extracting %s to %s.', src, dest)
system = platform.system()
if system in ('Linux', 'Darwin', 'Windows'):
import zipfile
with zipfile.ZipFile(src, 'r') as zipObj:
zipObj.extractall(dest)
else:
raise CLIError('The current system is not supported.')
def dcos_install_cli(cmd, install_location=None, client_version='1.8'):
"""
Downloads the dcos command line from Mesosphere
"""
system = platform.system()
if not install_location:
raise CLIError(
"No install location specified and it could not be determined from the current platform '{}'".format(
system))
base_url = 'https://downloads.dcos.io/binaries/cli/{}/x86-64/dcos-{}/{}'
if system == 'Windows':
file_url = base_url.format('windows', client_version, 'dcos.exe')
elif system == 'Linux':
# TODO Support ARM CPU here
file_url = base_url.format('linux', client_version, 'dcos')
elif system == 'Darwin':
file_url = base_url.format('darwin', client_version, 'dcos')
else:
        raise CLIError("Unsupported system '{}'.".format(system))
logger.warning('Downloading client to %s', install_location)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as err:
raise CLIError('Connection error while attempting to download client ({})'.format(err))
def k8s_install_cli(cmd, client_version='latest', install_location=None, base_src_url=None,
kubelogin_version='latest', kubelogin_install_location=None,
kubelogin_base_src_url=None):
k8s_install_kubectl(cmd, client_version, install_location, base_src_url)
k8s_install_kubelogin(cmd, kubelogin_version, kubelogin_install_location, kubelogin_base_src_url)
def k8s_install_kubectl(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubectl, a command-line interface for Kubernetes clusters.
"""
if not source_url:
source_url = "https://storage.googleapis.com/kubernetes-release/release"
cloud_name = cmd.cli_ctx.cloud.name
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubectl'
if client_version == 'latest':
context = _ssl_context()
version = urlopen(source_url + '/stable.txt', context=context).read()
client_version = version.decode('UTF-8').strip()
else:
client_version = "v%s" % client_version
file_url = ''
system = platform.system()
base_url = source_url + '/{}/bin/{}/amd64/{}'
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
if system == 'Windows':
file_url = base_url.format(client_version, 'windows', 'kubectl.exe')
elif system == 'Linux':
# TODO: Support ARM CPU here
file_url = base_url.format(client_version, 'linux', 'kubectl')
elif system == 'Darwin':
file_url = base_url.format(client_version, 'darwin', 'kubectl')
else:
        raise CLIError("Unsupported system '{}'.".format(system))
logger.warning('Downloading client to "%s" from "%s"', install_location, file_url)
try:
_urlretrieve(file_url, install_location)
os.chmod(install_location,
os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
if system == 'Windows': # be verbose, as the install_location likely not in Windows's search PATHs
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
logger.warning('Please add "{0}" to your search PATH so the `{1}` can be found. 2 options: \n'
' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
'This is good for the current command session.\n'
' 2. Update system PATH environment variable by following '
'"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
'You only need to do it once'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
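# URL sketch (illustrative, not called by the CLI): for client_version '1.21.0' on Linux the
# default download URL assembled above resolves to
#   https://storage.googleapis.com/kubernetes-release/release/v1.21.0/bin/linux/amd64/kubectl
# The helper name is hypothetical.
def _kubectl_url_example(client_version='1.21.0', system='Linux'):
    base_url = 'https://storage.googleapis.com/kubernetes-release/release' + '/{}/bin/{}/amd64/{}'
    plat, binary = {'Windows': ('windows', 'kubectl.exe'),
                    'Linux': ('linux', 'kubectl'),
                    'Darwin': ('darwin', 'kubectl')}[system]
    return base_url.format('v' + client_version, plat, binary)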
def k8s_install_kubelogin(cmd, client_version='latest', install_location=None, source_url=None):
"""
Install kubelogin, a client-go credential (exec) plugin implementing azure authentication.
"""
cloud_name = cmd.cli_ctx.cloud.name
if not source_url:
source_url = 'https://github.com/Azure/kubelogin/releases/download'
if cloud_name.lower() == 'azurechinacloud':
source_url = 'https://mirror.azure.cn/kubernetes/kubelogin'
if client_version == 'latest':
context = _ssl_context()
latest_release_url = 'https://api.github.com/repos/Azure/kubelogin/releases/latest'
if cloud_name.lower() == 'azurechinacloud':
latest_release_url = 'https://mirror.azure.cn/kubernetes/kubelogin/latest'
latest_release = urlopen(latest_release_url, context=context).read()
client_version = json.loads(latest_release)['tag_name'].strip()
else:
client_version = "v%s" % client_version
base_url = source_url + '/{}/kubelogin.zip'
file_url = base_url.format(client_version)
# ensure installation directory exists
install_dir, cli = os.path.dirname(install_location), os.path.basename(install_location)
if not os.path.exists(install_dir):
os.makedirs(install_dir)
system = platform.system()
if system == 'Windows':
sub_dir, binary_name = 'windows_amd64', 'kubelogin.exe'
elif system == 'Linux':
# TODO: Support ARM CPU here
sub_dir, binary_name = 'linux_amd64', 'kubelogin'
elif system == 'Darwin':
sub_dir, binary_name = 'darwin_amd64', 'kubelogin'
else:
        raise CLIError('Unsupported platform "{}". kubelogin is available for Windows, Linux and Darwin only.'.format(system))
with tempfile.TemporaryDirectory() as tmp_dir:
try:
download_path = os.path.join(tmp_dir, 'kubelogin.zip')
logger.warning('Downloading client to "%s" from "%s"', download_path, file_url)
_urlretrieve(file_url, download_path)
except IOError as ex:
raise CLIError('Connection error while attempting to download client ({})'.format(ex))
_unzip(download_path, tmp_dir)
download_path = os.path.join(tmp_dir, 'bin', sub_dir, binary_name)
shutil.move(download_path, install_location)
os.chmod(install_location, os.stat(install_location).st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
    if system == 'Windows':  # be verbose, as the install_location is likely not on the Windows search PATH
env_paths = os.environ['PATH'].split(';')
found = next((x for x in env_paths if x.lower().rstrip('\\') == install_dir.lower()), None)
if not found:
# pylint: disable=logging-format-interpolation
            logger.warning('Please add "{0}" to your search PATH so the `{1}` command can be found. Two options: \n'
                           ' 1. Run "set PATH=%PATH%;{0}" or "$env:path += \'{0}\'" for PowerShell. '
                           'This is good for the current command session.\n'
                           ' 2. Update the system PATH environment variable by following '
                           '"Control Panel->System->Advanced->Environment Variables", and re-open the command window. '
                           'You only need to do it once.'.format(install_dir, cli))
else:
logger.warning('Please ensure that %s is in your search PATH, so the `%s` command can be found.',
install_dir, cli)
def _build_service_principal(rbac_client, cli_ctx, name, url, client_secret):
# use get_progress_controller
hook = cli_ctx.get_progress_controller(True)
    hook.add(message='Creating service principal', value=0, total_val=1.0)
logger.info('Creating service principal')
# always create application with 5 years expiration
start_date = datetime.datetime.utcnow()
end_date = start_date + relativedelta(years=5)
result, aad_session_key = create_application(rbac_client.applications, name, url, [url], password=client_secret,
start_date=start_date, end_date=end_date)
service_principal = result.app_id # pylint: disable=no-member
for x in range(0, 10):
hook.add(message='Creating service principal', value=0.1 * x, total_val=1.0)
try:
create_service_principal(cli_ctx, service_principal, rbac_client=rbac_client)
break
# TODO figure out what exception AAD throws here sometimes.
except Exception as ex: # pylint: disable=broad-except
logger.info(ex)
time.sleep(2 + 2 * x)
else:
return False, aad_session_key
hook.add(message='Finished service principal creation', value=1.0, total_val=1.0)
logger.info('Finished service principal creation')
return service_principal, aad_session_key
def _add_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal=True, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to propagate', value=0, total_val=1.0)
logger.info('Waiting for AAD role to propagate')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to propagate', value=0.1 * x, total_val=1.0)
try:
# TODO: break this out into a shared utility library
create_role_assignment(cli_ctx, role, service_principal_msi_id, is_service_principal, scope=scope)
break
except CloudError as ex:
if ex.message == 'The role assignment already exists.':
break
logger.info(ex.message)
except: # pylint: disable=bare-except
pass
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role propagation done', value=1.0, total_val=1.0)
logger.info('AAD role propagation done')
return True
def delete_role_assignments(cli_ctx, ids=None, assignee=None, role=None, resource_group_name=None,
scope=None, include_inherited=False, yes=None):
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
ids = ids or []
if ids:
if assignee or role or resource_group_name or scope or include_inherited:
            raise CLIError('When assignment ids are used, other filter parameters should not be provided')
for i in ids:
assignments_client.delete_by_id(i)
return
    if not any([ids, assignee, role, resource_group_name, scope, yes]):
msg = 'This will delete all role assignments under the subscription. Are you sure?'
if not prompt_y_n(msg, default="n"):
return
scope = _build_role_scope(resource_group_name, scope,
assignments_client.config.subscription_id)
assignments = _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited,
include_groups=False)
if assignments:
for a in assignments:
assignments_client.delete_by_id(a.id)
def _delete_role_assignments(cli_ctx, role, service_principal, delay=2, scope=None):
# AAD can have delays in propagating data, so sleep and retry
hook = cli_ctx.get_progress_controller(True)
hook.add(message='Waiting for AAD role to delete', value=0, total_val=1.0)
logger.info('Waiting for AAD role to delete')
for x in range(0, 10):
hook.add(message='Waiting for AAD role to delete', value=0.1 * x, total_val=1.0)
try:
delete_role_assignments(cli_ctx,
role=role,
assignee=service_principal,
scope=scope)
break
except CLIError as ex:
raise ex
except CloudError as ex:
logger.info(ex)
time.sleep(delay + delay * x)
else:
return False
hook.add(message='AAD role deletion done', value=1.0, total_val=1.0)
logger.info('AAD role deletion done')
return True
def _search_role_assignments(cli_ctx, assignments_client, definitions_client,
scope, assignee, role, include_inherited, include_groups):
assignee_object_id = None
if assignee:
assignee_object_id = _resolve_object_id(cli_ctx, assignee)
# always use "scope" if provided, so we can get assignments beyond subscription e.g. management groups
if scope:
assignments = list(assignments_client.list_for_scope(
scope=scope, filter='atScope()'))
elif assignee_object_id:
if include_groups:
f = "assignedTo('{}')".format(assignee_object_id)
else:
f = "principalId eq '{}'".format(assignee_object_id)
assignments = list(assignments_client.list(filter=f))
else:
assignments = list(assignments_client.list())
if assignments:
assignments = [a for a in assignments if (
not scope or
include_inherited and re.match(_get_role_property(a, 'scope'), scope, re.I) or
_get_role_property(a, 'scope').lower() == scope.lower()
)]
if role:
role_id = _resolve_role_id(role, scope, definitions_client)
assignments = [i for i in assignments if _get_role_property(
i, 'role_definition_id') == role_id]
if assignee_object_id:
assignments = [i for i in assignments if _get_role_property(
i, 'principal_id') == assignee_object_id]
return assignments
def _get_role_property(obj, property_name):
if isinstance(obj, dict):
return obj[property_name]
return getattr(obj, property_name)
def _get_default_dns_prefix(name, resource_group_name, subscription_id):
# Use subscription id to provide uniqueness and prevent DNS name clashes
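    # Illustrative example (not from the source): name='myAKSCluster', resource group='my-rg' and a
    # subscription id starting with '8ecadfc9' yield the prefix 'myAKSClust-my-rg-8ecadf'.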
name_part = re.sub('[^A-Za-z0-9-]', '', name)[0:10]
if not name_part[0].isalpha():
name_part = (str('a') + name_part)[0:10]
resource_group_part = re.sub('[^A-Za-z0-9-]', '', resource_group_name)[0:16]
return '{}-{}-{}'.format(name_part, resource_group_part, subscription_id[0:6])
def list_acs_locations(cmd, client):
return {
"productionRegions": regions_in_prod,
"previewRegions": regions_in_preview
}
def _generate_windows_profile(windows, admin_username, admin_password):
if windows:
if not admin_password:
raise CLIError('--admin-password is required.')
if len(admin_password) < 6:
raise CLIError('--admin-password must be at least 6 characters')
windows_profile = {
"adminUsername": admin_username,
"adminPassword": admin_password,
}
return windows_profile
return None
def _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile):
master_pool_profile = {}
default_master_pool_profile = {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
}
if api_version == "2017-07-01":
default_master_pool_profile = _update_dict(default_master_pool_profile, {
"count": int(master_count),
"dnsPrefix": dns_name_prefix + 'mgmt',
"vmSize": master_vm_size,
"osDiskSizeGB": int(master_osdisk_size),
"vnetSubnetID": master_vnet_subnet_id,
"firstConsecutiveStaticIP": master_first_consecutive_static_ip,
"storageProfile": master_storage_profile,
})
if not master_profile:
master_pool_profile = default_master_pool_profile
else:
master_pool_profile = _update_dict(default_master_pool_profile, master_profile)
return master_pool_profile
def _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile):
agent_pool_profiles = []
default_agent_pool_profile = {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
}
if api_version == "2017-07-01":
default_agent_pool_profile = _update_dict(default_agent_pool_profile, {
"count": int(agent_count),
"vmSize": agent_vm_size,
"osDiskSizeGB": int(agent_osdisk_size),
"osType": os_type,
"dnsPrefix": dns_name_prefix + 'agent',
"vnetSubnetID": agent_vnet_subnet_id,
"ports": agent_ports,
"storageProfile": agent_storage_profile,
})
if agent_profiles is None:
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, {"name": "agentpool0"}))
else:
# override agentPoolProfiles by using the passed in agent_profiles
for idx, ap in enumerate(agent_profiles):
# if the user specified dnsPrefix, we honor that
# otherwise, we use the idx to avoid duplicate dns name
a = _update_dict({"dnsPrefix": dns_name_prefix + 'agent' + str(idx)}, ap)
agent_pool_profiles.append(_update_dict(default_agent_pool_profile, a))
return agent_pool_profiles
def _generate_outputs(name, orchestrator_type, admin_username):
# define outputs
outputs = {
"masterFQDN": {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).masterProfile.fqdn]".format(name) # pylint: disable=line-too-long
},
"sshMaster0": {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 22')]".format(admin_username, name) # pylint: disable=line-too-long
},
}
if orchestrator_type.lower() != "kubernetes":
outputs["agentFQDN"] = {
"type": "string",
"value": "[reference(concat('Microsoft.ContainerService/containerServices/', '{}')).agentPoolProfiles[0].fqdn]".format(name) # pylint: disable=line-too-long
}
# override sshMaster0 for non-kubernetes scenarios
outputs["sshMaster0"] = {
"type": "string",
"value": "[concat('ssh ', '{0}', '@', reference(concat('Microsoft.ContainerService/containerServices/', '{1}')).masterProfile.fqdn, ' -A -p 2200')]".format(admin_username, name) # pylint: disable=line-too-long
}
return outputs
def _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile):
properties = {
"orchestratorProfile": {
"orchestratorType": orchestrator_type,
},
"masterProfile": master_pool_profile,
"agentPoolProfiles": agent_pool_profiles,
"linuxProfile": {
"ssh": {
"publicKeys": [
{
"keyData": ssh_key_value
}
]
},
"adminUsername": admin_username
},
}
if api_version == "2017-07-01":
properties["orchestratorProfile"]["orchestratorVersion"] = orchestrator_version
if windows_profile is not None:
properties["windowsProfile"] = windows_profile
return properties
def _get_user_assigned_identity_client_id(cli_ctx, resource_id):
msi_client = get_msi_client(cli_ctx)
pattern = '/subscriptions/.*?/resourcegroups/(.*?)/providers/microsoft.managedidentity/userassignedidentities/(.*)'
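    # Expected resource id shape (lower-cased before matching), e.g.
    # /subscriptions/<sub>/resourcegroups/<rg>/providers/microsoft.managedidentity/userassignedidentities/<name>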
resource_id = resource_id.lower()
match = re.search(pattern, resource_id)
if match:
resource_group_name = match.group(1)
identity_name = match.group(2)
try:
identity = msi_client.user_assigned_identities.get(resource_group_name=resource_group_name,
resource_name=identity_name)
except CloudError as ex:
if 'was not found' in ex.message:
raise ResourceNotFoundError("Identity {} not found.".format(resource_id))
raise ClientRequestError(ex.message)
return identity.client_id
raise InvalidArgumentValueError("Cannot parse identity name from provided resource id {}.".format(resource_id))
# pylint: disable=too-many-locals
def acs_create(cmd, client, resource_group_name, deployment_name, name, ssh_key_value, dns_name_prefix=None,
location=None, admin_username="azureuser", api_version=None, master_profile=None,
master_vm_size="Standard_D2_v2", master_osdisk_size=0, master_count=1, master_vnet_subnet_id="",
master_first_consecutive_static_ip="10.240.255.5", master_storage_profile="",
agent_profiles=None, agent_vm_size="Standard_D2_v2", agent_osdisk_size=0,
agent_count=3, agent_vnet_subnet_id="", agent_ports=None, agent_storage_profile="",
orchestrator_type="DCOS", orchestrator_version="", service_principal=None, client_secret=None, tags=None,
windows=False, admin_password="", generate_ssh_keys=False, # pylint: disable=unused-argument
validate=False, no_wait=False):
"""Create a new Acs.
:param resource_group_name: The name of the resource group. The name
is case insensitive.
:type resource_group_name: str
:param deployment_name: The name of the deployment.
:type deployment_name: str
:param dns_name_prefix: Sets the Domain name prefix for the cluster.
The concatenation of the domain name and the regionalized DNS zone
make up the fully qualified domain name associated with the public
IP address.
:type dns_name_prefix: str
:param name: Resource name for the container service.
:type name: str
:param ssh_key_value: Configure all linux machines with the SSH RSA
public key string. Your key should include three parts, for example
        'ssh-rsa AAAAB...snip...UcyupgH azureuser@linuxvm'
:type ssh_key_value: str
:param admin_username: User name for the Linux Virtual Machines.
:type admin_username: str
:param api_version: ACS API version to use
:type api_version: str
:param master_profile: MasterProfile used to describe master pool
:type master_profile: dict
:param master_vm_size: The size of master pool Virtual Machine
:type master_vm_size: str
:param master_osdisk_size: The osDisk size in GB of master pool Virtual Machine
:type master_osdisk_size: int
:param master_count: The number of masters for the cluster.
:type master_count: int
:param master_vnet_subnet_id: The vnet subnet id for master pool
:type master_vnet_subnet_id: str
:param master_storage_profile: The storage profile used for master pool.
        Possible values are StorageAccount and ManagedDisk.
:type master_storage_profile: str
:param agent_profiles: AgentPoolProfiles used to describe agent pools
:type agent_profiles: dict
:param agent_vm_size: The size of the Virtual Machine.
:type agent_vm_size: str
:param agent_osdisk_size: The osDisk size in GB of agent pool Virtual Machine
:type agent_osdisk_size: int
    :param agent_vnet_subnet_id: The vnet subnet id for agent pool
:type agent_vnet_subnet_id: str
:param agent_ports: the ports exposed on the agent pool
:type agent_ports: list
:param agent_storage_profile: The storage profile used for agent pool.
        Possible values are StorageAccount and ManagedDisk.
:type agent_storage_profile: str
:param location: Location for VM resources.
:type location: str
:param orchestrator_type: The type of orchestrator used to manage the
applications on the cluster.
:type orchestrator_type: str or :class:`orchestratorType
<Default.models.orchestratorType>`
:param tags: Tags object.
:type tags: object
    :param windows: If true, the cluster will be built for running Windows containers.
:type windows: bool
    :param admin_password: The administration password for Windows nodes. Only available if --windows=true
:type admin_password: str
:param bool raw: returns the direct response alongside the
deserialized response
:rtype:
:class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`
instance that returns :class:`DeploymentExtended
<Default.models.DeploymentExtended>`
:rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`
if raw=true
:raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
"""
if ssh_key_value is not None and not is_valid_ssh_rsa_public_key(ssh_key_value):
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(ssh_key_value))
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
# if api-version is not specified, or specified in a version not supported
# override based on location
if api_version is None or api_version not in ["2017-01-31", "2017-07-01"]:
if location in regions_in_preview:
api_version = "2017-07-01" # 2017-07-01 supported in the preview locations
else:
api_version = "2017-01-31" # 2017-01-31 applied to other locations
if orchestrator_type.lower() == 'kubernetes':
principal_obj = _ensure_service_principal(cmd.cli_ctx, service_principal, client_secret, subscription_id,
dns_name_prefix, location, name)
client_secret = principal_obj.get("client_secret")
service_principal = principal_obj.get("service_principal")
elif windows:
raise CLIError('--windows is only supported for Kubernetes clusters')
# set location if void
if not location:
location = '[resourceGroup().location]'
# set os_type
os_type = 'Linux'
if windows:
os_type = 'Windows'
# set agent_ports if void
if not agent_ports:
agent_ports = []
# get windows_profile
windows_profile = _generate_windows_profile(windows, admin_username, admin_password)
    # The resources.properties fields should match the ContainerService API model
master_pool_profile = _generate_master_pool_profile(api_version, master_profile, master_count, dns_name_prefix,
master_vm_size, master_osdisk_size, master_vnet_subnet_id,
master_first_consecutive_static_ip, master_storage_profile)
agent_pool_profiles = _generate_agent_pool_profiles(api_version, agent_profiles, agent_count, dns_name_prefix,
agent_vm_size, os_type, agent_osdisk_size, agent_vnet_subnet_id,
agent_ports, agent_storage_profile)
outputs = _generate_outputs(name, orchestrator_type, admin_username)
properties = _generate_properties(api_version, orchestrator_type, orchestrator_version, master_pool_profile,
agent_pool_profiles, ssh_key_value, admin_username, windows_profile)
resource = {
"apiVersion": api_version,
"location": location,
"type": "Microsoft.ContainerService/containerServices",
"name": name,
"tags": tags,
"properties": properties,
}
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"resources": [
resource,
],
"outputs": outputs,
}
params = {}
if service_principal is not None and client_secret is not None:
properties["servicePrincipalProfile"] = {
"clientId": service_principal,
"secret": "[parameters('clientSecret')]",
}
template["parameters"] = {
"clientSecret": {
"type": "secureString",
"metadata": {
"description": "The client secret for the service principal"
}
}
}
params = {
"clientSecret": {
"value": client_secret
}
}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
return _invoke_deployment(cmd, resource_group_name, deployment_name,
template, params, validate, no_wait)
except CloudError as ex:
retry_exception = ex
if 'is not valid according to the validation procedure' in ex.message or \
'The credentials in ServicePrincipalProfile were invalid' in ex.message or \
'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def store_acs_service_principal(subscription_id, client_secret, service_principal,
file_name='acsServicePrincipal.json'):
obj = {}
if client_secret:
obj['client_secret'] = client_secret
if service_principal:
obj['service_principal'] = service_principal
config_path = os.path.join(get_config_dir(), file_name)
full_config = load_service_principals(config_path=config_path)
if not full_config:
full_config = {}
full_config[subscription_id] = obj
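    # Write the file with owner-only permissions (0o600) since it may contain a client secret.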
with os.fdopen(os.open(config_path, os.O_RDWR | os.O_CREAT | os.O_TRUNC, 0o600),
'w+') as spFile:
json.dump(full_config, spFile)
def load_acs_service_principal(subscription_id, file_name='acsServicePrincipal.json'):
config_path = os.path.join(get_config_dir(), file_name)
config = load_service_principals(config_path)
if not config:
return None
return config.get(subscription_id)
def load_service_principals(config_path):
if not os.path.exists(config_path):
return None
fd = os.open(config_path, os.O_RDONLY)
try:
with os.fdopen(fd) as f:
return shell_safe_json_parse(f.read())
except: # pylint: disable=bare-except
return None
def _invoke_deployment(cmd, resource_group_name, deployment_name, template, parameters, validate, no_wait,
subscription_id=None):
from azure.cli.core.profiles import ResourceType
DeploymentProperties = cmd.get_models('DeploymentProperties', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
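    # 'incremental' mode adds/updates only the resources in the template and leaves
    # other resources in the resource group untouched.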
properties = DeploymentProperties(template=template, parameters=parameters, mode='incremental')
smc = get_mgmt_service_client(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES,
subscription_id=subscription_id).deployments
if validate:
logger.info('==== BEGIN TEMPLATE ====')
logger.info(json.dumps(template, indent=2))
logger.info('==== END TEMPLATE ====')
if cmd.supported_api_version(min_api='2019-10-01', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES):
Deployment = cmd.get_models('Deployment', resource_type=ResourceType.MGMT_RESOURCE_RESOURCES)
deployment = Deployment(properties=properties)
if validate:
validation_poller = smc.validate(resource_group_name, deployment_name, deployment)
return LongRunningOperation(cmd.cli_ctx)(validation_poller)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, deployment)
if validate:
return smc.validate(resource_group_name, deployment_name, properties)
return sdk_no_wait(no_wait, smc.create_or_update, resource_group_name, deployment_name, properties)
def k8s_get_credentials(cmd, client, name, resource_group_name,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
ssh_key_file=None,
overwrite_existing=False):
"""Download and install kubectl credentials from the cluster master
:param name: The name of the cluster.
:type name: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param path: Where to install the kubectl config file
:type path: str
:param ssh_key_file: Path to an SSH key file to use
:type ssh_key_file: str
"""
acs_info = _get_acs_info(cmd.cli_ctx, name, resource_group_name)
_k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing)
def _k8s_get_credentials_internal(name, acs_info, path, ssh_key_file, overwrite_existing):
if ssh_key_file is not None and not os.path.isfile(ssh_key_file):
raise CLIError('Private key file {} does not exist'.format(ssh_key_file))
dns_prefix = acs_info.master_profile.dns_prefix # pylint: disable=no-member
location = acs_info.location # pylint: disable=no-member
user = acs_info.linux_profile.admin_username # pylint: disable=no-member
_mkdir_p(os.path.dirname(path))
path_candidate = path
ix = 0
while os.path.exists(path_candidate):
ix += 1
path_candidate = '{}-{}-{}'.format(path, name, ix)
# TODO: this only works for public cloud, need other casing for national clouds
acs_client.secure_copy(user, '{}.{}.cloudapp.azure.com'.format(dns_prefix, location),
'.kube/config', path_candidate, key_filename=ssh_key_file)
# merge things
if path_candidate != path:
try:
merge_kubernetes_configurations(path, path_candidate, overwrite_existing)
except yaml.YAMLError as exc:
logger.warning('Failed to merge credentials to kube config file: %s', exc)
logger.warning('The credentials have been saved to %s', path_candidate)
def _handle_merge(existing, addition, key, replace):
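    # Merge the entries under 'key' (clusters/users/contexts) from 'addition' into 'existing'.
    # Entries with the same 'name' are replaced when 'replace' is set or the entries are identical;
    # otherwise the user is prompted, and declining (or having no TTY) raises an error.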
if not addition.get(key, False):
return
if not existing.get(key):
existing[key] = addition[key]
return
for i in addition[key]:
for j in existing[key]:
if not i.get('name', False) or not j.get('name', False):
continue
if i['name'] == j['name']:
if replace or i == j:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in your kubeconfig file.\nOverwrite?'
overwrite = False
try:
overwrite = prompt_y_n(msg.format(i['name']))
except NoTTYException:
pass
if overwrite:
existing[key].remove(j)
else:
msg = 'A different object named {} already exists in {} in your kubeconfig file.'
raise CLIError(msg.format(i['name'], key))
existing[key].append(i)
def load_kubernetes_configuration(filename):
try:
with open(filename) as stream:
return yaml.safe_load(stream)
except (IOError, OSError) as ex:
if getattr(ex, 'errno', 0) == errno.ENOENT:
raise CLIError('{} does not exist'.format(filename))
raise
except (yaml.parser.ParserError, UnicodeDecodeError) as ex:
raise CLIError('Error parsing {} ({})'.format(filename, str(ex)))
def merge_kubernetes_configurations(existing_file, addition_file, replace, context_name=None):
existing = load_kubernetes_configuration(existing_file)
addition = load_kubernetes_configuration(addition_file)
    if addition is None:
        raise CLIError('failed to load additional configuration from {}'.format(addition_file))
    if context_name is not None:
addition['contexts'][0]['name'] = context_name
addition['contexts'][0]['context']['cluster'] = context_name
addition['clusters'][0]['name'] = context_name
addition['current-context'] = context_name
# rename the admin context so it doesn't overwrite the user context
for ctx in addition.get('contexts', []):
try:
if ctx['context']['user'].startswith('clusterAdmin'):
admin_name = ctx['name'] + '-admin'
addition['current-context'] = ctx['name'] = admin_name
break
except (KeyError, TypeError):
continue
if existing is None:
existing = addition
else:
_handle_merge(existing, addition, 'clusters', replace)
_handle_merge(existing, addition, 'users', replace)
_handle_merge(existing, addition, 'contexts', replace)
existing['current-context'] = addition['current-context']
# check that ~/.kube/config is only read- and writable by its owner
if platform.system() != 'Windows':
existing_file_perms = "{:o}".format(stat.S_IMODE(os.lstat(existing_file).st_mode))
if not existing_file_perms.endswith('600'):
logger.warning('%s has permissions "%s".\nIt should be readable and writable only by its owner.',
existing_file, existing_file_perms)
with open(existing_file, 'w+') as stream:
yaml.safe_dump(existing, stream, default_flow_style=False)
current_context = addition.get('current-context', 'UNKNOWN')
msg = 'Merged "{}" as current context in {}'.format(current_context, existing_file)
print(msg)
def _get_host_name(acs_info):
"""
Gets the FQDN from the acs_info object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info is None:
raise CLIError('Missing acs_info')
if acs_info.master_profile is None:
raise CLIError('Missing master_profile')
if acs_info.master_profile.fqdn is None:
raise CLIError('Missing fqdn')
return acs_info.master_profile.fqdn
def _get_username(acs_info):
"""
Gets the admin user name from the Linux profile of the ContainerService object.
:param acs_info: ContainerService object from Azure REST API
:type acs_info: ContainerService
"""
if acs_info.linux_profile is not None:
return acs_info.linux_profile.admin_username
return None
def _get_acs_info(cli_ctx, name, resource_group_name):
"""
Gets the ContainerService object from Azure REST API.
:param name: ACS resource name
:type name: String
:param resource_group_name: Resource group name
:type resource_group_name: String
"""
container_services = cf_container_services(cli_ctx, None)
return container_services.get(resource_group_name, name)
def _rand_str(n):
"""
    Gets a random lowercase alphanumeric string of length n.
"""
choices = string.ascii_lowercase + string.digits
return ''.join(random.SystemRandom().choice(choices) for _ in range(n))
def _mkdir_p(path):
# http://stackoverflow.com/a/600612
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def update_acs(cmd, client, resource_group_name, container_service_name, new_agent_count):
instance = client.get(resource_group_name, container_service_name)
instance.agent_pool_profiles[0].count = new_agent_count # pylint: disable=no-member
# null out the service principal because otherwise validation complains
if instance.orchestrator_profile.orchestrator_type == ContainerServiceOrchestratorTypes.kubernetes:
instance.service_principal_profile = None
# null out the windows profile so that validation doesn't complain about not having the admin password
instance.windows_profile = None
return client.create_or_update(resource_group_name, container_service_name, instance)
def list_container_services(cmd, client, resource_group_name=None):
''' List Container Services. '''
svc_list = client.list_by_resource_group(resource_group_name=resource_group_name) \
if resource_group_name else client.list()
return list(svc_list)
def show_service_principal(client, identifier):
object_id = _resolve_service_principal(client, identifier)
return client.get(object_id)
def _resolve_service_principal(client, identifier):
# todo: confirm with graph team that a service principal name must be unique
result = list(client.list(filter="servicePrincipalNames/any(c:c eq '{}')".format(identifier)))
if result:
return result[0].object_id
try:
uuid.UUID(identifier)
return identifier # assume an object id
except ValueError:
raise CLIError("service principal '{}' doesn't exist".format(identifier))
def create_application(client, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
app_create_param = ApplicationCreateParameters(available_to_other_tenants=available_to_other_tenants,
display_name=display_name,
identifier_uris=identifier_uris,
homepage=homepage,
reply_urls=reply_urls,
key_credentials=key_creds,
password_credentials=password_creds,
required_resource_access=required_resource_accesses)
try:
result = client.create(app_create_param, raw=True)
return result.output, result.response.headers["ocp-aad-session-key"]
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def update_application(client, object_id, display_name, homepage, identifier_uris,
available_to_other_tenants=False, password=None, reply_urls=None,
key_value=None, key_type=None, key_usage=None, start_date=None,
end_date=None, required_resource_accesses=None):
from azure.graphrbac.models import GraphErrorException
password_creds, key_creds = _build_application_creds(password, key_value, key_type,
key_usage, start_date, end_date)
try:
if key_creds:
client.update_key_credentials(object_id, key_creds)
if password_creds:
client.update_password_credentials(object_id, password_creds)
if reply_urls:
client.patch(object_id, ApplicationUpdateParameters(reply_urls=reply_urls))
return
except GraphErrorException as ex:
if 'insufficient privileges' in str(ex).lower():
link = 'https://docs.microsoft.com/azure/azure-resource-manager/resource-group-create-service-principal-portal' # pylint: disable=line-too-long
raise CLIError("Directory permission is needed for the current user to register the application. "
"For how to configure, please refer '{}'. Original error: {}".format(link, ex))
raise
def _build_application_creds(password=None, key_value=None, key_type=None,
key_usage=None, start_date=None, end_date=None):
if password and key_value:
raise CLIError('specify either --password or --key-value, but not both.')
if not start_date:
start_date = datetime.datetime.utcnow()
elif isinstance(start_date, str):
start_date = dateutil.parser.parse(start_date)
if not end_date:
end_date = start_date + relativedelta(years=1)
elif isinstance(end_date, str):
end_date = dateutil.parser.parse(end_date)
key_type = key_type or 'AsymmetricX509Cert'
key_usage = key_usage or 'Verify'
password_creds = None
key_creds = None
if password:
password_creds = [PasswordCredential(start_date=start_date, end_date=end_date,
key_id=str(uuid.uuid4()), value=password)]
elif key_value:
key_creds = [KeyCredential(start_date=start_date, end_date=end_date, value=key_value,
key_id=str(uuid.uuid4()), usage=key_usage, type=key_type)]
return (password_creds, key_creds)
def create_service_principal(cli_ctx, identifier, resolve_app=True, rbac_client=None):
if rbac_client is None:
rbac_client = get_graph_rbac_management_client(cli_ctx)
if resolve_app:
try:
uuid.UUID(identifier)
result = list(rbac_client.applications.list(filter="appId eq '{}'".format(identifier)))
except ValueError:
result = list(rbac_client.applications.list(
filter="identifierUris/any(s:s eq '{}')".format(identifier)))
if not result: # assume we get an object id
result = [rbac_client.applications.get(identifier)]
app_id = result[0].app_id
else:
app_id = identifier
return rbac_client.service_principals.create(ServicePrincipalCreateParameters(app_id=app_id, account_enabled=True))
def create_role_assignment(cli_ctx, role, assignee, is_service_principal, resource_group_name=None, scope=None):
return _create_role_assignment(cli_ctx,
role, assignee, resource_group_name,
scope, resolve_assignee=is_service_principal)
def _create_role_assignment(cli_ctx, role, assignee,
resource_group_name=None, scope=None, resolve_assignee=True):
from azure.cli.core.profiles import ResourceType, get_sdk
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
definitions_client = factory.role_definitions
scope = _build_role_scope(resource_group_name, scope, assignments_client.config.subscription_id)
role_id = _resolve_role_id(role, scope, definitions_client)
    # If the cluster has a service principal, resolve the service principal client id to get the object id;
    # otherwise use the MSI object id.
object_id = _resolve_object_id(cli_ctx, assignee) if resolve_assignee else assignee
RoleAssignmentCreateParameters = get_sdk(cli_ctx, ResourceType.MGMT_AUTHORIZATION,
'RoleAssignmentCreateParameters', mod='models',
operation_group='role_assignments')
parameters = RoleAssignmentCreateParameters(role_definition_id=role_id, principal_id=object_id)
assignment_name = uuid.uuid4()
custom_headers = None
return assignments_client.create(scope, assignment_name, parameters, custom_headers=custom_headers)
def _build_role_scope(resource_group_name, scope, subscription_id):
subscription_scope = '/subscriptions/' + subscription_id
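    # Resulting scopes look like '/subscriptions/<sub>' or
    # '/subscriptions/<sub>/resourceGroups/<rg>' when a resource group is supplied;
    # an explicitly passed scope is used as-is.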
if scope:
if resource_group_name:
err = 'Resource group "{}" is redundant because scope is supplied'
raise CLIError(err.format(resource_group_name))
elif resource_group_name:
scope = subscription_scope + '/resourceGroups/' + resource_group_name
else:
scope = subscription_scope
return scope
def _resolve_role_id(role, scope, definitions_client):
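    # 'role' may be either a role definition GUID or a role name such as 'Network Contributor';
    # names are resolved against the role definitions visible at the given scope.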
role_id = None
try:
uuid.UUID(role)
role_id = role
except ValueError:
pass
if not role_id: # retrieve role id
role_defs = list(definitions_client.list(scope, "roleName eq '{}'".format(role)))
if not role_defs:
raise CLIError("Role '{}' doesn't exist.".format(role))
if len(role_defs) > 1:
ids = [r.id for r in role_defs]
err = "More than one role matches the given name '{}'. Please pick a value from '{}'"
raise CLIError(err.format(role, ids))
role_id = role_defs[0].id
return role_id
def _resolve_object_id(cli_ctx, assignee):
client = get_graph_rbac_management_client(cli_ctx)
result = None
if assignee.find('@') >= 0: # looks like a user principal name
result = list(client.users.list(filter="userPrincipalName eq '{}'".format(assignee)))
if not result:
result = list(client.service_principals.list(
filter="servicePrincipalNames/any(c:c eq '{}')".format(assignee)))
if not result: # assume an object id, let us verify it
result = _get_object_stubs(client, [assignee])
# 2+ matches should never happen, so we only check 'no match' here
if not result:
raise CLIError("No matches in graph database for '{}'".format(assignee))
return result[0].object_id
def _get_object_stubs(graph_client, assignees):
params = GetObjectsParameters(include_directory_object_references=True,
object_ids=assignees)
return list(graph_client.objects.get_objects_by_object_ids(params))
def _update_dict(dict1, dict2):
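    # Shallow merge: keys in dict2 override dict1, e.g.
    # _update_dict({'count': 1}, {'count': 3, 'vmSize': 'x'}) -> {'count': 3, 'vmSize': 'x'}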
cp = dict1.copy()
cp.update(dict2)
return cp
def subnet_role_assignment_exists(cli_ctx, scope):
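    # GUID of the built-in 'Network Contributor' role definition; assignments are matched by
    # suffix so the full role definition id does not have to be constructed here.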
network_contributor_role_id = "4d97b98b-1d4f-4787-a291-c67834d212e7"
factory = get_auth_management_client(cli_ctx, scope)
assignments_client = factory.role_assignments
for i in assignments_client.list_for_scope(scope=scope, filter='atScope()'):
if i.scope == scope and i.role_definition_id.endswith(network_contributor_role_id):
return True
return False
# pylint: disable=too-many-statements,too-many-branches
def aks_browse(cmd, client, resource_group_name, name, disable_browser=False,
listen_address='127.0.0.1', listen_port='8001'):
# verify the kube-dashboard addon was not disabled
instance = client.get(resource_group_name, name)
addon_profiles = instance.addon_profiles or {}
# addon name is case insensitive
addon_profile = next((addon_profiles[k] for k in addon_profiles
if k.lower() == CONST_KUBE_DASHBOARD_ADDON_NAME.lower()),
ManagedClusterAddonProfile(enabled=False))
# open portal view if addon is not enabled or k8s version >= 1.19.0
if StrictVersion(instance.kubernetes_version) >= StrictVersion('1.19.0') or (not addon_profile.enabled):
subscription_id = get_subscription_id(cmd.cli_ctx)
dashboardURL = (
cmd.cli_ctx.cloud.endpoints.portal + # Azure Portal URL (https://portal.azure.com for public cloud)
('/#resource/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.ContainerService'
'/managedClusters/{2}/workloads').format(subscription_id, resource_group_name, name)
)
if in_cloud_console():
logger.warning('To view the Kubernetes resources view, please open %s in a new tab', dashboardURL)
else:
logger.warning('Kubernetes resources view on %s', dashboardURL)
if not disable_browser:
webbrowser.open_new_tab(dashboardURL)
return
# otherwise open the kube-dashboard addon
if not which('kubectl'):
        raise CLIError('Cannot find kubectl executable in PATH')
_, browse_path = tempfile.mkstemp()
aks_get_credentials(cmd, client, resource_group_name, name, admin=False, path=browse_path)
# find the dashboard pod's name
try:
dashboard_pod = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--output", "name", "--selector", "k8s-app=kubernetes-dashboard"],
universal_newlines=True)
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard pod: {}'.format(err))
if dashboard_pod:
# remove any "pods/" or "pod/" prefix from the name
dashboard_pod = str(dashboard_pod).split('/')[-1].strip()
else:
raise CLIError("Couldn't find the Kubernetes dashboard pod.")
# find the port
try:
dashboard_port = subprocess.check_output(
["kubectl", "get", "pods", "--kubeconfig", browse_path, "--namespace", "kube-system",
"--selector", "k8s-app=kubernetes-dashboard",
"--output", "jsonpath='{.items[0].spec.containers[0].ports[0].containerPort}'"]
)
# output format: b"'{port}'"
dashboard_port = int((dashboard_port.decode('utf-8').replace("'", "")))
except subprocess.CalledProcessError as err:
raise CLIError('Could not find dashboard port: {}'.format(err))
# use https if dashboard container is using https
if dashboard_port == 8443:
protocol = 'https'
else:
protocol = 'http'
proxy_url = 'http://{0}:{1}/'.format(listen_address, listen_port)
dashboardURL = '{0}/api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(proxy_url,
protocol)
# launch kubectl port-forward locally to access the remote dashboard
if in_cloud_console():
# TODO: better error handling here.
response = requests.post('http://localhost:8888/openport/{0}'.format(listen_port))
result = json.loads(response.text)
dashboardURL = '{0}api/v1/namespaces/kube-system/services/{1}:kubernetes-dashboard:/proxy/'.format(
result['url'], protocol)
term_id = os.environ.get('ACC_TERM_ID')
if term_id:
response = requests.post('http://localhost:8888/openLink/{0}'.format(term_id),
json={"url": dashboardURL})
logger.warning('To view the console, please open %s in a new tab', dashboardURL)
else:
logger.warning('Proxy running on %s', proxy_url)
logger.warning('Press CTRL+C to close the tunnel...')
if not disable_browser:
wait_then_open_async(dashboardURL)
try:
try:
subprocess.check_output(["kubectl", "--kubeconfig", browse_path, "proxy", "--address",
listen_address, "--port", listen_port], stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
            if err.output.find(b'unknown flag: --address') != -1:
if listen_address != '127.0.0.1':
logger.warning('"--address" is only supported in kubectl v1.13 and later.')
logger.warning('The "--listen-address" argument will be ignored.')
subprocess.call(["kubectl", "--kubeconfig", browse_path, "proxy", "--port", listen_port])
except KeyboardInterrupt:
# Let command processing finish gracefully after the user presses [Ctrl+C]
pass
finally:
if in_cloud_console():
            requests.post('http://localhost:8888/closeport/{0}'.format(listen_port))
def _trim_nodepoolname(nodepool_name):
if not nodepool_name:
return "nodepool1"
return nodepool_name[:12]
def _validate_ssh_key(no_ssh_key, ssh_key_value):
if not no_ssh_key:
try:
if not ssh_key_value or not is_valid_ssh_rsa_public_key(ssh_key_value):
raise ValueError()
except (TypeError, ValueError):
shortened_key = truncate_text(ssh_key_value)
raise CLIError('Provided ssh key ({}) is invalid or non-existent'.format(shortened_key))
def _add_monitoring_role_assignment(result, cluster_resource_id, cmd):
service_principal_msi_id = None
# Check if service principal exists, if it does, assign permissions to service principal
# Else, provide permissions to MSI
if (
hasattr(result, 'service_principal_profile') and
hasattr(result.service_principal_profile, 'client_id') and
result.service_principal_profile.client_id.lower() != 'msi'
):
logger.info('valid service principal exists, using it')
service_principal_msi_id = result.service_principal_profile.client_id
is_service_principal = True
elif (
(hasattr(result, 'addon_profiles')) and
(CONST_MONITORING_ADDON_NAME in result.addon_profiles) and
(hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME], 'identity')) and
(hasattr(result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity, 'object_id'))
):
logger.info('omsagent MSI exists, using it')
service_principal_msi_id = result.addon_profiles[CONST_MONITORING_ADDON_NAME].identity.object_id
is_service_principal = False
if service_principal_msi_id is not None:
if not _add_role_assignment(cmd.cli_ctx, 'Monitoring Metrics Publisher',
service_principal_msi_id, is_service_principal, scope=cluster_resource_id):
logger.warning('Could not create a role assignment for Monitoring addon. '
'Are you an Owner on this subscription?')
else:
        logger.warning('Could not find service principal or user assigned MSI for role '
'assignment')
# pylint: disable=too-many-statements,too-many-branches
def aks_create(cmd, client, resource_group_name, name, ssh_key_value, # pylint: disable=too-many-locals
dns_name_prefix=None,
location=None,
admin_username="azureuser",
windows_admin_username=None,
windows_admin_password=None,
enable_ahub=False,
kubernetes_version='',
node_vm_size="Standard_DS2_v2",
node_osdisk_type=None,
node_osdisk_size=0,
node_osdisk_diskencryptionset_id=None,
node_count=3,
nodepool_name="nodepool1",
nodepool_tags=None,
nodepool_labels=None,
service_principal=None, client_secret=None,
no_ssh_key=False,
disable_rbac=None,
enable_rbac=None,
vm_set_type=None,
skip_subnet_role_assignment=False,
enable_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
network_plugin=None,
network_policy=None,
uptime_sla=False,
pod_cidr=None,
service_cidr=None,
dns_service_ip=None,
docker_bridge_address=None,
load_balancer_sku=None,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
outbound_type=None,
enable_addons=None,
workspace_resource_id=None,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
min_count=None,
max_count=None,
aad_client_app_id=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_tenant_id=None,
tags=None,
zones=None,
enable_node_public_ip=False,
generate_ssh_keys=False, # pylint: disable=unused-argument
api_server_authorized_ip_ranges=None,
enable_private_cluster=False,
enable_managed_identity=False,
assign_identity=None,
attach_acr=None,
enable_aad=False,
aad_admin_group_object_ids=None,
aci_subnet_name=None,
no_wait=False):
_validate_ssh_key(no_ssh_key, ssh_key_value)
subscription_id = get_subscription_id(cmd.cli_ctx)
if not dns_name_prefix:
dns_name_prefix = _get_default_dns_prefix(name, resource_group_name, subscription_id)
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
if location is None:
location = rg_location
vm_set_type = _set_vm_set_type(vm_set_type, kubernetes_version)
load_balancer_sku = set_load_balancer_sku(load_balancer_sku, kubernetes_version)
if api_server_authorized_ip_ranges and load_balancer_sku == "basic":
raise CLIError('--api-server-authorized-ip-ranges can only be used with standard load balancer')
agent_pool_profile = ManagedClusterAgentPoolProfile(
name=_trim_nodepoolname(nodepool_name), # Must be 12 chars or less before ACS RP adds to it
tags=nodepool_tags,
node_labels=nodepool_labels,
count=int(node_count),
vm_size=node_vm_size,
os_type="Linux",
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
availability_zones=zones,
enable_node_public_ip=enable_node_public_ip,
max_pods=int(max_pods) if max_pods else None,
type=vm_set_type,
mode="System"
)
if node_osdisk_size:
agent_pool_profile.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool_profile.os_disk_type = node_osdisk_type
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool_profile)
linux_profile = None
# LinuxProfile is just used for SSH access to VMs, so omit it if --no-ssh-key was specified.
if not no_ssh_key:
ssh_config = ContainerServiceSshConfiguration(
public_keys=[ContainerServiceSshPublicKey(key_data=ssh_key_value)])
linux_profile = ContainerServiceLinuxProfile(admin_username=admin_username, ssh=ssh_config)
windows_profile = None
if windows_admin_username or windows_admin_password:
        # Handle the case where windows_admin_password is set but windows_admin_username is not
if windows_admin_username is None:
try:
from knack.prompting import prompt
windows_admin_username = prompt('windows_admin_username: ')
                # Even if the user enters an empty value here, the admin_username validation
                # in ManagedClusterWindowsProfile will still reject it
except NoTTYException:
raise CLIError('Please specify username for Windows in non-interactive mode.')
if windows_admin_password is None:
try:
windows_admin_password = prompt_pass(
msg='windows-admin-password: ', confirm=True)
except NoTTYException:
raise CLIError(
'Please specify both username and password in non-interactive mode.')
windows_license_type = None
if enable_ahub:
windows_license_type = 'Windows_Server'
windows_profile = ManagedClusterWindowsProfile(
admin_username=windows_admin_username,
admin_password=windows_admin_password,
license_type=windows_license_type)
    # Skip creating a service principal profile for the cluster if the cluster
    # enables managed identity and the customer doesn't explicitly provide a service principal.
service_principal_profile = None
principal_obj = None
if not(enable_managed_identity and not service_principal and not client_secret):
principal_obj = _ensure_aks_service_principal(cmd.cli_ctx,
service_principal=service_principal, client_secret=client_secret,
subscription_id=subscription_id, dns_name_prefix=dns_name_prefix,
location=location, name=name)
service_principal_profile = ManagedClusterServicePrincipalProfile(
client_id=principal_obj.get("service_principal"),
secret=principal_obj.get("client_secret"),
key_vault_secret_ref=None)
if (vnet_subnet_id and not skip_subnet_role_assignment and
not subnet_role_assignment_exists(cmd.cli_ctx, vnet_subnet_id)):
        # if service_principal_profile is None, then this cluster is an MSI cluster,
        # and the service principal does not exist. Two cases:
        # 1. For system assigned identity, we just tell the user to grant the
        # permission after the cluster is created, to stay consistent with the portal experience.
        # 2. For user assigned identity, we can grant the needed permission to the
        # user-provided user assigned identity before creating the managed cluster.
if service_principal_profile is None and not assign_identity:
logger.warning('The cluster is an MSI cluster using system assigned identity, '
'please manually grant Network Contributor role to the '
'system assigned identity after the cluster is created, see '
'https://docs.microsoft.com/en-us/azure/aks/use-managed-identity')
else:
scope = vnet_subnet_id
identity_client_id = ""
if assign_identity:
identity_client_id = _get_user_assigned_identity_client_id(cmd.cli_ctx, assign_identity)
else:
identity_client_id = service_principal_profile.client_id
if not _add_role_assignment(cmd.cli_ctx, 'Network Contributor',
identity_client_id, scope=scope):
logger.warning('Could not create a role assignment for subnet. '
'Are you an Owner on this subscription?')
load_balancer_profile = create_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
if attach_acr:
if enable_managed_identity:
if no_wait:
raise CLIError('When --attach-acr and --enable-managed-identity are both specified, '
'--no-wait is not allowed, please wait until the whole operation succeeds.')
# Attach acr operation will be handled after the cluster is created
else:
_ensure_aks_acr(cmd.cli_ctx,
client_id=service_principal_profile.client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
outbound_type = _set_outbound_type(outbound_type, vnet_subnet_id, load_balancer_sku, load_balancer_profile)
network_profile = None
if any([network_plugin, pod_cidr, service_cidr, dns_service_ip,
docker_bridge_address, network_policy]):
if not network_plugin:
raise CLIError('Please explicitly specify the network plugin type')
if pod_cidr and network_plugin == "azure":
raise CLIError('Please use kubenet as the network plugin type when pod_cidr is specified')
network_profile = ContainerServiceNetworkProfile(
network_plugin=network_plugin,
pod_cidr=pod_cidr,
service_cidr=service_cidr,
dns_service_ip=dns_service_ip,
docker_bridge_cidr=docker_bridge_address,
network_policy=network_policy,
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type
)
else:
if load_balancer_sku.lower() == "standard" or load_balancer_profile:
network_profile = ContainerServiceNetworkProfile(
network_plugin="kubenet",
load_balancer_sku=load_balancer_sku.lower(),
load_balancer_profile=load_balancer_profile,
outbound_type=outbound_type,
)
if load_balancer_sku.lower() == "basic":
network_profile = ContainerServiceNetworkProfile(
load_balancer_sku=load_balancer_sku.lower(),
)
addon_profiles = _handle_addons_args(
cmd,
enable_addons,
subscription_id,
resource_group_name,
{},
workspace_resource_id,
aci_subnet_name,
vnet_subnet_id
)
monitoring = False
if CONST_MONITORING_ADDON_NAME in addon_profiles:
monitoring = True
_ensure_container_insights_for_monitoring(cmd, addon_profiles[CONST_MONITORING_ADDON_NAME])
aad_profile = None
if enable_aad:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('"--enable-aad" cannot be used together with '
'"--aad-client-app-id/--aad-server-app-id/--aad-server-app-secret"')
aad_profile = ManagedClusterAADProfile(
managed=True,
admin_group_object_ids=_parse_comma_separated_list(aad_admin_group_object_ids),
tenant_id=aad_tenant_id
)
else:
if any([aad_client_app_id, aad_server_app_id, aad_server_app_secret, aad_tenant_id]):
if aad_tenant_id is None:
profile = Profile(cli_ctx=cmd.cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
aad_profile = ManagedClusterAADProfile(
client_app_id=aad_client_app_id,
server_app_id=aad_server_app_id,
server_app_secret=aad_server_app_secret,
tenant_id=aad_tenant_id
)
api_server_access_profile = None
if enable_private_cluster and load_balancer_sku.lower() != "standard":
raise CLIError("Please use standard load balancer for private cluster")
if api_server_authorized_ip_ranges or enable_private_cluster:
api_server_access_profile = _populate_api_server_access_profile(
api_server_authorized_ip_ranges,
enable_private_cluster=enable_private_cluster
)
# Check that both --disable-rbac and --enable-rbac weren't provided
if all([disable_rbac, enable_rbac]):
raise CLIError('specify either "--disable-rbac" or "--enable-rbac", not both.')
identity = None
if not enable_managed_identity and assign_identity:
raise ArgumentUsageError('--assign-identity can only be specified when --enable-managed-identity is specified')
if enable_managed_identity and not assign_identity:
identity = ManagedClusterIdentity(
type="SystemAssigned"
)
elif enable_managed_identity and assign_identity:
user_assigned_identity = {
assign_identity: ManagedClusterIdentityUserAssignedIdentitiesValue()
}
identity = ManagedClusterIdentity(
type="UserAssigned",
user_assigned_identities=user_assigned_identity
)
mc = ManagedCluster(
location=location,
tags=tags,
dns_prefix=dns_name_prefix,
kubernetes_version=kubernetes_version,
enable_rbac=not disable_rbac,
agent_pool_profiles=[agent_pool_profile],
linux_profile=linux_profile,
windows_profile=windows_profile,
service_principal_profile=service_principal_profile,
network_profile=network_profile,
addon_profiles=addon_profiles,
aad_profile=aad_profile,
auto_scaler_profile=cluster_autoscaler_profile,
api_server_access_profile=api_server_access_profile,
identity=identity,
disk_encryption_set_id=node_osdisk_diskencryptionset_id
)
if uptime_sla:
mc.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
# Add AAD session key to header.
    # If principal_obj is None, we will not add this header. This can happen
    # when the cluster enables managed identity, in which case the header is not
    # needed and it is fine to omit it.
custom_headers = None
if principal_obj:
custom_headers = {'Ocp-Aad-Session-Key': principal_obj.get("aad_session_key")}
# Due to SPN replication latency, we do a few retries here
max_retry = 30
retry_exception = Exception(None)
for _ in range(0, max_retry):
try:
need_pull_for_result = monitoring or (enable_managed_identity and attach_acr)
if need_pull_for_result:
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc))
else:
result = sdk_no_wait(no_wait,
client.create_or_update,
resource_group_name=resource_group_name,
resource_name=name,
parameters=mc,
custom_headers=custom_headers)
if monitoring:
cloud_name = cmd.cli_ctx.cloud.name
# add cluster spn/msi Monitoring Metrics Publisher role assignment to publish metrics to MDM
# mdm metrics is supported only in azure public cloud, so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
if enable_managed_identity and attach_acr:
if result.identity_profile is None or result.identity_profile["kubeletidentity"] is None:
                    logger.warning('Your cluster was successfully created, but we failed to attach the ACR to it. '
                                   'You can manually grant permission to the identity named <CLUSTER_NAME>-agentpool '
                                   'in the MC_ resource group to give it permission to pull from the ACR.')
else:
kubelet_identity_client_id = result.identity_profile["kubeletidentity"].client_id
_ensure_aks_acr(cmd.cli_ctx,
client_id=kubelet_identity_client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
return result
except CloudError as ex:
retry_exception = ex
if 'not found in Active Directory tenant' in ex.message:
time.sleep(3)
else:
raise ex
raise retry_exception
def aks_disable_addons(cmd, client, resource_group_name, name, addons, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(
cmd,
instance,
subscription_id,
resource_group_name,
addons,
enable=False,
no_wait=no_wait
)
# send the managed cluster representation to update the addon profiles
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def aks_enable_addons(cmd, client, resource_group_name, name, addons, workspace_resource_id=None,
subnet_name=None, no_wait=False):
instance = client.get(resource_group_name, name)
subscription_id = get_subscription_id(cmd.cli_ctx)
instance = _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable=True,
workspace_resource_id=workspace_resource_id, subnet_name=subnet_name, no_wait=no_wait)
if (CONST_MONITORING_ADDON_NAME in instance.addon_profiles and
instance.addon_profiles[CONST_MONITORING_ADDON_NAME].enabled):
_ensure_container_insights_for_monitoring(cmd, instance.addon_profiles[CONST_MONITORING_ADDON_NAME])
# adding a wait here since we rely on the result for role assignment
result = LongRunningOperation(cmd.cli_ctx)(client.create_or_update(resource_group_name, name, instance))
cloud_name = cmd.cli_ctx.cloud.name
        # mdm metrics are supported only in the Azure Public cloud, so add the role assignment only in this cloud
if cloud_name.lower() == 'azurecloud':
from msrestazure.tools import resource_id
cluster_resource_id = resource_id(
subscription=subscription_id,
resource_group=resource_group_name,
namespace='Microsoft.ContainerService', type='managedClusters',
name=name
)
_add_monitoring_role_assignment(result, cluster_resource_id, cmd)
else:
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name, name, instance)
return result
def aks_get_versions(cmd, client, location):
return client.list_orchestrators(location, resource_type='managedClusters')
def aks_get_credentials(cmd, client, resource_group_name, name, admin=False,
path=os.path.join(os.path.expanduser('~'), '.kube', 'config'),
overwrite_existing=False, context_name=None):
credentialResults = None
if admin:
credentialResults = client.list_cluster_admin_credentials(resource_group_name, name)
else:
credentialResults = client.list_cluster_user_credentials(resource_group_name, name)
if not credentialResults:
raise CLIError("No Kubernetes credentials found.")
try:
kubeconfig = credentialResults.kubeconfigs[0].value.decode(encoding='UTF-8')
_print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name)
except (IndexError, ValueError):
raise CLIError("Fail to find kubeconfig file.")
def aks_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_nulls(list(managed_clusters))
def aks_show(cmd, client, resource_group_name, name):
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
def aks_update_credentials(cmd, client, resource_group_name, name,
reset_service_principal=False,
reset_aad=False,
service_principal=None,
client_secret=None,
aad_server_app_id=None,
aad_server_app_secret=None,
aad_client_app_id=None,
aad_tenant_id=None,
no_wait=False):
if bool(reset_service_principal) == bool(reset_aad):
        raise CLIError('usage error: --reset-service-principal | --reset-aad')
if reset_service_principal:
if service_principal is None or client_secret is None:
raise CLIError('usage error: --reset-service-principal --service-principal ID --client-secret SECRET')
return sdk_no_wait(no_wait,
client.reset_service_principal_profile,
resource_group_name,
name, service_principal, client_secret)
if not all([aad_client_app_id, aad_server_app_id, aad_server_app_secret]):
raise CLIError('usage error: --reset-aad --aad-client-app-id ID --aad-server-app-id ID '
'--aad-server-app-secret SECRET [--aad-tenant-id ID]')
parameters = {
'clientAppID': aad_client_app_id,
'serverAppID': aad_server_app_id,
'serverAppSecret': aad_server_app_secret,
'tenantID': aad_tenant_id
}
return sdk_no_wait(no_wait,
client.reset_aad_profile,
resource_group_name,
name, parameters)
def aks_scale(cmd, client, resource_group_name, name, node_count, nodepool_name="", no_wait=False):
instance = client.get(resource_group_name, name)
if len(instance.agent_pool_profiles) > 1 and nodepool_name == "":
        raise CLIError('There is more than one node pool in the cluster. '
                       'Please specify a node pool name or use the "az aks nodepool" command to scale a node pool.')
for agent_profile in instance.agent_pool_profiles:
if agent_profile.name == nodepool_name or (nodepool_name == "" and len(instance.agent_pool_profiles) == 1):
if agent_profile.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
agent_profile.count = int(node_count) # pylint: disable=no-member
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
raise CLIError('The nodepool "{}" was not found.'.format(nodepool_name))
# pylint: disable=inconsistent-return-statements
def aks_update(cmd, client, resource_group_name, name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
cluster_autoscaler_profile=None,
min_count=None, max_count=None,
uptime_sla=False,
load_balancer_managed_outbound_ip_count=None,
load_balancer_outbound_ips=None,
load_balancer_outbound_ip_prefixes=None,
load_balancer_outbound_ports=None,
load_balancer_idle_timeout=None,
attach_acr=None,
detach_acr=None,
api_server_authorized_ip_ranges=None,
enable_aad=False,
aad_tenant_id=None,
aad_admin_group_object_ids=None,
enable_ahub=False,
disable_ahub=False,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
update_lb_profile = is_load_balancer_profile_provided(load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout)
update_aad_profile = not (aad_tenant_id is None and aad_admin_group_object_ids is None)
# pylint: disable=too-many-boolean-expressions
if (update_autoscaler != 1 and cluster_autoscaler_profile is None and
not update_lb_profile and
not attach_acr and
not detach_acr and
not uptime_sla and
api_server_authorized_ip_ranges is None and
not enable_aad and
not update_aad_profile and
not enable_ahub and
not disable_ahub):
        raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
                       '"--disable-cluster-autoscaler" or '
                       '"--update-cluster-autoscaler" or '
                       '"--cluster-autoscaler-profile" or '
                       '"--load-balancer-managed-outbound-ip-count" or '
                       '"--load-balancer-outbound-ips" or '
                       '"--load-balancer-outbound-ip-prefixes" or '
                       '"--load-balancer-outbound-ports" or '
                       '"--load-balancer-idle-timeout" or '
                       '"--attach-acr" or "--detach-acr" or '
                       '"--uptime-sla" or '
                       '"--api-server-authorized-ip-ranges" or '
                       '"--enable-aad" or '
                       '"--aad-tenant-id" or '
                       '"--aad-admin-group-object-ids" or '
                       '"--enable-ahub" or '
                       '"--disable-ahub"')
instance = client.get(resource_group_name, name)
# For multi-agent pool, use the az aks nodepool command
if update_autoscaler > 0 and len(instance.agent_pool_profiles) > 1:
        raise CLIError('There is more than one node pool in the cluster. Please use the "az aks nodepool" command '
                       'to update per-node-pool autoscaler settings')
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.agent_pool_profiles[0].enable_auto_scaling:
            logger.warning('Cluster autoscaler is already enabled for this node pool.\n'
                           'Please run "az aks update --update-cluster-autoscaler" '
                           'if you want to update min-count or max-count.')
return None
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
instance.agent_pool_profiles[0].enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
            raise CLIError('Cluster autoscaler is not enabled for this node pool.\n'
                           'Run "az aks update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.agent_pool_profiles[0].min_count = int(min_count)
instance.agent_pool_profiles[0].max_count = int(max_count)
if disable_cluster_autoscaler:
if not instance.agent_pool_profiles[0].enable_auto_scaling:
logger.warning('Cluster autoscaler is already disabled for this node pool.')
return None
instance.agent_pool_profiles[0].enable_auto_scaling = False
instance.agent_pool_profiles[0].min_count = None
instance.agent_pool_profiles[0].max_count = None
# if intention is to clear autoscaler profile
if cluster_autoscaler_profile == {}:
instance.auto_scaler_profile = {}
# else profile is provided, update instance profile if it exists
elif cluster_autoscaler_profile:
instance.auto_scaler_profile = _update_dict(instance.auto_scaler_profile.__dict__,
dict((key.replace("-", "_"), value)
for (key, value) in cluster_autoscaler_profile.items())) \
if instance.auto_scaler_profile else cluster_autoscaler_profile
subscription_id = get_subscription_id(cmd.cli_ctx)
client_id = ""
if instance.identity is not None and instance.identity.type == "SystemAssigned":
if instance.identity_profile is None or instance.identity_profile["kubeletidentity"] is None:
raise CLIError('Unexpected error getting kubelet\'s identity for the cluster. '
'Please do not set --attach-acr or --detach-acr. '
'You can manually grant or revoke permission to the identity named '
                           '<CLUSTER_NAME>-agentpool in the MC_ resource group to access ACR.')
client_id = instance.identity_profile["kubeletidentity"].client_id
else:
client_id = instance.service_principal_profile.client_id
if not client_id:
raise CLIError('Cannot get the AKS cluster\'s service principal.')
if attach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=attach_acr,
subscription_id=subscription_id)
if detach_acr:
_ensure_aks_acr(cmd.cli_ctx,
client_id=client_id,
acr_name_or_id=detach_acr,
subscription_id=subscription_id,
detach=True)
if uptime_sla:
instance.sku = ManagedClusterSKU(
name="Basic",
tier="Paid"
)
if update_lb_profile:
instance.network_profile.load_balancer_profile = update_load_balancer_profile(
load_balancer_managed_outbound_ip_count,
load_balancer_outbound_ips,
load_balancer_outbound_ip_prefixes,
load_balancer_outbound_ports,
load_balancer_idle_timeout,
instance.network_profile.load_balancer_profile)
# empty string is valid as it disables ip whitelisting
if api_server_authorized_ip_ranges is not None:
instance.api_server_access_profile = \
_populate_api_server_access_profile(api_server_authorized_ip_ranges, instance=instance)
if enable_aad:
if instance.aad_profile is not None and instance.aad_profile.managed:
raise CLIError('Cannot specify "--enable-aad" if managed AAD is already enabled')
instance.aad_profile = ManagedClusterAADProfile(
managed=True
)
if update_aad_profile:
if instance.aad_profile is None or not instance.aad_profile.managed:
raise CLIError('Cannot specify "--aad-tenant-id/--aad-admin-group-object-ids"'
' if managed AAD is not enabled')
if aad_tenant_id is not None:
instance.aad_profile.tenant_id = aad_tenant_id
if aad_admin_group_object_ids is not None:
instance.aad_profile.admin_group_object_ids = _parse_comma_separated_list(aad_admin_group_object_ids)
if enable_ahub and disable_ahub:
raise CLIError('Cannot specify "--enable-ahub" and "--disable-ahub" at the same time')
if enable_ahub:
instance.windows_profile.license_type = 'Windows_Server'
if disable_ahub:
instance.windows_profile.license_type = 'None'
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
# pylint: disable=unused-argument,inconsistent-return-statements,too-many-return-statements
def aks_upgrade(cmd,
client,
resource_group_name, name,
kubernetes_version='',
control_plane_only=False,
node_image_only=False,
no_wait=False,
yes=False):
msg = 'Kubernetes may be unavailable during cluster upgrades.\n Are you sure you want to perform this operation?'
if not yes and not prompt_y_n(msg, default="n"):
return None
instance = client.get(resource_group_name, name)
vmas_cluster = False
for agent_profile in instance.agent_pool_profiles:
if agent_profile.type.lower() == "availabilityset":
vmas_cluster = True
break
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
                       'If you only want to upgrade the node image, please use the "--node-image-only" option.')
if node_image_only:
msg = "This node image upgrade operation will run across every node pool in the cluster" \
"and might take a while, do you wish to continue?"
if not yes and not prompt_y_n(msg, default="n"):
return None
        # This only provides a client-side convenience so customers can run "az aks upgrade" to upgrade all
        # node pools of a cluster. The SDK only supports upgrading a single node pool at a time.
for agent_pool_profile in instance.agent_pool_profiles:
if vmas_cluster:
                raise CLIError('This cluster is not using VirtualMachineScaleSets. The node-image-only upgrade '
                               'operation can only be applied to VirtualMachineScaleSets clusters.')
_upgrade_single_nodepool_image_version(True, client, resource_group_name, name, agent_pool_profile.name)
mc = client.get(resource_group_name, name)
return _remove_nulls([mc])[0]
if instance.kubernetes_version == kubernetes_version:
if instance.provisioning_state == "Succeeded":
logger.warning("The cluster is already on version %s and is not in a failed state. No operations "
"will occur when upgrading to the same version if the cluster is not in a failed state.",
instance.kubernetes_version)
elif instance.provisioning_state == "Failed":
logger.warning("Cluster currently in failed state. Proceeding with upgrade to existing version %s to "
"attempt resolution of failed cluster state.", instance.kubernetes_version)
upgrade_all = False
instance.kubernetes_version = kubernetes_version
# for legacy clusters, we always upgrade node pools with CCP.
if instance.max_agent_pools < 8 or vmas_cluster:
if control_plane_only:
msg = ("Legacy clusters do not support control plane only upgrade. All node pools will be "
"upgraded to {} as well. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
if not control_plane_only:
msg = ("Since control-plane-only argument is not specified, this will upgrade the control plane "
"AND all nodepools to version {}. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
upgrade_all = True
else:
msg = ("Since control-plane-only argument is specified, this will upgrade only the control plane to {}. "
"Node pool will not change. Continue?").format(instance.kubernetes_version)
if not yes and not prompt_y_n(msg, default="n"):
return None
if upgrade_all:
for agent_profile in instance.agent_pool_profiles:
agent_profile.orchestrator_version = kubernetes_version
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def _upgrade_single_nodepool_image_version(no_wait, client, resource_group_name, cluster_name, nodepool_name):
return sdk_no_wait(no_wait, client.upgrade_node_image_version, resource_group_name, cluster_name, nodepool_name)
DEV_SPACES_EXTENSION_NAME = 'dev-spaces'
DEV_SPACES_EXTENSION_MODULE = 'azext_dev_spaces.custom'
def aks_use_dev_spaces(cmd, client, name, resource_group_name, update=False, space_name=None,
endpoint_type='Public', prompt=False):
"""
Use Azure Dev Spaces with a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
    using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param update: Update to the latest Azure Dev Spaces client components.
:type update: bool
:param space_name: Name of the new or existing dev space to select. Defaults to an \
interactive selection experience.
:type space_name: String
    :param endpoint_type: The endpoint type to be used for an Azure Dev Spaces controller. \
See https://aka.ms/azds-networking for more information.
:type endpoint_type: String
:param prompt: Do not prompt for confirmation. Requires --space.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE, update):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_use_dev_spaces(name, resource_group_name, update, space_name, endpoint_type, prompt)
except TypeError:
raise CLIError("Use '--update' option to get the latest Azure Dev Spaces client components.")
except AttributeError as ae:
raise CLIError(ae)
def aks_remove_dev_spaces(cmd, client, name, resource_group_name, prompt=False):
"""
Remove Azure Dev Spaces from a managed Kubernetes cluster.
:param name: Name of the managed cluster.
:type name: String
    :param resource_group_name: Name of resource group. You can configure the default group \
    using 'az configure --defaults group=<name>'.
:type resource_group_name: String
:param prompt: Do not prompt for confirmation.
:type prompt: bool
"""
if _get_or_add_extension(cmd, DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE):
azext_custom = _get_azext_module(DEV_SPACES_EXTENSION_NAME, DEV_SPACES_EXTENSION_MODULE)
try:
azext_custom.ads_remove_dev_spaces(name, resource_group_name, prompt)
except AttributeError as ae:
raise CLIError(ae)
def aks_rotate_certs(cmd, client, resource_group_name, name, no_wait=True):
return sdk_no_wait(no_wait, client.rotate_cluster_certificates, resource_group_name, name)
def _update_addons(cmd, instance, subscription_id, resource_group_name, addons, enable, workspace_resource_id=None,
subnet_name=None, no_wait=False):
# parse the comma-separated addons argument
addon_args = addons.split(',')
addon_profiles = instance.addon_profiles or {}
os_type = 'Linux'
# for each addons argument
for addon_arg in addon_args:
if addon_arg not in ADDONS:
raise CLIError("Invalid addon name: {}.".format(addon_arg))
addon = ADDONS[addon_arg]
if addon == CONST_VIRTUAL_NODE_ADDON_NAME:
# only linux is supported for now, in the future this will be a user flag
addon += os_type
# honor addon names defined in Azure CLI
for key in list(addon_profiles):
if key.lower() == addon.lower() and key != addon:
addon_profiles[addon] = addon_profiles.pop(key)
if enable:
# add new addons or update existing ones and enable them
addon_profile = addon_profiles.get(addon, ManagedClusterAddonProfile(enabled=False))
# special config handling for certain addons
if addon == CONST_MONITORING_ADDON_NAME:
if addon_profile.enabled:
                    raise CLIError('The monitoring addon is already enabled for this managed cluster.\n'
                                   'To change monitoring configuration, run "az aks disable-addons -a monitoring" '
                                   'before enabling it again.')
if not workspace_resource_id:
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd,
subscription_id,
resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profile.config = {CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}
elif addon == (CONST_VIRTUAL_NODE_ADDON_NAME + os_type):
if addon_profile.enabled:
                    raise CLIError('The virtual-node addon is already enabled for this managed cluster.\n'
                                   'To change virtual-node configuration, run '
                                   '"az aks disable-addons -a virtual-node -g {resource_group_name}" '
                                   'before enabling it again.'.format(resource_group_name=resource_group_name))
if not subnet_name:
raise CLIError('The aci-connector addon requires setting a subnet name.')
addon_profile.config = {CONST_VIRTUAL_NODE_SUBNET_NAME: subnet_name}
addon_profiles[addon] = addon_profile
else:
if addon not in addon_profiles:
if addon == CONST_KUBE_DASHBOARD_ADDON_NAME:
addon_profiles[addon] = ManagedClusterAddonProfile(enabled=False)
else:
raise CLIError("The addon {} is not installed.".format(addon))
addon_profiles[addon].config = None
addon_profiles[addon].enabled = enable
instance.addon_profiles = addon_profiles
# null out the SP and AAD profile because otherwise validation complains
instance.service_principal_profile = None
instance.aad_profile = None
return instance
def _get_azext_module(extension_name, module_name):
try:
# Adding the installed extension in the path
from azure.cli.core.extension.operations import add_extension_to_path
add_extension_to_path(extension_name)
# Import the extension module
from importlib import import_module
azext_custom = import_module(module_name)
return azext_custom
except ImportError as ie:
raise CLIError(ie)
def _handle_addons_args(cmd, addons_str, subscription_id, resource_group_name, addon_profiles=None,
workspace_resource_id=None, aci_subnet_name=None, vnet_subnet_id=None):
if not addon_profiles:
addon_profiles = {}
addons = addons_str.split(',') if addons_str else []
if 'http_application_routing' in addons:
addon_profiles[CONST_HTTP_APPLICATION_ROUTING_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
addons.remove('http_application_routing')
if 'kube-dashboard' in addons:
addon_profiles[CONST_KUBE_DASHBOARD_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
addons.remove('kube-dashboard')
# TODO: can we help the user find a workspace resource ID?
if 'monitoring' in addons:
if not workspace_resource_id:
            # use the default workspace if it exists, otherwise create a default workspace
workspace_resource_id = _ensure_default_log_analytics_workspace_for_monitoring(
cmd, subscription_id, resource_group_name)
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
addon_profiles[CONST_MONITORING_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True, config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id})
addons.remove('monitoring')
# error out if '--enable-addons=monitoring' isn't set but workspace_resource_id is
elif workspace_resource_id:
raise CLIError('"--workspace-resource-id" requires "--enable-addons monitoring".')
if 'azure-policy' in addons:
addon_profiles[CONST_AZURE_POLICY_ADDON_NAME] = ManagedClusterAddonProfile(enabled=True)
addons.remove('azure-policy')
if 'virtual-node' in addons:
if not aci_subnet_name or not vnet_subnet_id:
raise CLIError('"--enable-addons virtual-node" requires "--aci-subnet-name" and "--vnet-subnet-id".')
# TODO: how about aciConnectorwindows, what is its addon name?
addon_profiles[CONST_VIRTUAL_NODE_ADDON_NAME] = ManagedClusterAddonProfile(
enabled=True,
config={CONST_VIRTUAL_NODE_SUBNET_NAME: aci_subnet_name}
)
addons.remove('virtual-node')
# error out if any (unrecognized) addons remain
if addons:
raise CLIError('"{}" {} not recognized by the --enable-addons argument.'.format(
",".join(addons), "are" if len(addons) > 1 else "is"))
return addon_profiles
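# Illustrative sketch (comment only, not executed): for "--enable-addons monitoring,kube-dashboard"
# with a workspace resource ID, _handle_addons_args above is expected to return roughly
#     {
#         CONST_MONITORING_ADDON_NAME: ManagedClusterAddonProfile(
#             enabled=True,
#             config={CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID: workspace_resource_id}),
#         CONST_KUBE_DASHBOARD_ADDON_NAME: ManagedClusterAddonProfile(enabled=True),
#     }
# The actual dictionary keys are whatever strings the CONST_* values resolve to.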
def _install_dev_spaces_extension(cmd, extension_name):
try:
from azure.cli.core.extension import operations
operations.add_extension(cmd=cmd, extension_name=extension_name)
    except Exception:  # noqa pylint: disable=broad-except
return False
return True
def _update_dev_spaces_extension(cmd, extension_name, extension_module):
from azure.cli.core.extension import ExtensionNotInstalledException
try:
from azure.cli.core.extension import operations
operations.update_extension(cmd=cmd, extension_name=extension_name)
operations.reload_extension(extension_name=extension_name)
except CLIError as err:
logger.info(err)
except ExtensionNotInstalledException as err:
logger.debug(err)
return False
except ModuleNotFoundError as err:
logger.debug(err)
logger.error("Error occurred attempting to load the extension module. Use --debug for more information.")
return False
return True
def _get_or_add_extension(cmd, extension_name, extension_module, update=False):
from azure.cli.core.extension import (ExtensionNotInstalledException, get_extension)
try:
get_extension(extension_name)
if update:
return _update_dev_spaces_extension(cmd, extension_name, extension_module)
except ExtensionNotInstalledException:
return _install_dev_spaces_extension(cmd, extension_name)
return True
def _ensure_default_log_analytics_workspace_for_monitoring(cmd, subscription_id, resource_group_name):
# mapping for azure public cloud
    # Log Analytics workspaces cannot be created in the WCUS region due to capacity limits,
    # so it is mapped to EUS per discussion with the Log Analytics team
AzureCloudLocationToOmsRegionCodeMap = {
"australiasoutheast": "ASE",
"australiaeast": "EAU",
"australiacentral": "CAU",
"canadacentral": "CCA",
"centralindia": "CIN",
"centralus": "CUS",
"eastasia": "EA",
"eastus": "EUS",
"eastus2": "EUS2",
"eastus2euap": "EAP",
"francecentral": "PAR",
"japaneast": "EJP",
"koreacentral": "SE",
"northeurope": "NEU",
"southcentralus": "SCUS",
"southeastasia": "SEA",
"uksouth": "SUK",
"usgovvirginia": "USGV",
"westcentralus": "EUS",
"westeurope": "WEU",
"westus": "WUS",
"westus2": "WUS2"
}
AzureCloudRegionToOmsRegionMap = {
"australiacentral": "australiacentral",
"australiacentral2": "australiacentral",
"australiaeast": "australiaeast",
"australiasoutheast": "australiasoutheast",
"brazilsouth": "southcentralus",
"canadacentral": "canadacentral",
"canadaeast": "canadacentral",
"centralus": "centralus",
"centralindia": "centralindia",
"eastasia": "eastasia",
"eastus": "eastus",
"eastus2": "eastus2",
"francecentral": "francecentral",
"francesouth": "francecentral",
"japaneast": "japaneast",
"japanwest": "japaneast",
"koreacentral": "koreacentral",
"koreasouth": "koreacentral",
"northcentralus": "eastus",
"northeurope": "northeurope",
"southafricanorth": "westeurope",
"southafricawest": "westeurope",
"southcentralus": "southcentralus",
"southeastasia": "southeastasia",
"southindia": "centralindia",
"uksouth": "uksouth",
"ukwest": "uksouth",
"westcentralus": "eastus",
"westeurope": "westeurope",
"westindia": "centralindia",
"westus": "westus",
"westus2": "westus2"
}
# mapping for azure china cloud
# currently log analytics supported only China East 2 region
AzureChinaLocationToOmsRegionCodeMap = {
"chinaeast": "EAST2",
"chinaeast2": "EAST2",
"chinanorth": "EAST2",
"chinanorth2": "EAST2"
}
AzureChinaRegionToOmsRegionMap = {
"chinaeast": "chinaeast2",
"chinaeast2": "chinaeast2",
"chinanorth": "chinaeast2",
"chinanorth2": "chinaeast2"
}
    # mapping for azure us government cloud
AzureFairfaxLocationToOmsRegionCodeMap = {
"usgovvirginia": "USGV"
}
AzureFairfaxRegionToOmsRegionMap = {
"usgovvirginia": "usgovvirginia"
}
rg_location = _get_rg_location(cmd.cli_ctx, resource_group_name)
cloud_name = cmd.cli_ctx.cloud.name
workspace_region = "eastus"
workspace_region_code = "EUS"
# sanity check that locations and clouds match.
if ((cloud_name.lower() == 'azurecloud' and AzureChinaRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurecloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurecloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azurechinacloud' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azurechinacloud' and AzureFairfaxRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azurechinacloud) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if ((cloud_name.lower() == 'azureusgovernment' and AzureCloudRegionToOmsRegionMap.get(rg_location, False)) or
(cloud_name.lower() == 'azureusgovernment' and AzureChinaRegionToOmsRegionMap.get(rg_location, False))):
raise CLIError('Wrong cloud (azureusgovernment) setting for region {}, please use "az cloud set ..."'
.format(rg_location))
if cloud_name.lower() == 'azurecloud':
workspace_region = AzureCloudRegionToOmsRegionMap.get(rg_location, "eastus")
workspace_region_code = AzureCloudLocationToOmsRegionCodeMap.get(workspace_region, "EUS")
elif cloud_name.lower() == 'azurechinacloud':
workspace_region = AzureChinaRegionToOmsRegionMap.get(rg_location, "chinaeast2")
workspace_region_code = AzureChinaLocationToOmsRegionCodeMap.get(workspace_region, "EAST2")
elif cloud_name.lower() == 'azureusgovernment':
workspace_region = AzureFairfaxRegionToOmsRegionMap.get(rg_location, "usgovvirginia")
workspace_region_code = AzureFairfaxLocationToOmsRegionCodeMap.get(workspace_region, "USGV")
else:
workspace_region = rg_location
workspace_region_code = rg_location.upper()
default_workspace_resource_group = 'DefaultResourceGroup-' + workspace_region_code
default_workspace_name = 'DefaultWorkspace-{0}-{1}'.format(subscription_id, workspace_region_code)
default_workspace_resource_id = '/subscriptions/{0}/resourceGroups/{1}/providers/Microsoft.OperationalInsights' \
'/workspaces/{2}'.format(subscription_id, default_workspace_resource_group, default_workspace_name)
resource_groups = cf_resource_groups(cmd.cli_ctx, subscription_id)
resources = cf_resources(cmd.cli_ctx, subscription_id)
# check if default RG exists
if resource_groups.check_existence(default_workspace_resource_group):
try:
resource = resources.get_by_id(default_workspace_resource_id, '2015-11-01-preview')
return resource.id
except CloudError as ex:
if ex.status_code != 404:
raise ex
else:
resource_groups.create_or_update(default_workspace_resource_group, {'location': workspace_region})
default_workspace_params = {
'location': workspace_region,
'properties': {
'sku': {
'name': 'standalone'
}
}
}
async_poller = resources.create_or_update_by_id(default_workspace_resource_id, '2015-11-01-preview',
default_workspace_params)
ws_resource_id = ''
while True:
result = async_poller.result(15)
if async_poller.done():
ws_resource_id = result.id
break
return ws_resource_id
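# Worked example (hypothetical subscription ID): for a resource group located in westus2 on the
# public cloud, the helper above resolves workspace_region='westus2' and workspace_region_code='WUS2',
# so the default workspace resource ID becomes
#     /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/DefaultResourceGroup-WUS2
#     /providers/Microsoft.OperationalInsights/workspaces/DefaultWorkspace-00000000-0000-0000-0000-000000000000-WUS2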
def _ensure_container_insights_for_monitoring(cmd, addon):
# Workaround for this addon key which has been seen lowercased in the wild.
for key in list(addon.config):
if (key.lower() == CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID.lower() and
key != CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID):
addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID] = addon.config.pop(key)
workspace_resource_id = addon.config[CONST_MONITORING_LOG_ANALYTICS_WORKSPACE_RESOURCE_ID]
workspace_resource_id = workspace_resource_id.strip()
if not workspace_resource_id.startswith('/'):
workspace_resource_id = '/' + workspace_resource_id
if workspace_resource_id.endswith('/'):
workspace_resource_id = workspace_resource_id.rstrip('/')
# extract subscription ID and resource group from workspace_resource_id URL
try:
subscription_id = workspace_resource_id.split('/')[2]
resource_group = workspace_resource_id.split('/')[4]
except IndexError:
raise CLIError('Could not locate resource group in workspace-resource-id URL.')
# region of workspace can be different from region of RG so find the location of the workspace_resource_id
resources = cf_resources(cmd.cli_ctx, subscription_id)
try:
resource = resources.get_by_id(workspace_resource_id, '2015-11-01-preview')
location = resource.location
except CloudError as ex:
raise ex
unix_time_in_millis = int(
(datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds() * 1000.0)
solution_deployment_name = 'ContainerInsights-{}'.format(unix_time_in_millis)
# pylint: disable=line-too-long
template = {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {
"workspaceResourceId": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics Resource ID"
}
},
"workspaceRegion": {
"type": "string",
"metadata": {
"description": "Azure Monitor Log Analytics workspace region"
}
},
"solutionDeploymentName": {
"type": "string",
"metadata": {
"description": "Name of the solution deployment"
}
}
},
"resources": [
{
"type": "Microsoft.Resources/deployments",
"name": "[parameters('solutionDeploymentName')]",
"apiVersion": "2017-05-10",
"subscriptionId": "[split(parameters('workspaceResourceId'),'/')[2]]",
"resourceGroup": "[split(parameters('workspaceResourceId'),'/')[4]]",
"properties": {
"mode": "Incremental",
"template": {
"$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#",
"contentVersion": "1.0.0.0",
"parameters": {},
"variables": {},
"resources": [
{
"apiVersion": "2015-11-01-preview",
"type": "Microsoft.OperationsManagement/solutions",
"location": "[parameters('workspaceRegion')]",
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"properties": {
"workspaceResourceId": "[parameters('workspaceResourceId')]"
},
"plan": {
"name": "[Concat('ContainerInsights', '(', split(parameters('workspaceResourceId'),'/')[8], ')')]",
"product": "[Concat('OMSGallery/', 'ContainerInsights')]",
"promotionCode": "",
"publisher": "Microsoft"
}
}
]
},
"parameters": {}
}
}
]
}
params = {
"workspaceResourceId": {
"value": workspace_resource_id
},
"workspaceRegion": {
"value": location
},
"solutionDeploymentName": {
"value": solution_deployment_name
}
}
deployment_name = 'aks-monitoring-{}'.format(unix_time_in_millis)
# publish the Container Insights solution to the Log Analytics workspace
return _invoke_deployment(cmd, resource_group, deployment_name, template, params,
validate=False, no_wait=False, subscription_id=subscription_id)
def _ensure_aks_acr(cli_ctx,
client_id,
acr_name_or_id,
subscription_id,
detach=False):
from msrestazure.tools import is_valid_resource_id, parse_resource_id
# Check if the ACR exists by resource ID.
if is_valid_resource_id(acr_name_or_id):
try:
parsed_registry = parse_resource_id(acr_name_or_id)
acr_client = cf_container_registry_service(cli_ctx, subscription_id=parsed_registry['subscription'])
registry = acr_client.registries.get(parsed_registry['resource_group'], parsed_registry['name'])
except CloudError as ex:
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
    # Check if the ACR exists by name across all resource groups.
registry_name = acr_name_or_id
registry_resource = 'Microsoft.ContainerRegistry/registries'
try:
registry = get_resource_by_name(cli_ctx, registry_name, registry_resource)
except CloudError as ex:
if 'was not found' in ex.message:
raise CLIError("ACR {} not found. Have you provided the right ACR name?".format(registry_name))
raise CLIError(ex.message)
_ensure_aks_acr_role_assignment(cli_ctx, client_id, registry.id, detach)
return
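# Usage sketch (hypothetical identifiers): _ensure_aks_acr accepts either a bare registry name or a
# full ACR resource ID, for example
#     _ensure_aks_acr(cli_ctx, client_id=kubelet_client_id, acr_name_or_id='myregistry',
#                     subscription_id=subscription_id)
#     _ensure_aks_acr(cli_ctx, client_id=kubelet_client_id, subscription_id=subscription_id, detach=True,
#                     acr_name_or_id='/subscriptions/<sub>/resourceGroups/<rg>/providers/'
#                                    'Microsoft.ContainerRegistry/registries/myregistry')
# Both paths end in _ensure_aks_acr_role_assignment, which grants or removes the 'acrpull' role.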
def aks_agentpool_show(cmd, client, resource_group_name, cluster_name, nodepool_name):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
return instance
def aks_agentpool_list(cmd, client, resource_group_name, cluster_name):
return client.list(resource_group_name, cluster_name)
def aks_agentpool_add(cmd, client, resource_group_name, cluster_name, nodepool_name,
kubernetes_version=None,
zones=None,
enable_node_public_ip=False,
node_vm_size=None,
node_osdisk_type=None,
node_osdisk_size=0,
node_count=3,
vnet_subnet_id=None,
ppg=None,
max_pods=0,
os_type="Linux",
min_count=None,
max_count=None,
enable_cluster_autoscaler=False,
node_taints=None,
priority=CONST_SCALE_SET_PRIORITY_REGULAR,
eviction_policy=CONST_SPOT_EVICTION_POLICY_DELETE,
spot_max_price=float('nan'),
tags=None,
labels=None,
max_surge=None,
mode="User",
no_wait=False):
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name == nodepool_name:
raise CLIError("Node pool {} already exists, please try a different name, "
"use 'aks nodepool list' to get current list of node pool".format(nodepool_name))
upgradeSettings = AgentPoolUpgradeSettings()
taints_array = []
if node_taints is not None:
for taint in node_taints.split(','):
try:
taint = taint.strip()
taints_array.append(taint)
except ValueError:
                raise CLIError('Taint does not match allowed values. '
                               'Expected a value such as "special=true:NoSchedule".')
if node_vm_size is None:
if os_type.lower() == "windows":
node_vm_size = "Standard_D2s_v3"
else:
node_vm_size = "Standard_DS2_v2"
if max_surge:
upgradeSettings.max_surge = max_surge
agent_pool = AgentPool(
name=nodepool_name,
tags=tags,
node_labels=labels,
count=int(node_count),
vm_size=node_vm_size,
os_type=os_type,
vnet_subnet_id=vnet_subnet_id,
proximity_placement_group_id=ppg,
agent_pool_type="VirtualMachineScaleSets",
max_pods=int(max_pods) if max_pods else None,
orchestrator_version=kubernetes_version,
availability_zones=zones,
scale_set_priority=priority,
enable_node_public_ip=enable_node_public_ip,
node_taints=taints_array,
upgrade_settings=upgradeSettings,
mode=mode
)
if priority == CONST_SCALE_SET_PRIORITY_SPOT:
agent_pool.scale_set_eviction_policy = eviction_policy
if isnan(spot_max_price):
spot_max_price = -1
agent_pool.spot_max_price = spot_max_price
_check_cluster_autoscaler_flag(enable_cluster_autoscaler, min_count, max_count, node_count, agent_pool)
if node_osdisk_size:
agent_pool.os_disk_size_gb = int(node_osdisk_size)
if node_osdisk_type:
agent_pool.os_disk_type = node_osdisk_type
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, agent_pool)
def aks_agentpool_scale(cmd, client, resource_group_name, cluster_name,
nodepool_name,
node_count=3,
no_wait=False):
instance = client.get(resource_group_name, cluster_name, nodepool_name)
new_node_count = int(node_count)
if instance.enable_auto_scaling:
raise CLIError("Cannot scale cluster autoscaler enabled node pool.")
if new_node_count == instance.count:
raise CLIError("The new node count is the same as the current node count.")
instance.count = new_node_count # pylint: disable=no-member
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_upgrade(cmd, client, resource_group_name, cluster_name,
nodepool_name,
kubernetes_version='',
node_image_only=False,
max_surge=None,
no_wait=False):
if kubernetes_version != '' and node_image_only:
        raise CLIError('Conflicting flags. Upgrading the Kubernetes version will also upgrade the node image version. '
                       'If you only want to upgrade the node image, please use the "--node-image-only" option.')
if node_image_only:
managed_cluster_client = cf_managed_clusters(cmd.cli_ctx)
return _upgrade_single_nodepool_image_version(no_wait,
managed_cluster_client,
resource_group_name,
cluster_name,
nodepool_name)
instance = client.get(resource_group_name, cluster_name, nodepool_name)
instance.orchestrator_version = kubernetes_version
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_update(cmd, client, resource_group_name, cluster_name, nodepool_name,
enable_cluster_autoscaler=False,
disable_cluster_autoscaler=False,
update_cluster_autoscaler=False,
min_count=None, max_count=None,
tags=None,
max_surge=None,
mode=None,
no_wait=False):
update_autoscaler = enable_cluster_autoscaler + disable_cluster_autoscaler + update_cluster_autoscaler
if update_autoscaler > 1:
raise CLIError('Please specify one of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler"')
if (update_autoscaler == 0 and not tags and not mode and not max_surge):
raise CLIError('Please specify one or more of "--enable-cluster-autoscaler" or '
'"--disable-cluster-autoscaler" or '
'"--update-cluster-autoscaler" or '
'"--tags" or "--mode" or "--max-surge"')
instance = client.get(resource_group_name, cluster_name, nodepool_name)
_validate_autoscaler_update_counts(min_count, max_count, enable_cluster_autoscaler or
update_cluster_autoscaler)
if enable_cluster_autoscaler:
if instance.enable_auto_scaling:
logger.warning('Autoscaler is already enabled for this node pool.\n'
'Please run "az aks nodepool update --update-cluster-autoscaler" '
'if you want to update min-count or max-count.')
return None
instance.min_count = int(min_count)
instance.max_count = int(max_count)
instance.enable_auto_scaling = True
if update_cluster_autoscaler:
if not instance.enable_auto_scaling:
            raise CLIError('Autoscaler is not enabled for this node pool.\n'
                           'Run "az aks nodepool update --enable-cluster-autoscaler" '
                           'to enable the cluster autoscaler with min-count and max-count.')
instance.min_count = int(min_count)
instance.max_count = int(max_count)
if not instance.upgrade_settings:
instance.upgrade_settings = AgentPoolUpgradeSettings()
if max_surge:
instance.upgrade_settings.max_surge = max_surge
if disable_cluster_autoscaler:
if not instance.enable_auto_scaling:
logger.warning('Autoscaler is already disabled for this node pool.')
return None
instance.enable_auto_scaling = False
instance.min_count = None
instance.max_count = None
instance.tags = tags
if mode is not None:
instance.mode = mode
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, cluster_name, nodepool_name, instance)
def aks_agentpool_delete(cmd, client, resource_group_name, cluster_name,
nodepool_name,
no_wait=False):
agentpool_exists = False
instances = client.list(resource_group_name, cluster_name)
for agentpool_profile in instances:
if agentpool_profile.name.lower() == nodepool_name.lower():
agentpool_exists = True
break
if not agentpool_exists:
raise CLIError("Node pool {} doesnt exist, "
"use 'aks nodepool list' to get current node pool list".format(nodepool_name))
return sdk_no_wait(no_wait, client.delete, resource_group_name, cluster_name, nodepool_name)
def aks_agentpool_get_upgrade_profile(cmd, client, resource_group_name, cluster_name, nodepool_name):
return client.get_upgrade_profile(resource_group_name, cluster_name, nodepool_name)
def _ensure_aks_acr_role_assignment(cli_ctx,
client_id,
registry_id,
detach=False):
if detach:
if not _delete_role_assignments(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not delete role assignments for ACR. '
'Are you an Owner on this subscription?')
return
if not _add_role_assignment(cli_ctx,
'acrpull',
client_id,
scope=registry_id):
raise CLIError('Could not create a role assignment for ACR. '
'Are you an Owner on this subscription?')
return
def _ensure_aks_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
aad_session_key = None
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal, aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
# We don't need to add role assignment for this created SPN
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
'aad_session_key': aad_session_key,
}
def _ensure_osa_aad(cli_ctx,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
identifier=None,
name=None, create=False,
customer_admin_group_id=None):
rbac_client = get_graph_rbac_management_client(cli_ctx)
if create:
        # This reply_url is temporarily set since Azure needs one to create the AAD application.
app_id_name = 'https://{}'.format(name)
if not aad_client_app_secret:
aad_client_app_secret = _create_client_secret()
# Delegate Sign In and Read User Profile permissions on Windows Azure Active Directory API
resource_access = ResourceAccess(id="311a71cc-e848-46a1-bdf8-97ff7156d8e6",
additional_properties=None, type="Scope")
# Read directory permissions on Windows Azure Active Directory API
directory_access = ResourceAccess(id="5778995a-e1bf-45b8-affa-663a9f3f4d04",
additional_properties=None, type="Role")
required_osa_aad_access = RequiredResourceAccess(resource_access=[resource_access, directory_access],
additional_properties=None,
resource_app_id="00000002-0000-0000-c000-000000000000")
list_aad_filtered = list(rbac_client.applications.list(filter="identifierUris/any(s:s eq '{}')"
.format(app_id_name)))
if list_aad_filtered:
aad_client_app_id = list_aad_filtered[0].app_id
# Updating reply_url with the correct FQDN information returned by the RP
reply_url = 'https://{}/oauth2callback/Azure%20AD'.format(identifier)
update_application(client=rbac_client.applications,
object_id=list_aad_filtered[0].object_id,
display_name=name,
identifier_uris=[app_id_name],
reply_urls=[reply_url],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
logger.info('Updated AAD: %s', aad_client_app_id)
else:
result, _aad_session_key = create_application(client=rbac_client.applications,
display_name=name,
identifier_uris=[app_id_name],
homepage=app_id_name,
password=aad_client_app_secret,
required_resource_accesses=[required_osa_aad_access])
aad_client_app_id = result.app_id
logger.info('Created an AAD: %s', aad_client_app_id)
# Get the TenantID
if aad_tenant_id is None:
profile = Profile(cli_ctx=cli_ctx)
_, _, aad_tenant_id = profile.get_login_credentials()
return OpenShiftManagedClusterAADIdentityProvider(
client_id=aad_client_app_id,
secret=aad_client_app_secret,
tenant_id=aad_tenant_id,
kind='AADIdentityProvider',
customer_admin_group_id=customer_admin_group_id)
def _ensure_service_principal(cli_ctx,
service_principal=None,
client_secret=None,
subscription_id=None,
dns_name_prefix=None,
location=None,
name=None):
# TODO: This really needs to be unit tested.
rbac_client = get_graph_rbac_management_client(cli_ctx)
if not service_principal:
# --service-principal not specified, make one.
if not client_secret:
client_secret = _create_client_secret()
salt = binascii.b2a_hex(os.urandom(3)).decode('utf-8')
url = 'https://{}.{}.{}.cloudapp.azure.com'.format(salt, dns_name_prefix, location)
service_principal, _aad_session_key = _build_service_principal(rbac_client, cli_ctx, name, url, client_secret)
if not service_principal:
raise CLIError('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
logger.info('Created a service principal: %s', service_principal)
        # add the role assignment first before saving the service principal
if not _add_role_assignment(cli_ctx, 'Contributor', service_principal):
logger.warning('Could not create a service principal with the right permissions. '
'Are you an Owner on this project?')
else:
        # --service-principal specified, validate --client-secret was too
if not client_secret:
raise CLIError('--client-secret is required if --service-principal is specified')
return {
'client_secret': client_secret,
'service_principal': service_principal,
}
def _create_client_secret():
    # Add a special character to satisfy AAD SP secret requirements
special_char = '$'
client_secret = binascii.b2a_hex(os.urandom(10)).decode('utf-8') + special_char
return client_secret
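# Example output (illustrative): _create_client_secret() returns 20 random hex characters followed
# by '$', e.g. '3fa9c02d1b4e5f607a18$', so the secret always contains a special character.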
def _get_rg_location(ctx, resource_group_name, subscription_id=None):
groups = cf_resource_groups(ctx, subscription_id=subscription_id)
# Just do the get, we don't need the result, it will error out if the group doesn't exist.
rg = groups.get(resource_group_name)
return rg.location
def _check_cluster_autoscaler_flag(enable_cluster_autoscaler,
min_count,
max_count,
node_count,
agent_pool_profile):
if enable_cluster_autoscaler:
if min_count is None or max_count is None:
            raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler is enabled')
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count')
if int(node_count) < int(min_count) or int(node_count) > int(max_count):
raise CLIError('node-count is not in the range of min-count and max-count')
agent_pool_profile.min_count = int(min_count)
agent_pool_profile.max_count = int(max_count)
agent_pool_profile.enable_auto_scaling = True
else:
if min_count is not None or max_count is not None:
            raise CLIError('min-count and max-count can only be used together with --enable-cluster-autoscaler')
def _validate_autoscaler_update_counts(min_count, max_count, is_enable_or_update):
"""
    Validates min-count and max-count when enabling or updating the cluster autoscaler
"""
if min_count is None or max_count is None:
if is_enable_or_update:
raise CLIError('Please specify both min-count and max-count when --enable-cluster-autoscaler or '
'--update-cluster-autoscaler is set.')
if min_count is not None and max_count is not None:
if int(min_count) > int(max_count):
raise CLIError('Value of min-count should be less than or equal to value of max-count.')
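# Illustrative calls (comment only, not executed):
#     _validate_autoscaler_update_counts(1, 5, True)       # passes: both bounds set and 1 <= 5
#     _validate_autoscaler_update_counts(None, 5, True)    # raises: both bounds required when enabling/updating
#     _validate_autoscaler_update_counts(6, 5, False)      # raises: min-count greater than max-count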
def _print_or_merge_credentials(path, kubeconfig, overwrite_existing, context_name):
"""Merge an unencrypted kubeconfig into the file at the specified path, or print it to
stdout if the path is "-".
"""
# Special case for printing to stdout
if path == "-":
print(kubeconfig)
return
# ensure that at least an empty ~/.kube/config exists
directory = os.path.dirname(path)
if directory and not os.path.exists(directory):
try:
os.makedirs(directory)
except OSError as ex:
if ex.errno != errno.EEXIST:
raise
if not os.path.exists(path):
with os.fdopen(os.open(path, os.O_CREAT | os.O_WRONLY, 0o600), 'wt'):
pass
# merge the new kubeconfig into the existing one
fd, temp_path = tempfile.mkstemp()
additional_file = os.fdopen(fd, 'w+t')
try:
additional_file.write(kubeconfig)
additional_file.flush()
merge_kubernetes_configurations(path, temp_path, overwrite_existing, context_name)
except yaml.YAMLError as ex:
logger.warning('Failed to merge credentials to kube config file: %s', ex)
finally:
additional_file.close()
os.remove(temp_path)
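# Behavior sketch: _print_or_merge_credentials(path='-', ...) just prints the kubeconfig to stdout;
# for any other path (the default is ~/.kube/config) the file is created with 0600 permissions if
# missing and the new kubeconfig is merged into it via merge_kubernetes_configurations.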
def _remove_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags']
ap_attrs = ['os_disk_size_gb', 'vnet_subnet_id']
sp_attrs = ['secret']
for managed_cluster in managed_clusters:
for attr in attrs:
if getattr(managed_cluster, attr, None) is None:
delattr(managed_cluster, attr)
if managed_cluster.agent_pool_profiles is not None:
for ap_profile in managed_cluster.agent_pool_profiles:
for attr in ap_attrs:
if getattr(ap_profile, attr, None) is None:
delattr(ap_profile, attr)
for attr in sp_attrs:
if getattr(managed_cluster.service_principal_profile, attr, None) is None:
delattr(managed_cluster.service_principal_profile, attr)
return managed_clusters
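# Example (illustrative): a ManagedCluster whose 'tags' attribute is None comes back from
# _remove_nulls with that attribute deleted, so the CLI output omits the field instead of
# rendering "tags": null.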
def _remove_osa_nulls(managed_clusters):
"""
Remove some often-empty fields from a list of OpenShift ManagedClusters, so the JSON representation
doesn't contain distracting null fields.
    This works around a quirk of the Python SDK's behavior. These fields are not sent
by the server, but get recreated by the CLI's own "to_dict" serialization.
"""
attrs = ['tags', 'plan', 'type', 'id']
ap_master_attrs = ['name', 'os_type']
net_attrs = ['peer_vnet_id']
for managed_cluster in managed_clusters:
for attr in attrs:
if hasattr(managed_cluster, attr) and getattr(managed_cluster, attr) is None:
delattr(managed_cluster, attr)
for attr in ap_master_attrs:
if getattr(managed_cluster.master_pool_profile, attr, None) is None:
delattr(managed_cluster.master_pool_profile, attr)
for attr in net_attrs:
if getattr(managed_cluster.network_profile, attr, None) is None:
delattr(managed_cluster.network_profile, attr)
return managed_clusters
def _validate_aci_location(norm_location):
"""
Validate the Azure Container Instance location
"""
aci_locations = [
"australiaeast",
"canadacentral",
"centralindia",
"centralus",
"eastasia",
"eastus",
"eastus2",
"eastus2euap",
"japaneast",
"northcentralus",
"northeurope",
"southcentralus",
"southeastasia",
"southindia",
"uksouth",
"westcentralus",
"westus",
"westus2",
"westeurope"
]
if norm_location not in aci_locations:
        raise CLIError('Azure Container Instance is not available at location "{}".'.format(norm_location) +
                       ' The available locations are "{}".'.format(','.join(aci_locations)))
def osa_list(cmd, client, resource_group_name=None):
if resource_group_name:
managed_clusters = client.list_by_resource_group(resource_group_name)
else:
managed_clusters = client.list()
return _remove_osa_nulls(list(managed_clusters))
def _format_workspace_id(workspace_id):
workspace_id = workspace_id.strip()
if not workspace_id.startswith('/'):
workspace_id = '/' + workspace_id
if workspace_id.endswith('/'):
workspace_id = workspace_id.rstrip('/')
return workspace_id
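# Example (illustrative) of the normalization above:
#     _format_workspace_id('subscriptions/<sub>/resourcegroups/rg/providers/'
#                          'microsoft.operationalinsights/workspaces/ws/')
# returns '/subscriptions/<sub>/resourcegroups/rg/providers/microsoft.operationalinsights/workspaces/ws'
# (a leading '/' is added and trailing '/' characters are stripped).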
def openshift_create(cmd, client, resource_group_name, name, # pylint: disable=too-many-locals
location=None,
compute_vm_size="Standard_D4s_v3",
compute_count=3,
aad_client_app_id=None,
aad_client_app_secret=None,
aad_tenant_id=None,
vnet_prefix="10.0.0.0/8",
subnet_prefix="10.0.0.0/24",
vnet_peer=None,
tags=None,
no_wait=False,
workspace_id=None,
customer_admin_group_id=None):
logger.warning('Support for the creation of ARO 3.11 clusters ends 30 Nov 2020. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
if location is None:
location = _get_rg_location(cmd.cli_ctx, resource_group_name)
agent_pool_profiles = []
agent_node_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='compute', # Must be 12 chars or less before ACS RP adds to it
count=int(compute_count),
vm_size=compute_vm_size,
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.compute,
subnet_cidr=subnet_prefix
)
agent_infra_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='infra', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
role=OpenShiftAgentPoolProfileRole.infra,
subnet_cidr=subnet_prefix
)
agent_pool_profiles.append(agent_node_pool_profile)
agent_pool_profiles.append(agent_infra_pool_profile)
agent_master_pool_profile = OpenShiftManagedClusterAgentPoolProfile(
name='master', # Must be 12 chars or less before ACS RP adds to it
count=int(3),
vm_size="Standard_D4s_v3",
os_type="Linux",
subnet_cidr=subnet_prefix
)
identity_providers = []
create_aad = False
    # Validate that the cluster does not already exist, since AAD rotation is not supported on OSA for now
try:
client.get(resource_group_name, name)
except CloudError:
        # Validate whether aad_client_app_id, aad_client_app_secret and aad_tenant_id are set
if aad_client_app_id is None and aad_client_app_secret is None and aad_tenant_id is None:
create_aad = True
osa_aad_identity = _ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=aad_client_app_id,
aad_client_app_secret=aad_client_app_secret,
aad_tenant_id=aad_tenant_id, identifier=None,
name=name, create=create_aad,
customer_admin_group_id=customer_admin_group_id)
identity_providers.append(
OpenShiftManagedClusterIdentityProvider(
name='Azure AD',
provider=osa_aad_identity
)
)
auth_profile = OpenShiftManagedClusterAuthProfile(identity_providers=identity_providers)
default_router_profile = OpenShiftRouterProfile(name='default')
if vnet_peer is not None:
from msrestazure.tools import is_valid_resource_id, resource_id
if not is_valid_resource_id(vnet_peer):
vnet_peer = resource_id(
subscription=get_subscription_id(cmd.cli_ctx),
resource_group=resource_group_name,
namespace='Microsoft.Network', type='virtualNetwork',
name=vnet_peer
)
if workspace_id is not None:
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
else:
monitor_profile = None
network_profile = NetworkProfile(vnet_cidr=vnet_prefix, peer_vnet_id=vnet_peer)
osamc = OpenShiftManagedCluster(
location=location, tags=tags,
open_shift_version="v3.11",
network_profile=network_profile,
auth_profile=auth_profile,
agent_pool_profiles=agent_pool_profiles,
master_pool_profile=agent_master_pool_profile,
router_profiles=[default_router_profile],
monitor_profile=monitor_profile)
try:
# long_running_operation_timeout=300
result = sdk_no_wait(no_wait, client.create_or_update,
resource_group_name=resource_group_name, resource_name=name, parameters=osamc)
result = LongRunningOperation(cmd.cli_ctx)(result)
instance = client.get(resource_group_name, name)
_ensure_osa_aad(cmd.cli_ctx,
aad_client_app_id=osa_aad_identity.client_id,
aad_client_app_secret=osa_aad_identity.secret,
aad_tenant_id=osa_aad_identity.tenant_id, identifier=instance.public_hostname,
name=name, create=create_aad)
except CloudError as ex:
if "The resource type could not be found in the namespace 'Microsoft.ContainerService" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
if "No registered resource provider found for location" in ex.message:
raise CLIError('Please make sure your subscription is whitelisted to use this service. https://aka.ms/openshift/managed') # pylint: disable=line-too-long
raise ex
def openshift_show(cmd, client, resource_group_name, name):
logger.warning('Support for existing ARO 3.11 clusters ends June 2022. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
mc = client.get(resource_group_name, name)
return _remove_osa_nulls([mc])[0]
def openshift_scale(cmd, client, resource_group_name, name, compute_count, no_wait=False):
logger.warning('Support for existing ARO 3.11 clusters ends June 2022. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
# TODO: change this approach when we support multiple agent pools.
idx = 0
for i in range(len(instance.agent_pool_profiles)):
if instance.agent_pool_profiles[i].name.lower() == "compute":
idx = i
break
instance.agent_pool_profiles[idx].count = int(compute_count) # pylint: disable=no-member
    # Null out the AAD profile and manually set the master agent pool name, otherwise validation complains
instance.master_pool_profile.name = "master"
instance.auth_profile = None
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_enable(cmd, client, resource_group_name, name, workspace_id, no_wait=False):
logger.warning('Support for existing ARO 3.11 clusters ends June 2022. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
workspace_id = _format_workspace_id(workspace_id)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=True, workspace_resource_id=workspace_id) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
def openshift_monitor_disable(cmd, client, resource_group_name, name, no_wait=False):
logger.warning('Support for existing ARO 3.11 clusters ends June 2022. Please see aka.ms/aro/4 for information on switching to ARO 4.') # pylint: disable=line-too-long
instance = client.get(resource_group_name, name)
monitor_profile = OpenShiftManagedClusterMonitorProfile(enabled=False, workspace_resource_id=None) # pylint: disable=line-too-long
instance.monitor_profile = monitor_profile
return sdk_no_wait(no_wait, client.create_or_update, resource_group_name, name, instance)
|
py | 1a35b9173052c9eaeb830314267fcec6a4b6caa3 | from operator import methodcaller
from readers import FileReader
COM = "COM"
YOU = "YOU"
SAN = "SAN"
def main():
raw_orbits = list(map(methodcaller("split", ")"), map(str.strip, FileReader.read_input_as_list())))
orbits = {o[1]: o[0] for o in raw_orbits}
you_planets = set_of_planets_to_home(YOU, orbits, set())
santa_planets = set_of_planets_to_home(SAN, orbits, set())
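    # Each set holds the starting object itself (YOU/SAN) plus every planet on its path to COM;
    # the symmetric difference keeps the planets unique to either path, and subtracting 2
    # removes YOU and SAN themselves, giving the number of orbital transfers between them.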
print(f"Total number jumps to santa: {len(you_planets ^ santa_planets) - 2}")
def set_of_planets_to_home(planet, orbits, planets):
if planet in orbits:
planets.add(planet)
if orbits[planet] == COM:
print(f"{len(planets)} planets to home")
return planets
return set_of_planets_to_home(orbits[planet], orbits, planets)
print(f"This is odd, did not expect to get here! Processing: {planet}")
return planets
if __name__ == "__main__":
main()
|
py | 1a35b92fe5a0f35d02f36df40b83a63a090a1bee | from cohortextractor import StudyDefinition, patients, codelist, codelist_from_csv # NOQA
study = StudyDefinition(
default_expectations={
"date": {"earliest": "1900-01-01", "latest": "today"},
"rate": "uniform",
"incidence": 0.5,
},
population=patients.registered_with_one_practice_between(
"2019-02-01", "2020-02-01"
),
age=patients.age_as_of(
"2019-09-01",
return_expectations={
"rate": "universal",
"int": {"distribution": "population_ages"},
},
),
)
|
py | 1a35b9f18c547a8bfb967bf3b80ccc9e194c6a48 | from django.db import models
from django.core.urlresolvers import reverse
from vocabs.models import SkosConcept
class Areal(models.Model):
name = models.CharField(max_length=300, blank=True)
def __str__(self):
return "{}".format(self.name)
class Planquadrat(models.Model):
name = models.CharField(max_length=300, blank=True)
def __str__(self):
return "{}".format(self.name)
class Planum(models.Model):
name = models.CharField(max_length=300, blank=True)
def __str__(self):
return "{}".format(self.name)
class ArchObject(models.Model):
title = models.CharField(max_length=300, blank=True)
object_type = models.ForeignKey(SkosConcept, blank=True)
def __str__(self):
return "{} ({})".format(self.title, self.object_type)
def get_absolute_url(self):
return reverse('documents:archobject_detail', kwargs={'pk': self.id})
class DigObject(models.Model):
title = models.CharField(max_length=300, blank=True)
object_type = models.ForeignKey(SkosConcept, blank=True)
def __str__(self):
return "{} ({})".format(self.title, self.object_type)
def get_absolute_url(self):
return reverse('documents:digobject_detail', kwargs={'pk': self.id})
class Document(models.Model):
"""Holds information about documents. (cidoc:E31)"""
document_id = models.URLField(blank=True)
document_filename = models.CharField(max_length=300, blank=True)
document_name = models.CharField(max_length=300, blank=True)
document_type = models.ForeignKey(SkosConcept, blank=True, null=True)
areal = models.ForeignKey(Areal, null=True, blank=True)
planquadrat = models.ForeignKey(Planquadrat, blank=True, null=True)
planum = models.ForeignKey(Planum, blank=True, null=True)
archobject = models.ManyToManyField(ArchObject, blank=True)
digobject = models.ManyToManyField(DigObject, blank=True)
def __str__(self):
return "{}".format(self.document_id)
def get_absolute_url(self):
return reverse('documents:document_detail', kwargs={'pk': self.id})
|
py | 1a35bb05fa9aa65b3074b18734cb5185eb8cc080 | '''Get and put messages on IBM MQ queues.
The user is based on `pymqi` for communicating with IBM MQ. However, `pymqi` uses native libraries which `gevent` (used by `locust`) cannot patch,
so any call into `pymqi` would block the rest of `locust`. To get around this, the user implementation communicates with a stand-alone
process via zmq, which in turn communicates with IBM MQ.
`async-messaged` starts automagically when a scenario uses `MessageQueueUser` and `pymqi` dependencies are installed.
## Request methods
Supports the following request methods:
* send
* put
* get
* receive
## Format
Format of `host` is the following:
``` plain
mq://<hostname>:<port>/?QueueManager=<queue manager name>&Channel=<channel name>
```
`endpoint` in the request is the name of an MQ queue. This can also be combined with an expression, if
a specific message is to be retrieved from the queue. The format of endpoint is:
``` plain
queue:<queue_name>[, expression:<expression>]
```
Where `<expression>` can be an XPath or jsonpath expression, depending on the specified content type. See example below.
## Examples
Example of how to use it in a scenario:
``` gherkin
Given a user of type "MessageQueue" load testing "mq://mq.example.com/?QueueManager=QM01&Channel=SRVCONN01"
Then put request "test/queue-message.j2.json" with name "queue-message" to endpoint "queue:INCOMING.MESSAGES"
```
### Get message
Default behavior is to fail directly if there is no message on the queue. If the request should wait until a message is available,
set the time it should wait with `message.wait` (seconds) context variable.
To keep the connection alive during longer waiting periods, a heartbeat interval can be configured using the
`connection.heartbeat_interval` (seconds) context variable (default 300).
``` gherkin
Given a user of type "MessageQueue" load testing "mq://mq.example.com/?QueueManager=QM01&Channel=SRVCONN01"
And set context variable "message.wait" to "5"
Then get request with name "get-queue-message" from endpoint "queue:INCOMING.MESSAGES"
```
In this example, the request will not fail if a message arrives on the queue within 5 seconds.
### Get message with expression
When specifying an expression, the messages on the queue are first browsed. If any message matches the expression, it is
then consumed from the queue. If no matching message was found during browsing, the browse is retried after a short delay,
until the specified `message.wait` seconds have elapsed. To use expressions, a content type must be specified for the get
request, e.g. `application/xml`:
``` gherkin
Given a user of type "MessageQueue" load testing "mq://mq.example.com/?QueueManager=QM01&Channel=SRVCONN01"
And set context variable "message.wait" to "5"
Then get request with name "get-specific-queue-message" from endpoint "queue:INCOMING.MESSAGES, expression: //document[@id='abc123']"
And set response content type to "application/xml"
```
### Authentication
#### Username and password
``` gherkin
Given a user of type "MessageQueue" load testing "mq://mqm:[email protected]/?QueueManager=QM01&Channel=SRVCONN01"
And set context variable "auth.username" to "<username>"
And set context variable "auth.password" to "<password>"
```
#### With TLS
A [key repository](https://www.ibm.com/docs/en/ibm-mq/7.5?topic=wstulws-setting-up-key-repository-unix-linux-windows-systems)
(3 files; `.kdb`, `.rdb` and `.sth`) for the user is needed, and is specified with `auth.key_file` excluding the file extension.
``` gherkin
Given a user of type "MessageQueue" load testing "mq://mqm:[email protected]/?QueueManager=QM01&Channel=SRVCONN01"
And set context variable "auth.username" to "<username>"
And set context variable "auth.password" to "<password>"
And set context variable "auth.key_file" to "<path to key file, excl. file extension>"
```
Default SSL cipher is `ECDHE_RSA_AES_256_GCM_SHA384`, change it by setting `auth.ssl_cipher` context variable.
Default certificate label is set to `auth.username`, change it by setting `auth.cert_label` context variable.
'''
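# Illustrative mapping, derived from MessageQueueUser.__init__ below (the URL and values are examples only):
# a host of "mq://mq.example.com/?QueueManager=QM01&Channel=SRVCONN01" is turned into the async-messaged context
#   connection    -> "mq.example.com(1414)"   (port defaults to 1414 when omitted)
#   queue_manager -> "QM01"
#   channel       -> "SRVCONN01"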
import logging
from typing import Dict, Any, Generator, Tuple, Optional, cast
from urllib.parse import urlparse, parse_qs, unquote
from contextlib import contextmanager
from time import perf_counter as time
from zmq.sugar.constants import NOBLOCK as ZMQ_NOBLOCK, REQ as ZMQ_REQ
from zmq.error import Again as ZMQAgain
import zmq.green as zmq
from locust.exception import StopUser
from locust.env import Environment
from gevent import sleep as gsleep
from grizzly_extras.async_message import AsyncMessageContext, AsyncMessageRequest, AsyncMessageResponse, AsyncMessageError
from grizzly_extras.arguments import get_unsupported_arguments, parse_arguments
from ..types import GrizzlyResponse, RequestDirection, RequestType
from ..tasks import RequestTask
from ..utils import merge_dicts
from .base import GrizzlyUser, ResponseHandler, RequestLogger
from . import logger
# not used here, but needed for the sanity check below
try:
# do not fail grizzly if ibm mq dependencies are missing, some might
# not be interested in MessageQueueUser.
import pymqi # pylint: disable=unused-import
except:
from grizzly_extras import dummy_pymqi as pymqi
class MessageQueueUser(ResponseHandler, RequestLogger, GrizzlyUser):
_context: Dict[str, Any] = {
'auth': {
'username': None,
'password': None,
'key_file': None,
'cert_label': None,
'ssl_cipher': None
},
'message': {
'wait': None,
}
}
__dependencies__ = set(['async-messaged'])
am_context: AsyncMessageContext
worker_id: Optional[str]
zmq_context = zmq.Context()
zmq_client: zmq.Socket
zmq_url = 'tcp://127.0.0.1:5554'
def __init__(self, environment: Environment, *args: Tuple[Any, ...], **kwargs: Dict[str, Any]) -> None:
if pymqi.__name__ == 'grizzly_extras.dummy_pymqi':
pymqi.raise_for_error(self.__class__)
super().__init__(environment, *args, **kwargs)
# Get configuration values from host string
parsed = urlparse(self.host or '')
if parsed.scheme != 'mq':
raise ValueError(f'"{parsed.scheme}" is not a supported scheme for {self.__class__.__name__}')
if parsed.hostname is None or len(parsed.hostname) < 1:
raise ValueError(f'{self.__class__.__name__}: hostname is not specified in {self.host}')
if parsed.username is not None or parsed.password is not None:
raise ValueError(f'{self.__class__.__name__}: username and password should be set via context variables "auth.username" and "auth.password"')
if parsed.query == '':
raise ValueError(f'{self.__class__.__name__} needs QueueManager and Channel in the query string')
port = parsed.port or 1414
self.am_context = {
'url': self.host or '',
'connection': f'{parsed.hostname}({port})',
}
params = parse_qs(parsed.query)
if 'QueueManager' not in params:
raise ValueError(f'{self.__class__.__name__} needs QueueManager in the query string')
if 'Channel' not in params:
raise ValueError(f'{self.__class__.__name__} needs Channel in the query string')
self.am_context.update({
'queue_manager': unquote(params['QueueManager'][0]),
'channel': unquote(params['Channel'][0]),
})
# Get configuration values from context
self._context = merge_dicts(super().context(), self.__class__._context)
auth_context = self._context.get('auth', {})
username = auth_context.get('username', None)
self.am_context.update({
'username': username,
'password': auth_context.get('password', None),
'key_file': auth_context.get('key_file', None),
'cert_label': auth_context.get('cert_label', None) or username,
'ssl_cipher': auth_context.get('ssl_cipher', None) or 'ECDHE_RSA_AES_256_GCM_SHA384',
'message_wait': self._context.get('message', {}).get('wait', None),
'heartbeat_interval': self._context.get('connection', {}).get('heartbeat_interval', None),
})
self.worker_id = None
# silence uamqp loggers
for uamqp_logger_name in ['uamqp', 'uamqp.c_uamqp']:
logging.getLogger(uamqp_logger_name).setLevel(logging.ERROR)
def request(self, request: RequestTask) -> GrizzlyResponse:
request_name, endpoint, payload = self.render(request)
@contextmanager
def action_context(am_request: AsyncMessageRequest, name: str) -> Generator[Dict[str, Any], None, None]:
exception: Optional[Exception] = None
action: Dict[str, Any] = {
'failure_exception': None,
'meta': False,
'payload': None,
'metadata': None,
}
response: Optional[AsyncMessageResponse] = None
start_time = time()
try:
yield action
self.zmq_client.send_json(am_request)
                # do not block all other "threads", just itself
while True:
try:
response = cast(AsyncMessageResponse, self.zmq_client.recv_json(flags=ZMQ_NOBLOCK))
break
except ZMQAgain:
gsleep(0.1)
except Exception as e:
exception = e
finally:
total_time = int((time() - start_time) * 1000) # do not include event handling in request time
if response is not None:
if self.worker_id is None:
self.worker_id = response['worker']
else:
assert self.worker_id == response['worker'], f'worker changed from {self.worker_id} to {response["worker"]}'
mq_response_time = response.get('response_time', 0)
delta = total_time - mq_response_time
if delta > 100: # @TODO: what is a suitable value?
logger.warning(f'{self.__class__.__name__}: communicating with async-messaged took {delta} ms')
if not response['success'] and exception is None:
exception = AsyncMessageError(response['message'])
else:
response = {}
action['metadata'] = response.get('metadata', None)
action['payload'] = response.get('payload', None)
try:
if not action.get('meta', False):
self.response_event.fire(
name=name,
request=request,
context=(
response.get('metadata', None),
response.get('payload', None),
),
user=self,
exception=exception,
)
except Exception as e:
if exception is None:
exception = e
finally:
self.environment.events.request.fire(
request_type=RequestType.from_string(am_request['action']),
name=name,
response_time=total_time,
response_length=response.get('response_length', None) or 0,
context=self._context,
exception=exception,
)
failure_exception = action.get('failure_exception', None)
action = {
'payload': action['payload'],
'metadata': action['metadata'],
}
if exception is not None and failure_exception is not None:
try:
self.zmq_client.disconnect(self.zmq_url)
except:
pass
raise failure_exception()
name = f'{request.scenario.identifier} {request_name}'
# connect to queue manager at first request
if self.worker_id is None:
with action_context({
'action': RequestType.CONNECT(),
'context': self.am_context
}, self.am_context['connection']) as action:
action.update({
'meta': True,
'failure_exception': request.scenario.failure_exception,
})
self.zmq_client = self.zmq_context.socket(ZMQ_REQ)
self.zmq_client.connect(self.zmq_url)
am_request: AsyncMessageRequest = {
'action': request.method.name,
'worker': self.worker_id,
'context': {
'endpoint': endpoint,
},
'payload': payload,
}
am_request['context']['content_type'] = request.response.content_type.name.lower()
with action_context(am_request, name) as action:
action['failure_exception'] = StopUser
# Parse the endpoint to validate queue name / expression parts
try:
arguments = parse_arguments(endpoint, ':')
except ValueError as e:
raise RuntimeError(str(e)) from e
if 'queue' not in arguments:
raise RuntimeError('queue name must be prefixed with queue:')
unsupported_arguments = get_unsupported_arguments(['queue', 'expression'], arguments)
if len(unsupported_arguments) > 0:
                raise RuntimeError(f'arguments {", ".join(unsupported_arguments)} are not supported')
if 'expression' in arguments and request.method.direction != RequestDirection.FROM:
raise RuntimeError('argument "expression" is not allowed when sending to an endpoint')
action['failure_exception'] = request.scenario.failure_exception
return action['metadata'], action['payload']
|
py | 1a35bb64d32c70c49072417e4bba7572ed461ddf | """
transitions.extensions.factory
------------------------------
This module contains the definitions of classes which combine the functionality of transitions'
extension modules. These classes can be accessed by names as well as through a static convenience
factory object.
"""
from functools import partial
from ..core import Machine
from .nesting import HierarchicalMachine, NestedTransition, NestedEvent
from .locking import LockedMachine
from .diagrams import GraphMachine, TransitionGraphSupport
from .markup import MarkupMachine
try:
from transitions.extensions.asyncio import AsyncMachine, AsyncTransition
from transitions.extensions.asyncio import HierarchicalAsyncMachine, NestedAsyncTransition
except (ImportError, SyntaxError):
class AsyncMachine: # Mocks for Python version 3.6 and earlier
pass
class AsyncTransition:
pass
class HierarchicalAsyncMachine:
pass
class NestedAsyncTransition:
pass
class MachineFactory(object):
"""
Convenience factory for machine class retrieval.
"""
# get one of the predefined classes which fulfill the criteria
@staticmethod
def get_predefined(graph=False, nested=False, locked=False, asyncio=False):
""" A function to retrieve machine classes by required functionality.
Args:
graph (bool): Whether the returned class should contain graph support.
nested: Whether the returned machine class should support nested states.
            locked: Whether the returned class should facilitate locks for threadsafety.
            asyncio: Whether the returned class should support asynchronous callbacks.
        Returns (class): A machine class with the specified features.
"""
try:
return _CLASS_MAP[(graph, nested, locked, asyncio)]
except KeyError:
raise ValueError("Feature combination not (yet) supported")
class NestedGraphTransition(TransitionGraphSupport, NestedTransition):
"""
A transition type to be used with (subclasses of) `HierarchicalGraphMachine` and
`LockedHierarchicalGraphMachine`.
"""
pass
class HierarchicalMarkupMachine(MarkupMachine, HierarchicalMachine):
pass
class HierarchicalGraphMachine(GraphMachine, HierarchicalMarkupMachine):
"""
A hierarchical state machine with graph support.
"""
transition_cls = NestedGraphTransition
class LockedHierarchicalMachine(LockedMachine, HierarchicalMachine):
"""
A threadsafe hierarchical machine.
"""
event_cls = NestedEvent
def _get_qualified_state_name(self, state):
return self.get_global_name(state.name)
class LockedGraphMachine(GraphMachine, LockedMachine):
"""
A threadsafe machine with graph support.
"""
@staticmethod
def format_references(func):
if isinstance(func, partial) and func.func.__name__.startswith('_locked_method'):
func = func.args[0]
return GraphMachine.format_references(func)
class LockedHierarchicalGraphMachine(GraphMachine, LockedHierarchicalMachine):
"""
A threadsafe hierarchical machine with graph support.
"""
transition_cls = NestedGraphTransition
event_cls = NestedEvent
@staticmethod
def format_references(func):
if isinstance(func, partial) and func.func.__name__.startswith('_locked_method'):
func = func.args[0]
return GraphMachine.format_references(func)
class AsyncGraphMachine(GraphMachine, AsyncMachine):
transition_cls = AsyncTransition
class HierarchicalAsyncGraphMachine(GraphMachine, HierarchicalAsyncMachine):
transition_cls = NestedAsyncTransition
# 4d tuple (graph, nested, locked, async)
_CLASS_MAP = {
(False, False, False, False): Machine,
(False, False, True, False): LockedMachine,
(False, True, False, False): HierarchicalMachine,
(False, True, True, False): LockedHierarchicalMachine,
(True, False, False, False): GraphMachine,
(True, False, True, False): LockedGraphMachine,
(True, True, False, False): HierarchicalGraphMachine,
(True, True, True, False): LockedHierarchicalGraphMachine,
(False, False, False, True): AsyncMachine,
(True, False, False, True): AsyncGraphMachine,
(False, True, False, True): HierarchicalAsyncMachine,
(True, True, False, True): HierarchicalAsyncGraphMachine
}
|
py | 1a35bccb0c317bbb9349b0499dca0a79d7e7545c | #!/usr/bin/env python3
import warnings
from copy import deepcopy
import torch
from .. import settings
from ..distributions import MultivariateNormal
from ..likelihoods import _GaussianLikelihoodBase
from ..utils.broadcasting import _mul_broadcast_shape
from .exact_prediction_strategies import prediction_strategy
from .gp import GP
class ExactGP(GP):
r"""
The base class for any Gaussian process latent function to be used in conjunction
with exact inference.
:param torch.Tensor train_inputs: (size n x d) The training features :math:`\mathbf X`.
:param torch.Tensor train_targets: (size n) The training targets :math:`\mathbf y`.
:param ~gpytorch.likelihoods.GaussianLikelihood likelihood: The Gaussian likelihood that defines
the observational distribution. Since we're using exact inference, the likelihood must be Gaussian.
The :meth:`forward` function should describe how to compute the prior latent distribution
on a given input. Typically, this will involve a mean and kernel function.
The result must be a :obj:`~gpytorch.distributions.MultivariateNormal`.
Calling this model will return the posterior of the latent Gaussian process when conditioned
on the training data. The output will be a :obj:`~gpytorch.distributions.MultivariateNormal`.
Example:
>>> class MyGP(gpytorch.models.ExactGP):
>>> def __init__(self, train_x, train_y, likelihood):
>>> super().__init__(train_x, train_y, likelihood)
>>> self.mean_module = gpytorch.means.ZeroMean()
>>> self.covar_module = gpytorch.kernels.ScaleKernel(gpytorch.kernels.RBFKernel())
>>>
>>> def forward(self, x):
>>> mean = self.mean_module(x)
>>> covar = self.covar_module(x)
>>> return gpytorch.distributions.MultivariateNormal(mean, covar)
>>>
>>> # train_x = ...; train_y = ...
>>> likelihood = gpytorch.likelihoods.GaussianLikelihood()
>>> model = MyGP(train_x, train_y, likelihood)
>>>
>>> # test_x = ...;
>>> model(test_x) # Returns the GP latent function at test_x
>>> likelihood(model(test_x)) # Returns the (approximate) predictive posterior distribution at test_x
"""
def __init__(self, train_inputs, train_targets, likelihood):
if train_inputs is not None and torch.is_tensor(train_inputs):
train_inputs = (train_inputs,)
if train_inputs is not None and not all(torch.is_tensor(train_input) for train_input in train_inputs):
raise RuntimeError("Train inputs must be a tensor, or a list/tuple of tensors")
if not isinstance(likelihood, _GaussianLikelihoodBase):
raise RuntimeError("ExactGP can only handle Gaussian likelihoods")
super(ExactGP, self).__init__()
if train_inputs is not None:
self.train_inputs = tuple(tri.unsqueeze(-1) if tri.ndimension() == 1 else tri for tri in train_inputs)
self.train_targets = train_targets
else:
self.train_inputs = None
self.train_targets = None
self.likelihood = likelihood
self.prediction_strategy = None
@property
def train_targets(self):
return self._train_targets
@train_targets.setter
def train_targets(self, value):
object.__setattr__(self, "_train_targets", value)
def _apply(self, fn):
if self.train_inputs is not None:
self.train_inputs = tuple(fn(train_input) for train_input in self.train_inputs)
self.train_targets = fn(self.train_targets)
return super(ExactGP, self)._apply(fn)
def local_load_samples(self, samples_dict, memo, prefix):
"""
Replace the model's learned hyperparameters with samples from a posterior distribution.
"""
# Pyro always puts the samples in the first batch dimension
num_samples = next(iter(samples_dict.values())).size(0)
self.train_inputs = tuple(tri.unsqueeze(0).expand(num_samples, *tri.shape) for tri in self.train_inputs)
self.train_targets = self.train_targets.unsqueeze(0).expand(num_samples, *self.train_targets.shape)
super().local_load_samples(samples_dict, memo, prefix)
def set_train_data(self, inputs=None, targets=None, strict=True):
"""
Set training data (does not re-fit model hyper-parameters).
:param torch.Tensor inputs: The new training inputs.
:param torch.Tensor targets: The new training targets.
:param bool strict: (default True) If `True`, the new inputs and
targets must have the same shape, dtype, and device
as the current inputs and targets. Otherwise, any shape/dtype/device are allowed.
"""
if inputs is not None:
if torch.is_tensor(inputs):
inputs = (inputs,)
inputs = tuple(input_.unsqueeze(-1) if input_.ndimension() == 1 else input_ for input_ in inputs)
if strict:
for input_, t_input in zip(inputs, self.train_inputs or (None,)):
for attr in {"shape", "dtype", "device"}:
expected_attr = getattr(t_input, attr, None)
found_attr = getattr(input_, attr, None)
if expected_attr != found_attr:
msg = "Cannot modify {attr} of inputs (expected {e_attr}, found {f_attr})."
msg = msg.format(attr=attr, e_attr=expected_attr, f_attr=found_attr)
raise RuntimeError(msg)
self.train_inputs = inputs
if targets is not None:
if strict:
for attr in {"shape", "dtype", "device"}:
expected_attr = getattr(self.train_targets, attr, None)
found_attr = getattr(targets, attr, None)
if expected_attr != found_attr:
msg = "Cannot modify {attr} of targets (expected {e_attr}, found {f_attr})."
msg = msg.format(attr=attr, e_attr=expected_attr, f_attr=found_attr)
raise RuntimeError(msg)
self.train_targets = targets
self.prediction_strategy = None
def get_fantasy_model(self, inputs, targets, **kwargs):
"""
Returns a new GP model that incorporates the specified inputs and targets as new training data.
Using this method is more efficient than updating with `set_train_data` when the number of inputs is relatively
small, because any computed test-time caches will be updated in linear time rather than computed from scratch.
.. note::
If `targets` is a batch (e.g. `b x m`), then the GP returned from this method will be a batch mode GP.
If `inputs` is of the same (or lesser) dimension as `targets`, then it is assumed that the fantasy points
are the same for each target batch.
:param torch.Tensor inputs: (`b1 x ... x bk x m x d` or `f x b1 x ... x bk x m x d`) Locations of fantasy
observations.
:param torch.Tensor targets: (`b1 x ... x bk x m` or `f x b1 x ... x bk x m`) Labels of fantasy observations.
:return: An `ExactGP` model with `n + m` training examples, where the `m` fantasy examples have been added
and all test-time caches have been updated.
:rtype: ~gpytorch.models.ExactGP
"""
if self.prediction_strategy is None:
raise RuntimeError(
"Fantasy observations can only be added after making predictions with a model so that "
"all test independent caches exist. Call the model on some data first!"
)
model_batch_shape = self.train_inputs[0].shape[:-2]
if self.train_targets.dim() > len(model_batch_shape) + 1:
raise RuntimeError("Cannot yet add fantasy observations to multitask GPs, but this is coming soon!")
if not isinstance(inputs, list):
inputs = [inputs]
inputs = [i.unsqueeze(-1) if i.ndimension() == 1 else i for i in inputs]
target_batch_shape = targets.shape[:-1]
input_batch_shape = inputs[0].shape[:-2]
tbdim, ibdim = len(target_batch_shape), len(input_batch_shape)
if not (tbdim == ibdim + 1 or tbdim == ibdim):
raise RuntimeError(
f"Unsupported batch shapes: The target batch shape ({target_batch_shape}) must have either the "
f"same dimension as or one more dimension than the input batch shape ({input_batch_shape})"
)
# Check whether we can properly broadcast batch dimensions
err_msg = (
f"Model batch shape ({model_batch_shape}) and target batch shape "
f"({target_batch_shape}) are not broadcastable."
)
_mul_broadcast_shape(model_batch_shape, target_batch_shape, error_msg=err_msg)
if len(model_batch_shape) > len(input_batch_shape):
input_batch_shape = model_batch_shape
if len(model_batch_shape) > len(target_batch_shape):
target_batch_shape = model_batch_shape
# If input has no fantasy batch dimension but target does, we can save memory and computation by not
# computing the covariance for each element of the batch. Therefore we don't expand the inputs to the
# size of the fantasy model here - this is done below, after the evaluation and fast fantasy update
train_inputs = [tin.expand(input_batch_shape + tin.shape[-2:]) for tin in self.train_inputs]
train_targets = self.train_targets.expand(target_batch_shape + self.train_targets.shape[-1:])
full_inputs = [
torch.cat([train_input, input.expand(input_batch_shape + input.shape[-2:])], dim=-2)
for train_input, input in zip(train_inputs, inputs)
]
full_targets = torch.cat([train_targets, targets.expand(target_batch_shape + targets.shape[-1:])], dim=-1)
try:
fantasy_kwargs = {"noise": kwargs.pop("noise")}
except KeyError:
fantasy_kwargs = {}
full_output = super(ExactGP, self).__call__(*full_inputs, **kwargs)
# Copy model without copying training data or prediction strategy (since we'll overwrite those)
old_pred_strat = self.prediction_strategy
old_train_inputs = self.train_inputs
old_train_targets = self.train_targets
old_likelihood = self.likelihood
self.prediction_strategy = None
self.train_inputs = None
self.train_targets = None
self.likelihood = None
new_model = deepcopy(self)
self.prediction_strategy = old_pred_strat
self.train_inputs = old_train_inputs
self.train_targets = old_train_targets
self.likelihood = old_likelihood
new_model.likelihood = old_likelihood.get_fantasy_likelihood(**fantasy_kwargs)
new_model.prediction_strategy = old_pred_strat.get_fantasy_strategy(
inputs, targets, full_inputs, full_targets, full_output, **fantasy_kwargs
)
# if the fantasies are at the same points, we need to expand the inputs for the new model
if tbdim == ibdim + 1:
new_model.train_inputs = [fi.expand(target_batch_shape + fi.shape[-2:]) for fi in full_inputs]
else:
new_model.train_inputs = full_inputs
new_model.train_targets = full_targets
return new_model
def train(self, mode=True):
if mode:
self.prediction_strategy = None
return super(ExactGP, self).train(mode)
def _load_from_state_dict(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
self.prediction_strategy = None
super()._load_from_state_dict(
state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
)
def __call__(self, *args, **kwargs):
train_inputs = list(self.train_inputs) if self.train_inputs is not None else []
inputs = [i.unsqueeze(-1) if i.ndimension() == 1 else i for i in args]
# Training mode: optimizing
if self.training:
if self.train_inputs is None:
raise RuntimeError(
"train_inputs, train_targets cannot be None in training mode. "
"Call .eval() for prior predictions, or call .set_train_data() to add training data."
)
if settings.debug.on():
if not all(torch.equal(train_input, input) for train_input, input in zip(train_inputs, inputs)):
raise RuntimeError("You must train on the training inputs!")
res = super().__call__(*inputs, **kwargs)
return res
# Prior mode
elif settings.prior_mode.on() or self.train_inputs is None or self.train_targets is None:
full_inputs = args
full_output = super(ExactGP, self).__call__(*full_inputs, **kwargs)
if settings.debug().on():
if not isinstance(full_output, MultivariateNormal):
raise RuntimeError("ExactGP.forward must return a MultivariateNormal")
return full_output
# Posterior mode
else:
if settings.debug.on():
if all(torch.equal(train_input, input) for train_input, input in zip(train_inputs, inputs)):
warnings.warn(
"The input matches the stored training data. Did you forget to call model.train()?", UserWarning
)
# Get the terms that only depend on training data
if self.prediction_strategy is None:
train_output = super().__call__(*train_inputs, **kwargs)
                # Create the prediction strategy for the training data
self.prediction_strategy = prediction_strategy(
train_inputs=train_inputs,
train_prior_dist=train_output,
train_labels=self.train_targets,
likelihood=self.likelihood,
)
# Concatenate the input to the training input
full_inputs = []
batch_shape = train_inputs[0].shape[:-2]
for train_input, input in zip(train_inputs, inputs):
# Make sure the batch shapes agree for training/test data
if batch_shape != train_input.shape[:-2]:
batch_shape = _mul_broadcast_shape(batch_shape, train_input.shape[:-2])
train_input = train_input.expand(*batch_shape, *train_input.shape[-2:])
if batch_shape != input.shape[:-2]:
batch_shape = _mul_broadcast_shape(batch_shape, input.shape[:-2])
train_input = train_input.expand(*batch_shape, *train_input.shape[-2:])
input = input.expand(*batch_shape, *input.shape[-2:])
full_inputs.append(torch.cat([train_input, input], dim=-2))
# Get the joint distribution for training/test data
full_output = super(ExactGP, self).__call__(*full_inputs, **kwargs)
if settings.debug().on():
if not isinstance(full_output, MultivariateNormal):
raise RuntimeError("ExactGP.forward must return a MultivariateNormal")
full_mean, full_covar = full_output.loc, full_output.lazy_covariance_matrix
# Determine the shape of the joint distribution
batch_shape = full_output.batch_shape
joint_shape = full_output.event_shape
tasks_shape = joint_shape[1:] # For multitask learning
test_shape = torch.Size([joint_shape[0] - self.prediction_strategy.train_shape[0], *tasks_shape])
# Make the prediction
with settings._use_eval_tolerance():
predictive_mean, predictive_covar = self.prediction_strategy.exact_prediction(full_mean, full_covar)
# Reshape predictive mean to match the appropriate event shape
predictive_mean = predictive_mean.view(*batch_shape, *test_shape).contiguous()
return full_output.__class__(predictive_mean, predictive_covar)
|
py | 1a35bcdde50f7194d88c6eec4dacb5962b21935e | """PyLabware driver for Buchi C815 flash system."""
import json
import warnings
from typing import Dict, Optional, Union, Any
# Core imports
from ..controllers import AbstractFlashChromatographySystem, in_simulation_device_returns
from ..exceptions import PLConnectionError, PLDeviceReplyError
from ..models import ConnectionParameters, LabDeviceCommands, LabDeviceReply
class C815Commands(LabDeviceCommands):
"""Collection of command definitions for C815 flash chromatography system.
"""
# ################### Configuration constants #############################
C815_IDLE_STATE = "Idle"
C815_SYSTEMMODEL = "C815_FlashAdvanced"
# !!! THESE VALUES ARE AUTO-GENERATED FROM API SPECIFICATIONS !!!
# ################### Process parameters #############################
GET_SYSTEMCLASS = {'name': 'GET_SYSTEMCLASS', 'method': 'GET', 'endpoint': '/api/v1/Info', 'path': ['systemClass'], 'reply': {'type': str}}
GET_SYSTEMLINE = {'name': 'GET_SYSTEMLINE', 'method': 'GET', 'endpoint': '/api/v1/Info', 'path': ['systemLine'], 'reply': {'type': str}}
GET_SYSTEMNAME = {'name': 'GET_SYSTEMNAME', 'method': 'GET', 'endpoint': '/api/v1/Info', 'path': ['systemName'], 'reply': {'type': str}}
GET_SYSTEMMODEL = {'name': 'GET_SYSTEMMODEL', 'method': 'GET', 'endpoint': '/api/v1/Info', 'path': ['systemModel'], 'reply': {'type': str}}
GET_DETECTORS = {'name': 'GET_DETECTORS', 'method': 'GET', 'endpoint': '/api/v1/Info', 'path': ['detectors'], 'reply': {'type': list}}
GET_PUMP_PUMPTYPE = {'name': 'GET_PUMP_PUMPTYPE', 'method': 'GET', 'endpoint': '/api/v1/Info', 'path': ['pump', 'pumpType'], 'reply': {'type': str}}
GET_PUMP_FIRMWARE = {'name': 'GET_PUMP_FIRMWARE', 'method': 'GET', 'endpoint': '/api/v1/Info', 'path': ['pump', 'firmware'], 'reply': {'type': str}}
GET_PUMP_HARDWARE = {'name': 'GET_PUMP_HARDWARE', 'method': 'GET', 'endpoint': '/api/v1/Info', 'path': ['pump', 'hardware'], 'reply': {'type': str}}
GET_FRACTIONCOLLECTOR_FIRMWARE = {'name': 'GET_FRACTIONCOLLECTOR_FIRMWARE', 'method': 'GET', 'endpoint': '/api/v1/Info', 'path': ['fractionCollector', 'firmware'], 'reply': {'type': str}}
GET_FRACTIONCOLLECTOR_TRAYS = {'name': 'GET_FRACTIONCOLLECTOR_TRAYS', 'method': 'GET', 'endpoint': '/api/v1/Info', 'path': ['fractionCollector', 'trays'], 'reply': {'type': list}}
GET_COLUMN_VERSION = {'name': 'GET_COLUMN_VERSION', 'method': 'GET', 'endpoint': '/api/v1/Info', 'path': ['column', 'version'], 'reply': {'type': str}}
GET_COLUMN_COLUMNNAME = {'name': 'GET_COLUMN_COLUMNNAME', 'method': 'GET', 'endpoint': '/api/v1/Info', 'path': ['column', 'columnName'], 'reply': {'type': str}}
GET_COLUMN_DATA = {'name': 'GET_COLUMN_DATA', 'method': 'GET', 'endpoint': '/api/v1/Info', 'path': ['column', 'data'], 'reply': {'type': str}}
GET_RUNNINGSTATE = {'name': 'GET_RUNNINGSTATE', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['runningState'], 'reply': {'type': str}}
GET_RUNMODE = {'name': 'GET_RUNMODE', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['runMode'], 'reply': {'type': str}}
GET_SENSORS_SOLVENTPRESSUREAFTERPUMP = {'name': 'GET_SENSORS_SOLVENTPRESSUREAFTERPUMP', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['sensors', 'solventPressureAfterPump'], 'reply': {'type': float}}
GET_SENSORS_SOLVENTPRESSUREAFTERCOLUMN = {'name': 'GET_SENSORS_SOLVENTPRESSUREAFTERCOLUMN', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['sensors', 'solventPressureAfterColumn'], 'reply': {'type': float}}
GET_SENSORS_AIRPRESSURENEBULIZER = {'name': 'GET_SENSORS_AIRPRESSURENEBULIZER', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['sensors', 'airPressureNebulizer'], 'reply': {'type': float}}
GET_SENSORS_AIRPRESSUREINLET = {'name': 'GET_SENSORS_AIRPRESSUREINLET', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['sensors', 'airPressureInlet'], 'reply': {'type': float}}
GET_SENSORS_VAPORLEVEL = {'name': 'GET_SENSORS_VAPORLEVEL', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['sensors', 'vaporLevel'], 'reply': {'type': int}}
GET_SENSORS_SOLVENTLEVELS = {'name': 'GET_SENSORS_SOLVENTLEVELS', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['sensors', 'solventLevels'], 'reply': {'type': list}}
GET_SENSORS_WASTELEVEL = {'name': 'GET_SENSORS_WASTELEVEL', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['sensors', 'wasteLevel'], 'reply': {'type': float}}
GET_AIRSYSTEM_ISENABLED = {'name': 'GET_AIRSYSTEM_ISENABLED', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['airSystem', 'isEnabled'], 'reply': {'type': bool}}
GET_AIRSYSTEM_VALVEPOS = {'name': 'GET_AIRSYSTEM_VALVEPOS', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['airSystem', 'valvePos'], 'reply': {'type': str}}
GET_ELSDDETECTOR_LASERISENABLED = {'name': 'GET_ELSDDETECTOR_LASERISENABLED', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['elsdDetector', 'laserIsEnabled'], 'reply': {'type': bool}}
GET_ELSDDETECTOR_LASERVOLTAGE = {'name': 'GET_ELSDDETECTOR_LASERVOLTAGE', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['elsdDetector', 'laserVoltage'], 'reply': {'type': float}}
GET_ELSDDETECTOR_SHUTTLEVALVEISENABLED = {'name': 'GET_ELSDDETECTOR_SHUTTLEVALVEISENABLED', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['elsdDetector', 'shuttleValveIsEnabled'], 'reply': {'type': bool}}
GET_ELSDDETECTOR_CARRIERFLOWISENABLED = {'name': 'GET_ELSDDETECTOR_CARRIERFLOWISENABLED', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['elsdDetector', 'carrierFlowIsEnabled'], 'reply': {'type': bool}}
GET_ELSDDETECTOR_SENSITIVITY = {'name': 'GET_ELSDDETECTOR_SENSITIVITY', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['elsdDetector', 'sensitivity'], 'reply': {'type': str}}
GET_ELSDDETECTOR_SIGNAL_TIMESINCESTART = {'name': 'GET_ELSDDETECTOR_SIGNAL_TIMESINCESTART', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['elsdDetector', 'signal', 'timeSinceStart'], 'reply': {'type': str}}
GET_ELSDDETECTOR_SIGNAL_SIGNAL = {'name': 'GET_ELSDDETECTOR_SIGNAL_SIGNAL', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['elsdDetector', 'signal', 'signal'], 'reply': {'type': float}}
GET_FRACTIONCOLLECTOR_POSITION_TRAY = {'name': 'GET_FRACTIONCOLLECTOR_POSITION_TRAY', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['fractionCollector', 'position', 'tray'], 'reply': {'type': str}}
GET_FRACTIONCOLLECTOR_POSITION_VIAL = {'name': 'GET_FRACTIONCOLLECTOR_POSITION_VIAL', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['fractionCollector', 'position', 'vial'], 'reply': {'type': str}}
GET_FRACTIONCOLLECTOR_COLLECTIONTASK_ACTION = {'name': 'GET_FRACTIONCOLLECTOR_COLLECTIONTASK_ACTION', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['fractionCollector', 'collectionTask', 'action'], 'reply': {'type': str}}
GET_SOLVENTSYSTEM_FLOWISENABLED = {'name': 'GET_SOLVENTSYSTEM_FLOWISENABLED', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['solventSystem', 'flowIsEnabled'], 'reply': {'type': bool}}
GET_SOLVENTSYSTEM_FLOWRATE = {'name': 'GET_SOLVENTSYSTEM_FLOWRATE', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['solventSystem', 'flowRate'], 'reply': {'type': int}}
GET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE1PERCENTAGE = {'name': 'GET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE1PERCENTAGE', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['solventSystem', 'solventMixture', 'line1Percentage'], 'reply': {'type': float}}
GET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE2PERCENTAGE = {'name': 'GET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE2PERCENTAGE', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['solventSystem', 'solventMixture', 'line2Percentage'], 'reply': {'type': float}}
GET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE3PERCENTAGE = {'name': 'GET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE3PERCENTAGE', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['solventSystem', 'solventMixture', 'line3Percentage'], 'reply': {'type': float}}
GET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE4PERCENTAGE = {'name': 'GET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE4PERCENTAGE', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['solventSystem', 'solventMixture', 'line4Percentage'], 'reply': {'type': float}}
GET_SOLVENTSYSTEM_SAMPLEINJECTIONVALVEPOS = {'name': 'GET_SOLVENTSYSTEM_SAMPLEINJECTIONVALVEPOS', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['solventSystem', 'sampleInjectionValvePos'], 'reply': {'type': str}}
GET_SOLVENTSYSTEM_MODE = {'name': 'GET_SOLVENTSYSTEM_MODE', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['solventSystem', 'mode'], 'reply': {'type': str}}
GET_UVDETECTOR_ABSORBANCE_TIMESINCESTART = {'name': 'GET_UVDETECTOR_ABSORBANCE_TIMESINCESTART', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'absorbance', 'timeSinceStart'], 'reply': {'type': str}}
GET_UVDETECTOR_ABSORBANCE_CH1 = {'name': 'GET_UVDETECTOR_ABSORBANCE_CH1', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'absorbance', 'ch1'], 'reply': {'type': float}}
GET_UVDETECTOR_ABSORBANCE_CH2 = {'name': 'GET_UVDETECTOR_ABSORBANCE_CH2', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'absorbance', 'ch2'], 'reply': {'type': float}}
GET_UVDETECTOR_ABSORBANCE_CH3 = {'name': 'GET_UVDETECTOR_ABSORBANCE_CH3', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'absorbance', 'ch3'], 'reply': {'type': float}}
GET_UVDETECTOR_ABSORBANCE_CH4 = {'name': 'GET_UVDETECTOR_ABSORBANCE_CH4', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'absorbance', 'ch4'], 'reply': {'type': float}}
GET_UVDETECTOR_ABSORBANCE_SCAN = {'name': 'GET_UVDETECTOR_ABSORBANCE_SCAN', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'absorbance', 'scan'], 'reply': {'type': float}}
GET_UVDETECTOR_WAVELENGTHS_CH1 = {'name': 'GET_UVDETECTOR_WAVELENGTHS_CH1', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'wavelengths', 'ch1'], 'reply': {'type': int}}
GET_UVDETECTOR_WAVELENGTHS_CH2 = {'name': 'GET_UVDETECTOR_WAVELENGTHS_CH2', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'wavelengths', 'ch2'], 'reply': {'type': int}}
GET_UVDETECTOR_WAVELENGTHS_CH3 = {'name': 'GET_UVDETECTOR_WAVELENGTHS_CH3', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'wavelengths', 'ch3'], 'reply': {'type': int}}
GET_UVDETECTOR_WAVELENGTHS_CH4 = {'name': 'GET_UVDETECTOR_WAVELENGTHS_CH4', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'wavelengths', 'ch4'], 'reply': {'type': int}}
GET_UVDETECTOR_WAVELENGTHS_SCANSTART = {'name': 'GET_UVDETECTOR_WAVELENGTHS_SCANSTART', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'wavelengths', 'scanStart'], 'reply': {'type': int}}
GET_UVDETECTOR_WAVELENGTHS_SCANEND = {'name': 'GET_UVDETECTOR_WAVELENGTHS_SCANEND', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'wavelengths', 'scanEnd'], 'reply': {'type': int}}
GET_UVDETECTOR_ENABLEDCHANNELS_CH1 = {'name': 'GET_UVDETECTOR_ENABLEDCHANNELS_CH1', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'enabledChannels', 'ch1'], 'reply': {'type': str}}
GET_UVDETECTOR_ENABLEDCHANNELS_CH2 = {'name': 'GET_UVDETECTOR_ENABLEDCHANNELS_CH2', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'enabledChannels', 'ch2'], 'reply': {'type': str}}
GET_UVDETECTOR_ENABLEDCHANNELS_CH3 = {'name': 'GET_UVDETECTOR_ENABLEDCHANNELS_CH3', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'enabledChannels', 'ch3'], 'reply': {'type': str}}
GET_UVDETECTOR_ENABLEDCHANNELS_CH4 = {'name': 'GET_UVDETECTOR_ENABLEDCHANNELS_CH4', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'enabledChannels', 'ch4'], 'reply': {'type': str}}
GET_UVDETECTOR_ENABLEDCHANNELS_SCAN = {'name': 'GET_UVDETECTOR_ENABLEDCHANNELS_SCAN', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'enabledChannels', 'scan'], 'reply': {'type': str}}
GET_UVDETECTOR_SENSITIVITY = {'name': 'GET_UVDETECTOR_SENSITIVITY', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'sensitivity'], 'reply': {'type': str}}
GET_UVDETECTOR_SPECTRUM_TIMESINCESTART = {'name': 'GET_UVDETECTOR_SPECTRUM_TIMESINCESTART', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'spectrum', 'timeSinceStart'], 'reply': {'type': str}}
GET_UVDETECTOR_SPECTRUM_VALUES = {'name': 'GET_UVDETECTOR_SPECTRUM_VALUES', 'method': 'GET', 'endpoint': '/api/v1/Process', 'path': ['uvDetector', 'spectrum', 'values'], 'reply': {'type': list}}
SET_RUNMODE = {'name': 'SET_RUNMODE', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Flash', 'Prep']}, 'path': ['runMode']}
SET_AIRSYSTEM_ISENABLED = {'name': 'SET_AIRSYSTEM_ISENABLED', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': bool, 'check': None, 'path': ['airSystem', 'isEnabled']}
SET_AIRSYSTEM_VALVEPOS = {'name': 'SET_AIRSYSTEM_VALVEPOS', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Off', 'Elsd', 'Column']}, 'path': ['airSystem', 'valvePos']}
SET_ELSDDETECTOR_LASERISENABLED = {'name': 'SET_ELSDDETECTOR_LASERISENABLED', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': bool, 'check': None, 'path': ['elsdDetector', 'laserIsEnabled']}
SET_ELSDDETECTOR_SHUTTLEVALVEISENABLED = {'name': 'SET_ELSDDETECTOR_SHUTTLEVALVEISENABLED', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': bool, 'check': None, 'path': ['elsdDetector', 'shuttleValveIsEnabled']}
SET_ELSDDETECTOR_CARRIERFLOWISENABLED = {'name': 'SET_ELSDDETECTOR_CARRIERFLOWISENABLED', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': bool, 'check': None, 'path': ['elsdDetector', 'carrierFlowIsEnabled']}
SET_ELSDDETECTOR_SENSITIVITY = {'name': 'SET_ELSDDETECTOR_SENSITIVITY', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Low', 'High']}, 'path': ['elsdDetector', 'sensitivity']}
SET_FRACTIONCOLLECTOR_POSITION_TRAY = {'name': 'SET_FRACTIONCOLLECTOR_POSITION_TRAY', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Left', 'Right', 'Unknown']}, 'path': ['fractionCollector', 'position', 'tray']}
SET_FRACTIONCOLLECTOR_POSITION_VIAL = {'name': 'SET_FRACTIONCOLLECTOR_POSITION_VIAL', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Unknown', 'Home']}, 'path': ['fractionCollector', 'position', 'vial']}
SET_FRACTIONCOLLECTOR_COLLECTIONTASK_ACTION = {'name': 'SET_FRACTIONCOLLECTOR_COLLECTIONTASK_ACTION', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Waste', 'Vial']}, 'path': ['fractionCollector', 'collectionTask', 'action']}
SET_SOLVENTSYSTEM_FLOWISENABLED = {'name': 'SET_SOLVENTSYSTEM_FLOWISENABLED', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': bool, 'check': None, 'path': ['solventSystem', 'flowIsEnabled']}
SET_SOLVENTSYSTEM_FLOWRATE = {'name': 'SET_SOLVENTSYSTEM_FLOWRATE', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': int, 'check': None, 'path': ['solventSystem', 'flowRate']}
SET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE1PERCENTAGE = {'name': 'SET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE1PERCENTAGE', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': float, 'check': None, 'path': ['solventSystem', 'solventMixture', 'line1Percentage']}
SET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE2PERCENTAGE = {'name': 'SET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE2PERCENTAGE', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': float, 'check': None, 'path': ['solventSystem', 'solventMixture', 'line2Percentage']}
SET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE3PERCENTAGE = {'name': 'SET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE3PERCENTAGE', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': float, 'check': None, 'path': ['solventSystem', 'solventMixture', 'line3Percentage']}
SET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE4PERCENTAGE = {'name': 'SET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE4PERCENTAGE', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': float, 'check': None, 'path': ['solventSystem', 'solventMixture', 'line4Percentage']}
SET_SOLVENTSYSTEM_SAMPLEINJECTIONVALVEPOS = {'name': 'SET_SOLVENTSYSTEM_SAMPLEINJECTIONVALVEPOS', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Load', 'Separation']}, 'path': ['solventSystem', 'sampleInjectionValvePos']}
SET_SOLVENTSYSTEM_MODE = {'name': 'SET_SOLVENTSYSTEM_MODE', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Flash', 'Prep']}, 'path': ['solventSystem', 'mode']}
SET_UVDETECTOR_WAVELENGTHS_CH1 = {'name': 'SET_UVDETECTOR_WAVELENGTHS_CH1', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': int, 'check': None, 'path': ['uvDetector', 'wavelengths', 'ch1']}
SET_UVDETECTOR_WAVELENGTHS_CH2 = {'name': 'SET_UVDETECTOR_WAVELENGTHS_CH2', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': int, 'check': None, 'path': ['uvDetector', 'wavelengths', 'ch2']}
SET_UVDETECTOR_WAVELENGTHS_CH3 = {'name': 'SET_UVDETECTOR_WAVELENGTHS_CH3', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': int, 'check': None, 'path': ['uvDetector', 'wavelengths', 'ch3']}
SET_UVDETECTOR_WAVELENGTHS_CH4 = {'name': 'SET_UVDETECTOR_WAVELENGTHS_CH4', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': int, 'check': None, 'path': ['uvDetector', 'wavelengths', 'ch4']}
SET_UVDETECTOR_WAVELENGTHS_SCANSTART = {'name': 'SET_UVDETECTOR_WAVELENGTHS_SCANSTART', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': int, 'check': None, 'path': ['uvDetector', 'wavelengths', 'scanStart']}
SET_UVDETECTOR_WAVELENGTHS_SCANEND = {'name': 'SET_UVDETECTOR_WAVELENGTHS_SCANEND', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': int, 'check': None, 'path': ['uvDetector', 'wavelengths', 'scanEnd']}
SET_UVDETECTOR_ENABLEDCHANNELS_CH1 = {'name': 'SET_UVDETECTOR_ENABLEDCHANNELS_CH1', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Off', 'On', 'Monitor']}, 'path': ['uvDetector', 'enabledChannels', 'ch1']}
SET_UVDETECTOR_ENABLEDCHANNELS_CH2 = {'name': 'SET_UVDETECTOR_ENABLEDCHANNELS_CH2', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Off', 'On', 'Monitor']}, 'path': ['uvDetector', 'enabledChannels', 'ch2']}
SET_UVDETECTOR_ENABLEDCHANNELS_CH3 = {'name': 'SET_UVDETECTOR_ENABLEDCHANNELS_CH3', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Off', 'On', 'Monitor']}, 'path': ['uvDetector', 'enabledChannels', 'ch3']}
SET_UVDETECTOR_ENABLEDCHANNELS_CH4 = {'name': 'SET_UVDETECTOR_ENABLEDCHANNELS_CH4', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Off', 'On', 'Monitor']}, 'path': ['uvDetector', 'enabledChannels', 'ch4']}
SET_UVDETECTOR_ENABLEDCHANNELS_SCAN = {'name': 'SET_UVDETECTOR_ENABLEDCHANNELS_SCAN', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Off', 'On', 'Monitor']}, 'path': ['uvDetector', 'enabledChannels', 'scan']}
SET_UVDETECTOR_SENSITIVITY = {'name': 'SET_UVDETECTOR_SENSITIVITY', 'method': 'PUT', 'endpoint': '/api/v1/Process', 'type': str, 'check': {'values': ['Low', 'High']}, 'path': ['uvDetector', 'sensitivity']}
class C815FlashChromatographySystem(AbstractFlashChromatographySystem):
"""
This provides a Python class for the Buchi C815 flash chromatography system
based on the Buchi OpenAPI specification v.1
"""
def __init__(self,
device_name: str,
connection_mode: str,
address: Optional[str],
port: Union[str, int],
user: Optional[str],
password: Optional[str]) -> None:
# Load commands from helper class
self.cmd = C815Commands
# Connection settings
connection_parameters: ConnectionParameters = {}
connection_parameters["user"] = user
connection_parameters["schema"] = "http"
connection_parameters["password"] = password
connection_parameters["address"] = address
connection_parameters["port"] = port
connection_parameters["verify_ssl"] = False
connection_parameters["headers"] = {"Content-Type": "application/json"}
super().__init__(
device_name, connection_mode, connection_parameters)
# Protocol settings
self.reply_prefix = None
self.reply_terminator = None
# Disable requests warnings about Buchi self-signed certificate
warnings.filterwarnings("ignore", message="InsecureRequestWarning")
def prepare_message(self, cmd: Dict, value: Any) -> Any:
""" Checks parameter value if necessary and prepares JSON payload
"""
message = {}
message["endpoint"] = cmd["endpoint"]
message["method"] = cmd["method"]
# Prepare payload
payload = None
# Check that value is empty for GET requests
if cmd["method"] == "GET":
if value is not None:
self.logger.warning("Trying to send GET request with non-empty payload <%s>", value)
else:
path_to_payload = cmd["path"].copy()
parameter = path_to_payload.pop()
payload = {parameter: value}
# The easiest way to build the rest of the nested dict we need
# is to start bottom up
path_to_payload.reverse()
# Wrap the rest of stuff around
for item in path_to_payload:
payload = {item: payload}
payload = json.dumps(payload)
message["data"] = payload
self.logger.debug("prepare_message()::constructed payload <%s>", payload)
return message
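    # Illustration (not executed) of the bottom-up construction above: for a call
    # such as prepare_message(self.cmd.SET_UVDETECTOR_WAVELENGTHS_CH1, 254), the
    # path ['uvDetector', 'wavelengths', 'ch1'] is folded into
    #     {"uvDetector": {"wavelengths": {"ch1": 254}}}
    # which is then JSON-encoded and sent as a PUT to /api/v1/Process.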
def parse_reply(self, cmd: Dict, reply: LabDeviceReply) -> Any:
""" Parses JSON payload and returns device reply.
"""
# Extract and load reply body
if reply.content_type != "json":
raise PLDeviceReplyError(f"Invalid content-type {reply.content_type} in device reply!")
try:
reply = json.loads(reply.body)
except (json.JSONDecodeError, TypeError) as e:
raise PLDeviceReplyError("Can't transform device reply to JSON!") from e
# Extract required element from JSON tree
for item in cmd["path"]:
reply = reply[item]
# Run text parsing / type casting, if any
return super().parse_reply(cmd, reply)
def initialize_device(self) -> None:
""" Initialization sequence
"""
# TODO Add any initialization if necessary
@in_simulation_device_returns(C815Commands.C815_SYSTEMMODEL)
def is_connected(self) -> bool:
""" Stateless device - always connected
"""
try:
return self.send(self.cmd.GET_SYSTEMMODEL) == self.cmd.C815_SYSTEMMODEL
except PLConnectionError:
return False
@in_simulation_device_returns(C815Commands.C815_IDLE_STATE)
def is_idle(self) -> bool:
""" Checks whether the device is idle
"""
        return self.get_runningstate() == self.cmd.C815_IDLE_STATE
def check_errors(self) -> None:
""" Not supported on this model
"""
def clear_errors(self) -> None:
""" Not supported on this model
"""
def get_status(self) -> str:
""" Gets device status.
"""
return self.get_runningstate()
def get_systemclass(self) -> str:
"""
"""
return self.send(self.cmd.GET_SYSTEMCLASS)
def get_systemline(self) -> str:
"""
"""
return self.send(self.cmd.GET_SYSTEMLINE)
def get_systemname(self) -> str:
"""
"""
return self.send(self.cmd.GET_SYSTEMNAME)
def get_systemmodel(self) -> str:
"""
"""
return self.send(self.cmd.GET_SYSTEMMODEL)
def get_detectors(self) -> list:
"""
"""
return self.send(self.cmd.GET_DETECTORS)
def get_pump_pumptype(self) -> str:
"""
"""
return self.send(self.cmd.GET_PUMP_PUMPTYPE)
def get_pump_firmware(self) -> str:
"""
"""
return self.send(self.cmd.GET_PUMP_FIRMWARE)
def get_pump_hardware(self) -> str:
"""
"""
return self.send(self.cmd.GET_PUMP_HARDWARE)
def get_fractioncollector_firmware(self) -> str:
"""
"""
return self.send(self.cmd.GET_FRACTIONCOLLECTOR_FIRMWARE)
def get_fractioncollector_trays(self) -> list:
"""
"""
return self.send(self.cmd.GET_FRACTIONCOLLECTOR_TRAYS)
def get_column_version(self) -> str:
"""
"""
return self.send(self.cmd.GET_COLUMN_VERSION)
def get_column_columnname(self) -> str:
"""
"""
return self.send(self.cmd.GET_COLUMN_COLUMNNAME)
def get_column_data(self) -> str:
"""
"""
return self.send(self.cmd.GET_COLUMN_DATA)
def get_runningstate(self) -> str:
"""
"""
return self.send(self.cmd.GET_RUNNINGSTATE)
def get_runmode(self) -> str:
"""
"""
return self.send(self.cmd.GET_RUNMODE)
def get_sensors_solventpressureafterpump(self) -> float:
"""
"""
return self.send(self.cmd.GET_SENSORS_SOLVENTPRESSUREAFTERPUMP)
def get_sensors_solventpressureaftercolumn(self) -> float:
"""
"""
return self.send(self.cmd.GET_SENSORS_SOLVENTPRESSUREAFTERCOLUMN)
def get_sensors_airpressurenebulizer(self) -> float:
"""
"""
return self.send(self.cmd.GET_SENSORS_AIRPRESSURENEBULIZER)
def get_sensors_airpressureinlet(self) -> float:
"""
"""
return self.send(self.cmd.GET_SENSORS_AIRPRESSUREINLET)
def get_sensors_vaporlevel(self) -> int:
"""
"""
return self.send(self.cmd.GET_SENSORS_VAPORLEVEL)
def get_sensors_solventlevels(self) -> list:
"""
"""
return self.send(self.cmd.GET_SENSORS_SOLVENTLEVELS)
def get_sensors_wastelevel(self) -> float:
"""
"""
return self.send(self.cmd.GET_SENSORS_WASTELEVEL)
def get_airsystem_isenabled(self) -> bool:
"""
"""
return self.send(self.cmd.GET_AIRSYSTEM_ISENABLED)
def get_airsystem_valvepos(self) -> str:
"""
"""
return self.send(self.cmd.GET_AIRSYSTEM_VALVEPOS)
def get_elsddetector_laserisenabled(self) -> bool:
"""
"""
return self.send(self.cmd.GET_ELSDDETECTOR_LASERISENABLED)
def get_elsddetector_laservoltage(self) -> float:
"""
"""
return self.send(self.cmd.GET_ELSDDETECTOR_LASERVOLTAGE)
def get_elsddetector_shuttlevalveisenabled(self) -> bool:
"""
"""
return self.send(self.cmd.GET_ELSDDETECTOR_SHUTTLEVALVEISENABLED)
def get_elsddetector_carrierflowisenabled(self) -> bool:
"""
"""
return self.send(self.cmd.GET_ELSDDETECTOR_CARRIERFLOWISENABLED)
def get_elsddetector_sensitivity(self) -> str:
"""
"""
return self.send(self.cmd.GET_ELSDDETECTOR_SENSITIVITY)
def get_elsddetector_signal_timesincestart(self) -> str:
"""
"""
return self.send(self.cmd.GET_ELSDDETECTOR_SIGNAL_TIMESINCESTART)
def get_elsddetector_signal_signal(self) -> float:
"""
"""
return self.send(self.cmd.GET_ELSDDETECTOR_SIGNAL_SIGNAL)
def get_fractioncollector_position_tray(self) -> str:
"""
"""
return self.send(self.cmd.GET_FRACTIONCOLLECTOR_POSITION_TRAY)
def get_fractioncollector_position_vial(self) -> str:
"""
"""
return self.send(self.cmd.GET_FRACTIONCOLLECTOR_POSITION_VIAL)
def get_fractioncollector_collectiontask_action(self) -> str:
"""
"""
return self.send(self.cmd.GET_FRACTIONCOLLECTOR_COLLECTIONTASK_ACTION)
def get_solventsystem_flowisenabled(self) -> bool:
"""
"""
return self.send(self.cmd.GET_SOLVENTSYSTEM_FLOWISENABLED)
def get_solventsystem_flowrate(self) -> int:
"""
"""
return self.send(self.cmd.GET_SOLVENTSYSTEM_FLOWRATE)
def get_solventsystem_solventmixture_line1percentage(self) -> float:
"""
"""
return self.send(self.cmd.GET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE1PERCENTAGE)
def get_solventsystem_solventmixture_line2percentage(self) -> float:
"""
"""
return self.send(self.cmd.GET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE2PERCENTAGE)
def get_solventsystem_solventmixture_line3percentage(self) -> float:
"""
"""
return self.send(self.cmd.GET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE3PERCENTAGE)
def get_solventsystem_solventmixture_line4percentage(self) -> float:
"""
"""
return self.send(self.cmd.GET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE4PERCENTAGE)
def get_solventsystem_sampleinjectionvalvepos(self) -> str:
"""
"""
return self.send(self.cmd.GET_SOLVENTSYSTEM_SAMPLEINJECTIONVALVEPOS)
def get_solventsystem_mode(self) -> str:
"""
"""
return self.send(self.cmd.GET_SOLVENTSYSTEM_MODE)
def get_uvdetector_absorbance_timesincestart(self) -> str:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_ABSORBANCE_TIMESINCESTART)
def get_uvdetector_absorbance_ch1(self) -> float:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_ABSORBANCE_CH1)
def get_uvdetector_absorbance_ch2(self) -> float:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_ABSORBANCE_CH2)
def get_uvdetector_absorbance_ch3(self) -> float:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_ABSORBANCE_CH3)
def get_uvdetector_absorbance_ch4(self) -> float:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_ABSORBANCE_CH4)
def get_uvdetector_absorbance_scan(self) -> float:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_ABSORBANCE_SCAN)
def get_uvdetector_wavelengths_ch1(self) -> int:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_WAVELENGTHS_CH1)
def get_uvdetector_wavelengths_ch2(self) -> int:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_WAVELENGTHS_CH2)
def get_uvdetector_wavelengths_ch3(self) -> int:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_WAVELENGTHS_CH3)
def get_uvdetector_wavelengths_ch4(self) -> int:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_WAVELENGTHS_CH4)
def get_uvdetector_wavelengths_scanstart(self) -> int:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_WAVELENGTHS_SCANSTART)
def get_uvdetector_wavelengths_scanend(self) -> int:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_WAVELENGTHS_SCANEND)
def get_uvdetector_enabledchannels_ch1(self) -> str:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_ENABLEDCHANNELS_CH1)
def get_uvdetector_enabledchannels_ch2(self) -> str:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_ENABLEDCHANNELS_CH2)
def get_uvdetector_enabledchannels_ch3(self) -> str:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_ENABLEDCHANNELS_CH3)
def get_uvdetector_enabledchannels_ch4(self) -> str:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_ENABLEDCHANNELS_CH4)
def get_uvdetector_enabledchannels_scan(self) -> str:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_ENABLEDCHANNELS_SCAN)
def get_uvdetector_sensitivity(self) -> str:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_SENSITIVITY)
def get_uvdetector_spectrum_timesincestart(self) -> str:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_SPECTRUM_TIMESINCESTART)
def get_uvdetector_spectrum_values(self) -> list:
"""
"""
return self.send(self.cmd.GET_UVDETECTOR_SPECTRUM_VALUES)
def set_runmode(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_RUNMODE, value)
def set_airsystem_isenabled(self, value: bool) -> None:
"""
"""
self.send(self.cmd.SET_AIRSYSTEM_ISENABLED, value)
def set_airsystem_valvepos(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_AIRSYSTEM_VALVEPOS, value)
def set_elsddetector_laserisenabled(self, value: bool) -> None:
"""
"""
self.send(self.cmd.SET_ELSDDETECTOR_LASERISENABLED, value)
def set_elsddetector_shuttlevalveisenabled(self, value: bool) -> None:
"""
"""
self.send(self.cmd.SET_ELSDDETECTOR_SHUTTLEVALVEISENABLED, value)
def set_elsddetector_carrierflowisenabled(self, value: bool) -> None:
"""
"""
self.send(self.cmd.SET_ELSDDETECTOR_CARRIERFLOWISENABLED, value)
def set_elsddetector_sensitivity(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_ELSDDETECTOR_SENSITIVITY, value)
def set_fractioncollector_position_tray(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_FRACTIONCOLLECTOR_POSITION_TRAY, value)
def set_fractioncollector_position_vial(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_FRACTIONCOLLECTOR_POSITION_VIAL, value)
def set_fractioncollector_collectiontask_action(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_FRACTIONCOLLECTOR_COLLECTIONTASK_ACTION, value)
def set_solventsystem_flowisenabled(self, value: bool) -> None:
"""
"""
self.send(self.cmd.SET_SOLVENTSYSTEM_FLOWISENABLED, value)
def set_solventsystem_flowrate(self, value: int) -> None:
"""
"""
self.send(self.cmd.SET_SOLVENTSYSTEM_FLOWRATE, value)
def set_solventsystem_solventmixture_line1percentage(self, value: float) -> None:
"""
"""
self.send(self.cmd.SET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE1PERCENTAGE, value)
def set_solventsystem_solventmixture_line2percentage(self, value: float) -> None:
"""
"""
self.send(self.cmd.SET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE2PERCENTAGE, value)
def set_solventsystem_solventmixture_line3percentage(self, value: float) -> None:
"""
"""
self.send(self.cmd.SET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE3PERCENTAGE, value)
def set_solventsystem_solventmixture_line4percentage(self, value: float) -> None:
"""
"""
self.send(self.cmd.SET_SOLVENTSYSTEM_SOLVENTMIXTURE_LINE4PERCENTAGE, value)
def set_solventsystem_sampleinjectionvalvepos(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_SOLVENTSYSTEM_SAMPLEINJECTIONVALVEPOS, value)
def set_solventsystem_mode(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_SOLVENTSYSTEM_MODE, value)
def set_uvdetector_wavelengths_ch1(self, value: int) -> None:
"""
"""
self.send(self.cmd.SET_UVDETECTOR_WAVELENGTHS_CH1, value)
def set_uvdetector_wavelengths_ch2(self, value: int) -> None:
"""
"""
self.send(self.cmd.SET_UVDETECTOR_WAVELENGTHS_CH2, value)
def set_uvdetector_wavelengths_ch3(self, value: int) -> None:
"""
"""
self.send(self.cmd.SET_UVDETECTOR_WAVELENGTHS_CH3, value)
def set_uvdetector_wavelengths_ch4(self, value: int) -> None:
"""
"""
self.send(self.cmd.SET_UVDETECTOR_WAVELENGTHS_CH4, value)
def set_uvdetector_wavelengths_scanstart(self, value: int) -> None:
"""
"""
self.send(self.cmd.SET_UVDETECTOR_WAVELENGTHS_SCANSTART, value)
def set_uvdetector_wavelengths_scanend(self, value: int) -> None:
"""
"""
self.send(self.cmd.SET_UVDETECTOR_WAVELENGTHS_SCANEND, value)
def set_uvdetector_enabledchannels_ch1(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_UVDETECTOR_ENABLEDCHANNELS_CH1, value)
def set_uvdetector_enabledchannels_ch2(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_UVDETECTOR_ENABLEDCHANNELS_CH2, value)
def set_uvdetector_enabledchannels_ch3(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_UVDETECTOR_ENABLEDCHANNELS_CH3, value)
def set_uvdetector_enabledchannels_ch4(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_UVDETECTOR_ENABLEDCHANNELS_CH4, value)
def set_uvdetector_enabledchannels_scan(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_UVDETECTOR_ENABLEDCHANNELS_SCAN, value)
def set_uvdetector_sensitivity(self, value: str) -> None:
"""
"""
self.send(self.cmd.SET_UVDETECTOR_SENSITIVITY, value)
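# Minimal usage sketch (an assumption, not part of the original driver): the
# connection values below are placeholders and the accepted connection_mode
# strings depend on AbstractFlashChromatographySystem, so treat this only as an
# illustration of how the generated wrappers are called.
if __name__ == "__main__":
    system = C815FlashChromatographySystem(
        device_name="c815",
        connection_mode="http",    # hypothetical value
        address="192.168.1.10",    # placeholder address
        port=80,
        user=None,
        password=None,
    )
    print(system.get_systemmodel())        # read-only query of the device model
    system.set_solventsystem_flowrate(50)  # integer flow rate in device units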
|
py | 1a35bcf765600c3c0910ade72b7de279a2b2d488 | # -*- coding: utf-8 -*-
# file: training.py
# time: 2021/5/26 0026
# author: yangheng <[email protected]>
# github: https://github.com/yangheng95
# Copyright (C) 2021. All Rights Reserved.
########################################################################################################################
# train and evaluate on your own apc_datasets (need train and test apc_datasets) #
# your custom dataset_utils should have continuous polarity labels like [0, N-1] for N categories                      #
########################################################################################################################
from pyabsa.functional import Trainer
from pyabsa.functional import APCConfigManager
from pyabsa.functional import ABSADatasetList
from pyabsa.functional import APCModelList
config = APCConfigManager.get_apc_config_chinese()
config.evaluate_begin = 4
config.dropout = 0.5
config.l2reg = 0.0001
config.model = APCModelList.FAST_LCF_BERT
save_path = 'state_dict'
chinese_sets = ABSADatasetList.Chinese
sent_classifier = Trainer(config=config, # set config=None to use default model
dataset=chinese_sets, # train set and test set will be automatically detected
auto_device=True # automatic choose CUDA or CPU
)
|
py | 1a35bd5ba696c6919eade57894e66381da15bedc | import asyncio
import datetime
import logging
import time
from collections import defaultdict
from contextlib import suppress
from datetime import timedelta
from io import BytesIO
from typing import Any, Iterable, NoReturn, Optional, Set
import discord
import prettytable
import pytz
from redbot.core import Config, checks, commands
from redbot.core.utils.chat_formatting import box, pagify
from tsutils.enums import Server, StarterGroup
from tsutils.formatting import normalize_server_name
from tsutils.helper_classes import DummyObject
from tsutils.helper_functions import conditional_iterator, repeating_timer
from padevents.autoevent_mixin import AutoEvent
from padevents.enums import DungeonType, EventLength
from padevents.events import Event, EventList, SERVER_TIMEZONES
logger = logging.getLogger('red.padbot-cogs.padevents')
SUPPORTED_SERVERS = ["JP", "NA", "KR"]
GROUPS = ['red', 'blue', 'green']
class PadEvents(commands.Cog, AutoEvent):
"""Pad Event Tracker"""
def __init__(self, bot, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bot = bot
self.config = Config.get_conf(self, identifier=940373775)
self.config.register_global(sent={}, last_daychange=None)
self.config.register_guild(pingroles={})
self.config.register_channel(guerrilla_servers=[], daily_servers=[], do_aep_post=True)
self.config.register_user(dmevents=[])
# Load event data
self.events = set()
self.started_events = set()
self.fake_uid = -time.time()
self._event_loop = bot.loop.create_task(self.reload_padevents())
self._refresh_loop = bot.loop.create_task(self.do_loop())
self._daily_event_loop = bot.loop.create_task(self.show_daily_info())
async def red_get_data_for_user(self, *, user_id):
"""Get a user's personal data."""
aeds = await self.config.user_from_id(user_id).dmevents()
if aeds:
data = f"You have {len(aeds)} AEDs stored. Use" \
f" {(await self.bot.get_valid_prefixes())[0]}aed list to see what they are.\n"
else:
data = f"No data is stored for user with ID {user_id}."
return {"user_data.txt": BytesIO(data.encode())}
async def red_delete_data_for_user(self, *, requester, user_id):
"""Delete a user's personal data."""
await self.config.user_from_id(user_id).clear()
def cog_unload(self):
# Manually nulling out database because the GC for cogs seems to be pretty shitty
self.events = set()
self.started_events = set()
self._event_loop.cancel()
self._refresh_loop.cancel()
self._daily_event_loop.cancel()
async def reload_padevents(self) -> NoReturn:
await self.bot.wait_until_ready()
with suppress(asyncio.CancelledError):
async for _ in repeating_timer(60 * 60):
try:
await self.refresh_data()
except asyncio.CancelledError:
raise
except Exception:
logger.exception("Error in loop:")
async def do_loop(self) -> NoReturn:
await self.bot.wait_until_ready()
with suppress(asyncio.CancelledError):
async for _ in repeating_timer(10):
try:
await self.do_autoevents()
await self.do_eventloop()
except asyncio.CancelledError:
raise
except Exception:
logger.exception("Error in loop:")
async def show_daily_info(self) -> NoReturn:
async def is_day_change():
curserver = self.get_most_recent_day_change()
oldserver = self.config.last_daychange
if curserver != await oldserver():
await oldserver.set(curserver)
return curserver
await self.bot.wait_until_ready()
with suppress(asyncio.CancelledError):
async for server in conditional_iterator(is_day_change, poll_interval=10):
try:
await self.do_daily_post(server)
await self.do_autoevent_summary(server)
except asyncio.CancelledError:
raise
except Exception:
logger.exception("Error in loop:")
async def refresh_data(self):
dbcog: Any = self.bot.get_cog('DBCog')
await dbcog.wait_until_ready()
scheduled_events = dbcog.database.get_all_events()
new_events = set()
for se in scheduled_events:
try:
new_events.add(Event(se))
except Exception as ex:
logger.exception("Refresh error:")
self.events = self.coalesce_event_data(new_events)
self.started_events = {ev.key for ev in new_events if ev.is_started()}
async with self.config.sent() as seen:
for key, value in [*seen.items()]:
if value < time.time() - 60 * 60:
del seen[key]
async def do_eventloop(self):
events = filter(lambda e: e.is_started() and e.key not in self.started_events, self.events)
daily_refresh_servers = set()
for event in events:
self.started_events.add(event.key)
if event.event_length != EventLength.limited:
continue
for cid, data in (await self.config.all_channels()).items():
if (channel := self.bot.get_channel(cid)) is None \
or event.server not in data['guerrilla_servers']:
continue
role_name = f'{event.server}_group_{event.group_long_name()}'
                # Guild.get_role() expects a role ID, so look the role up by name instead
                role = discord.utils.get(channel.guild.roles, name=role_name)
if role and role.mentionable:
message = f"{role.mention} {event.clean_dungeon_name} is starting"
else:
message = box(f"Server {event.server}, group {event.group_long_name()}:"
f" {event.clean_dungeon_name}")
with suppress(discord.Forbidden):
await channel.send(message, allowed_mentions=discord.AllowedMentions(roles=True))
async def do_daily_post(self, server):
msg = self.make_active_text(server)
for cid, data in (await self.config.all_channels()).items():
if (channel := self.bot.get_channel(cid)) is None \
or server not in data['daily_servers']:
continue
for page in pagify(msg, delims=['\n\n']):
with suppress(discord.Forbidden):
await channel.send(box(page))
async def do_autoevent_summary(self, server):
events = EventList(self.events).with_server(server).today_only('NA')
for gid, data in (await self.config.all_guilds()).items():
if (guild := self.bot.get_guild(gid)) is None:
continue
channels = defaultdict(list)
for key, aep in data.get('pingroles', {}).items():
for channel in aep['channels']:
if channel is not None:
channels[channel].append(aep)
for cid, aeps in channels.items():
if (channel := self.bot.get_channel(cid)) is None:
continue
if not await self.config.channel(channel).do_aep_post():
continue
aepevents = events.with_func(lambda e: any(self.event_matches_autoevent(e, ae) for ae in aeps))
if not aepevents:
continue
msg = self.make_full_guerrilla_output('AEP Event', aepevents)
for page in pagify(msg, delims=['\n\n']):
with suppress(discord.Forbidden):
await channel.send(box(page))
@commands.group(aliases=['pde'])
@checks.mod_or_permissions(manage_guild=True)
async def padevents(self, ctx):
"""PAD event tracking"""
@padevents.command()
@checks.is_owner()
async def testevent(self, ctx, server: Server, seconds: int = 0, group='red'):
server = server.value
if group.lower() not in ('red', 'blue', 'green'):
group = None
dbcog: Any = self.bot.get_cog('DBCog')
await dbcog.wait_until_ready()
# TODO: Don't use this awful importing hack
dg_module = __import__('.'.join(dbcog.__module__.split('.')[:-1]) + ".models.scheduled_event_model")
timestamp = int((datetime.datetime.now(pytz.utc) + timedelta(seconds=seconds)).timestamp())
self.fake_uid -= 1
te = dg_module.models.scheduled_event_model.ScheduledEventModel(
event_id=self.fake_uid,
server_id=SUPPORTED_SERVERS.index(server),
event_type_id=-1,
start_timestamp=timestamp,
end_timestamp=timestamp + 60,
group_name=group and group.lower(),
dungeon_model=DummyObject(
name_en='fake_dungeon_name',
clean_name_en='fake_dungeon_name',
dungeon_type=DungeonType.ThreePlayer,
dungeon_id=1,
)
)
self.events.add(Event(te))
await ctx.tick()
@padevents.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_guild=True)
async def addchannel(self, ctx, channel: Optional[discord.TextChannel], server: Server):
server = server.value
async with self.config.channel(channel or ctx.channel).guerrilla_servers() as guerillas:
if server in guerillas:
return await ctx.send("Channel already active.")
guerillas.append(server)
await ctx.send("Channel now active.")
@padevents.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_guild=True)
async def rmchannel(self, ctx, channel: Optional[discord.TextChannel], server: Server):
server = server.value
async with self.config.channel(channel or ctx.channel).guerrilla_servers() as guerillas:
if server not in guerillas:
return await ctx.send("Channel already inactive.")
guerillas.remove(server)
await ctx.send("Channel now inactive.")
@padevents.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_guild=True)
async def addchanneldaily(self, ctx, channel: Optional[discord.TextChannel], server: Server):
server = server.value
async with self.config.channel(channel or ctx.channel).daily_servers() as dailies:
if server in dailies:
return await ctx.send("Channel already active.")
dailies.append(server)
await ctx.send("Channel now active.")
@padevents.command()
@commands.guild_only()
@checks.mod_or_permissions(manage_guild=True)
async def rmchanneldaily(self, ctx, channel: Optional[discord.TextChannel], server: Server):
server = server.value
async with self.config.channel(channel or ctx.channel).daily_servers() as dailies:
if server not in dailies:
return await ctx.send("Channel already inactive.")
dailies.remove(server)
await ctx.send("Channel now inactive.")
@padevents.command()
@checks.mod_or_permissions(manage_guild=True)
async def active(self, ctx, server: Server):
server = server.value
msg = self.make_active_text(server)
for page in pagify(msg, delims=['\n\n']):
await ctx.send(box(page))
def make_active_text(self, server):
server = normalize_server_name(server)
server_events = EventList(self.events).with_server(server)
active_events = server_events.active_only()
events_today = server_events.today_only(server)
active_special = active_events.with_dungeon_type(DungeonType.Special)
msg = server + " Events - " + datetime.datetime.now(SERVER_TIMEZONES[server]).strftime('%A, %B %-e')
ongoing_events = active_events.with_length(EventLength.weekly, EventLength.special)
if ongoing_events:
msg += "\n\n" + self.make_active_output('Ongoing Events', ongoing_events)
active_dailies_events = active_events.with_length(EventLength.daily)
if active_dailies_events:
msg += "\n\n" + self.make_daily_output('Daily Dungeons', active_dailies_events)
limited_events = events_today.with_length(EventLength.limited)
if limited_events:
msg += "\n\n" + self.make_full_guerrilla_output('Limited Events', limited_events)
return msg
def make_daily_output(self, table_name, event_list):
tbl = prettytable.PrettyTable([table_name])
tbl.hrules = prettytable.HEADER
tbl.vrules = prettytable.NONE
tbl.align[table_name] = "l"
for e in event_list:
tbl.add_row([e.clean_dungeon_name])
return tbl.get_string()
def make_active_output(self, table_name, event_list):
tbl = prettytable.PrettyTable(["Time", table_name])
tbl.hrules = prettytable.HEADER
tbl.vrules = prettytable.NONE
tbl.align[table_name] = "l"
tbl.align["Time"] = "r"
for e in event_list:
tbl.add_row([e.end_from_now_full_min().strip(), e.clean_dungeon_name])
return tbl.get_string()
def make_active_guerrilla_output(self, table_name: str, event_list: EventList) -> str:
tbl = prettytable.PrettyTable([table_name, "Group", "Time"])
tbl.hrules = prettytable.HEADER
tbl.vrules = prettytable.NONE
tbl.align[table_name] = "l"
tbl.align["Time"] = "r"
for e in event_list:
tbl.add_row([e.clean_dungeon_name, e.group, e.end_from_now_full_min().strip()])
return tbl.get_string()
def make_full_guerrilla_output(self, table_name, event_list):
events_by_name = defaultdict(set)
for event in event_list:
events_by_name[event.clean_dungeon_name].add(event)
rows = []
for name, events in events_by_name.items():
events = sorted(events, key=lambda e: e.open_datetime)
events_by_group = {group: [] for group in GROUPS}
for event in events:
if event.group is not None:
events_by_group[event.group].append(event)
else:
for group in GROUPS:
events_by_group[group].append(event)
while True:
row = []
for group in GROUPS:
if len(events_by_group[group]) == 0:
row.append('')
else:
# Get the timestamp of the earliest event in this group in PST
start = events_by_group[group].pop(0).open_datetime.astimezone(pytz.timezone('US/Pacific'))
row.append(start.strftime("%H:%M"))
if not any(row):
break
if row[0] == row[1] == row[2]:
rows.append([name, row[0], '=', '='])
else:
rows.append([name] + row)
header = "Times are shown in Pacific Time\n= means same for all groups\n"
table = prettytable.PrettyTable([table_name, 'Red', 'Blue', 'Green'])
table.align[table_name] = "l"
table.hrules = prettytable.HEADER
table.vrules = prettytable.ALL
for r in rows:
table.add_row(r)
return header + table.get_string() + "\n"
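    # Shape of the table produced above (dungeon names and times are illustrative
    # only): one row per dungeon, one start-time column per group in US/Pacific,
    # with '=' marking that Blue and Green share the Red group's start time.
    #
    #     Limited Events             Red     Blue    Green
    #     Super-Rare Guerrilla       07:00   =       =
    #     Two-Group Guerrilla        08:30   09:30   10:30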
@commands.command(aliases=['events'])
async def eventsna(self, ctx, group: StarterGroup = None):
"""Display upcoming daily events for NA."""
await self.do_partial(ctx, Server.NA, group)
@commands.command()
async def eventsjp(self, ctx, group: StarterGroup = None):
"""Display upcoming daily events for JP."""
await self.do_partial(ctx, Server.JP, group)
@commands.command()
async def eventskr(self, ctx, group: StarterGroup = None):
"""Display upcoming daily events for KR."""
await self.do_partial(ctx, Server.KR, group)
async def do_partial(self, ctx, server: Server, group: StarterGroup = None):
server = server.value
if group is not None:
group = GROUPS[group.value]
events = EventList(self.events)
events = events.with_server(server)
events = events.with_dungeon_type(DungeonType.SoloSpecial, DungeonType.Special)
events = events.with_length(EventLength.limited)
active_events = sorted(events.active_only(), key=lambda e: (e.open_datetime, e.dungeon_name), reverse=True)
pending_events = sorted(events.pending_only(), key=lambda e: (e.open_datetime, e.dungeon_name), reverse=True)
if group is not None:
active_events = [e for e in active_events if e.group == group.lower()]
pending_events = [e for e in pending_events if e.group == group.lower()]
group_to_active_event = {e.group: e for e in active_events}
group_to_pending_event = {e.group: e for e in pending_events}
active_events.sort(key=lambda e: (GROUPS.index(e.group or 'red'), e.open_datetime))
pending_events.sort(key=lambda e: (GROUPS.index(e.group or 'red'), e.open_datetime))
if len(active_events) == 0 and len(pending_events) == 0:
await ctx.send("No events available for " + server)
return
output = "**Events for {}**".format(server)
if len(active_events) > 0:
output += "\n\n" + "` Remaining Dungeon - Ending Time`"
for e in active_events:
output += "\n" + e.to_partial_event(self)
if len(pending_events) > 0:
output += "\n\n" + "` Dungeon - ETA`"
for e in pending_events:
output += "\n" + e.to_partial_event(self)
for page in pagify(output):
await ctx.send(page)
def get_most_recent_day_change(self):
now = datetime.datetime.utcnow().time()
if now < datetime.time(8):
return "JP"
elif now < datetime.time(15):
return "NA"
elif now < datetime.time(16):
return "KR"
else:
return "JP"
def coalesce_event_data(self, events: Iterable[Event]) -> Set[Event]:
all_events = set()
grouped = defaultdict(lambda: {})
for event in events:
if event.group is None:
all_events.add(event)
continue
key = (event.open_datetime, event.close_datetime, event.server, event.dungeon.dungeon_id)
grouped[key][event.group] = event
for _, grouped_events in grouped.items():
if len(grouped_events) != 3:
all_events.update(grouped_events.values())
continue
grouped_events['red'].group = None
all_events.add(grouped_events['red'])
return all_events
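    # Illustration of the coalescing rule above (hypothetical events): if the red,
    # blue and green copies of a guerrilla share the same open/close times, server
    # and dungeon id, only one event is kept and its group is cleared to None;
    # otherwise the per-group events are preserved unchanged.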
|
py | 1a35bd983be48abf57ec7a66b33e049276eb99e9 | from setuptools import setup, find_packages
import io
import os
here = os.path.abspath(os.path.dirname(__file__))
# Avoids IDE errors, but actual version is read from version.py
__version__ = None
exec(open('rasa_core/version.py').read())
# Get the long description from the README file
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
tests_requires = [
"pytest",
"pytest-pep8",
"pytest-services",
"pytest-cov",
"pytest-xdist",
"pytest-twisted<1.6",
"treq",
"freezegun",
]
install_requires = [
'jsonpickle',
'six',
'redis',
'fakeredis',
'nbsphinx',
'pandoc',
'future',
'numpy>=1.13',
'typing>=3.6',
'requests',
'graphviz',
'Keras',
'tensorflow>=1.7',
'h5py',
'apscheduler',
'tqdm',
'ConfigArgParse',
'networkx',
'fbmessenger>=5.0.0',
'pykwalify<=1.6.0',
'coloredlogs',
'ruamel.yaml',
'flask',
'scikit-learn',
'rasa_nlu>=0.12.0,<0.13.0',
'slackclient',
'python-telegram-bot',
'twilio',
'mattermostwrapper',
'colorhash',
]
extras_requires = {
'test': tests_requires
}
setup(
name='rasa-core',
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
# supported python versions
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries",
],
packages=find_packages(exclude=["tests", "tools"]),
version=__version__,
install_requires=install_requires,
tests_require=tests_requires,
extras_require=extras_requires,
include_package_data=True,
description="Machine learning based dialogue engine "
"for conversational software.",
long_description=long_description,
long_description_content_type="text/markdown",
author='Rasa Technologies GmbH',
author_email='[email protected]',
maintainer="Tom Bocklisch",
maintainer_email="[email protected]",
license='Apache 2.0',
keywords="nlp machine-learning machine-learning-library bot bots "
"botkit rasa conversational-agents conversational-ai chatbot"
"chatbot-framework bot-framework",
url="https://rasa.ai",
download_url="https://github.com/RasaHQ/rasa_core/archive/{}.tar.gz".format(__version__),
project_urls={
'Bug Reports': 'https://github.com/rasahq/rasa_core/issues',
'Source': 'https://github.com/rasahq/rasa_core',
},
)
print("\nWelcome to Rasa Core!")
print("If any questions please visit documentation page https://core.rasa.com")
print("or join community chat on https://gitter.im/RasaHQ/rasa_core")
|
py | 1a35bdd2d2b07f990cd4aa699e5df69cd0299f00 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013 The Plaso Project Authors.
# Please see the AUTHORS file for details on individual authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This file contains a parser for the Android SMS database.
Android SMS messages are stored in SQLite database files named mmssms.dbs.
"""
from plaso.events import time_events
from plaso.lib import eventdata
from plaso.parsers import sqlite
from plaso.parsers.sqlite_plugins import interface
class AndroidSmsEvent(time_events.JavaTimeEvent):
"""Convenience class for an Android SMS event."""
DATA_TYPE = 'android:messaging:sms'
def __init__(self, java_time, identifier, address, sms_read, sms_type, body):
"""Initializes the event object.
Args:
java_time: The Java time value.
identifier: The row identifier.
address: The phone number associated to the sender/receiver.
      sms_read: Read or Unread.
      sms_type: Sent or Received.
body: Content of the SMS text message.
"""
super(AndroidSmsEvent, self).__init__(
java_time, eventdata.EventTimestamp.CREATION_TIME)
self.offset = identifier
self.address = address
self.sms_read = sms_read
self.sms_type = sms_type
self.body = body
class AndroidSmsPlugin(interface.SQLitePlugin):
"""Parse Android SMS database."""
NAME = 'android_sms'
DESCRIPTION = u'Parser for Android text messages SQLite database files.'
# Define the needed queries.
QUERIES = [
('SELECT _id AS id, address, date, read, type, body FROM sms',
'ParseSmsRow')]
# The required tables.
REQUIRED_TABLES = frozenset(['sms'])
SMS_TYPE = {
1: u'RECEIVED',
2: u'SENT'}
SMS_READ = {
0: u'UNREAD',
1: u'READ'}
def ParseSmsRow(self, parser_context, row, query=None, **unused_kwargs):
"""Parses an SMS row.
Args:
parser_context: A parser context object (instance of ParserContext).
row: The row resulting from the query.
query: Optional query string. The default is None.
"""
# Extract and lookup the SMS type and read status.
sms_type = self.SMS_TYPE.get(row['type'], u'UNKNOWN')
sms_read = self.SMS_READ.get(row['read'], u'UNKNOWN')
event_object = AndroidSmsEvent(
row['date'], row['id'], row['address'], sms_read, sms_type, row['body'])
parser_context.ProduceEvent(
event_object, plugin_name=self.NAME, query=query)
sqlite.SQLiteParser.RegisterPlugin(AndroidSmsPlugin)
|
py | 1a35beee1a23aaf83d2944cdbf52d3361e1fed75 |
from django.conf import settings
from django.conf.urls import include
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import path, re_path
from app import views
urlpatterns = [
path('admin/', admin.site.urls),
path('api/', include('op_trans.urls')),
path('v1/', views.index, name="conversations"),
path('v2/', views.frontend, name="frontend"),
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
|
py | 1a35c00e724489ff942d7cdb5790db63053c3333 | # -*- coding: utf-8 -*-
# Copyright © 2012-2014 Roberto Alsina and others.
# Permission is hereby granted, free of charge, to any
# person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the
# Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice
# shall be included in all copies or substantial portions of
# the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS
# OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Implementation of compile_html based on asciidoc.
You will need, of course, to install asciidoc
"""
import codecs
import os
import subprocess
from nikola.plugin_categories import PageCompiler
from nikola.utils import makedirs, req_missing, write_metadata
try:
from collections import OrderedDict
except ImportError:
OrderedDict = dict # NOQA
class CompileAsciiDoc(PageCompiler):
"""Compile asciidoc into HTML."""
name = "asciidoc"
demote_headers = True
def compile_html(self, source, dest, is_two_file=True):
makedirs(os.path.dirname(dest))
binary = self.site.config.get('ASCIIDOC_BINARY', 'asciidoc')
try:
subprocess.check_call((binary, '-b', 'html5', '-s', '-o', dest, source))
except OSError as e:
            if e.strerror == 'No such file or directory':
req_missing(['asciidoc'], 'build this site (compile with asciidoc)', python=False)
def create_post(self, path, **kw):
content = kw.pop('content', 'Write your post here.')
one_file = kw.pop('onefile', False) # NOQA
is_page = kw.pop('is_page', False) # NOQA
metadata = OrderedDict()
metadata.update(self.default_metadata)
metadata.update(kw)
makedirs(os.path.dirname(path))
if not content.endswith('\n'):
content += '\n'
with codecs.open(path, "wb+", "utf8") as fd:
if one_file:
fd.write("////\n")
fd.write(write_metadata(metadata))
fd.write("////\n")
fd.write(content)
|
py | 1a35c06ad66107afe58edf73398a00bc01665176 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from nefi2.model.algorithms._alg import Algorithm, FloatSlider, CheckBox
import cv2
__authors__ = {"Sebastian Schattner": "[email protected]"}
class AlgBody(Algorithm):
"""Color enhancement algorithm implementation"""
def __init__(self):
Algorithm.__init__(self)
self.name = "Color Enhance"
self.parent = "Preprocessing"
self.left_pct = FloatSlider("left percentage", 0.0, 10.0, 0.1, 2.5)
self.right_pct = FloatSlider("right percentage", 0.0, 10.0, 0.1, 2.5)
self.channel1 = CheckBox("channel1", True)
self.channel2 = CheckBox("channel2", True)
self.channel3 = CheckBox("channel3", True)
self.float_sliders.append(self.left_pct)
self.float_sliders.append(self.right_pct)
# self.checkboxes.append(self.channel1)
# self.checkboxes.append(self.channel2)
# self.checkboxes.append(self.channel3)
def process(self, args):
channels = cv2.split(args[0])
if self.channel1.value:
channels[0] = self.compute_channels(channels[0])
if self.channel2.value:
channels[1] = self.compute_channels(channels[1])
if self.channel3.value:
channels[2] = self.compute_channels(channels[2])
self.result['img'] = cv2.merge(channels)
def compute_channels(self, image_channel):
vmin = 0
vmax = 255
hist = cv2.calcHist([image_channel], [0], None, [256], [0, 256])
cdf = hist.cumsum()
for i, e in list(enumerate(cdf)):
if e > image_channel.size * (self.left_pct.value / 100):
if i != 0:
vmin = i-1
break
for i, e in list(enumerate(cdf)):
if e > image_channel.size * (1 - (self.right_pct.value / 100)):
vmax = i
break
if vmax != vmin:
for i in range(image_channel.shape[0]):
for j in range(image_channel.shape[1]):
pix = image_channel.item(i, j)
if pix < vmin:
image_channel.itemset((i, j), vmin)
elif pix > vmax:
image_channel.itemset((i, j), vmax)
vmin_ij = image_channel.item(i, j) - vmin
image_channel.itemset((i, j), vmin_ij * 255 / (vmax-vmin))
return image_channel
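    # The loops above perform a per-channel contrast stretch: vmin/vmax are the
    # intensities where the cumulative histogram crosses the left/right percentage
    # cut-offs, values outside [vmin, vmax] are clipped, and the remaining range is
    # rescaled to 0-255. A vectorized sketch of the clip-and-rescale step (an
    # equivalent formulation, not the original implementation) would be:
    #     clipped = numpy.clip(image_channel, vmin, vmax)
    #     image_channel = ((clipped - vmin) * 255 / (vmax - vmin)).astype(image_channel.dtype)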
if __name__ == '__main__':
pass
|
py | 1a35c06b0bd43664687819af1de2c7fcfc47f821 | #!/usr/bin/env python
from __future__ import division
import json
import sys
from dateutil import parser
def getCurrentTime(line):
# Format: 2014-08-10T12:50:02.849167+00:00
try:
dcl = json.loads(line)
time = dcl['time']
#datetime_format = str('%Y-%m-%dT%H:%M:%S.%f%z')
#currentTime = datetime.datetime.strptime(time, datetime_format)
currentTime = parser.parse(time)
#print "time", currentTime
except ValueError:
print "[Error]: Could not find time"
return currentTime
if __name__ == '__main__':
fileToCheck = sys.argv[1]
print fileToCheck
jfile = open(fileToCheck, 'r')
currentTime = 0
prevTime = 0
wrongTimeOrder = 0
lineCnt = 0
for line in jfile:
if (lineCnt>0):
currentTime = getCurrentTime(line)
else:
currentTime = getCurrentTime(line)
prevTime = currentTime
if (currentTime < prevTime):
wrongTimeOrder += 1
#print "[Error]: Previous Event is newer than Current Event: ", prevTime, "(Previous) vs. ", currentTime, "(Current)"
lineCnt += 1
prevTime = currentTime
    percentUnordered = wrongTimeOrder / lineCnt * 100
print "Total Events: ", lineCnt
print "Total Misordered Time Events: ", wrongTimeOrder, " (", percentUnordered, "% of total)"
|
py | 1a35c0e6c9abbdf2246eafd756efdf3ac04ff479 | import sys
def _up_to(args):
try:
n_str = args[1]
return int(n_str) + 1
except:
return 25
def main(up_to):
for n in mod_3(range(1, up_to)):
print(n)
def mod_3(numbers):
for number in numbers:
if number % 3 == 0:
yield "Mod3"
else:
yield f"{number}"
if __name__ == "__main__":
up_to = _up_to(sys.argv)
main(up_to)
|
py | 1a35c1449da17d557d2325f89e55efb34cc7f3f2 | # Copyright 2013-2014 eNovance <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2015 Wind River Systems, Inc.
#
import fixtures
import glanceclient
import mock
import novaclient
from oslotest import base
from ceilometer import nova_client
from ceilometer import service
class TestNovaClient(base.BaseTestCase):
def setUp(self):
super(TestNovaClient, self).setUp()
self.CONF = service.prepare_service([], [])
self._flavors_count = 0
self._images_count = 0
self.nv = nova_client.Client(self.CONF)
self.useFixture(fixtures.MockPatchObject(
self.nv.nova_client.flavors, 'get',
side_effect=self.fake_flavors_get))
self.useFixture(fixtures.MockPatchObject(
self.nv.glance_client.images, 'get',
side_effect=self.fake_images_get))
def fake_flavors_get(self, *args, **kwargs):
self._flavors_count += 1
a = mock.MagicMock()
a.id = args[0]
if a.id == 1:
a.name = 'm1.tiny'
elif a.id == 2:
a.name = 'm1.large'
else:
raise novaclient.exceptions.NotFound('foobar')
return a
def fake_images_get(self, *args, **kwargs):
self._images_count += 1
a = mock.MagicMock()
a.id = args[0]
image_details = {
1: ('ubuntu-12.04-x86', dict(kernel_id=11, ramdisk_id=21)),
2: ('centos-5.4-x64', dict(kernel_id=12, ramdisk_id=22)),
3: ('rhel-6-x64', None),
4: ('rhel-6-x64', dict()),
5: ('rhel-6-x64', dict(kernel_id=11)),
6: ('rhel-6-x64', dict(ramdisk_id=21))
}
if a.id in image_details:
a.name = image_details[a.id][0]
a.metadata = image_details[a.id][1]
else:
raise glanceclient.exc.HTTPNotFound('foobar')
return a
@staticmethod
def fake_servers_list(*args, **kwargs):
a = mock.MagicMock()
a.id = 42
a.flavor = {'id': 1}
a.image = {'id': 1}
b = mock.MagicMock()
b.id = 43
b.flavor = {'id': 2}
b.image = {'id': 2}
return [a, b]
def test_instance_get_all_by_host(self):
with mock.patch.object(self.nv.nova_client.servers, 'list',
side_effect=self.fake_servers_list):
instances = self.nv.instance_get_all_by_host('foobar')
self.assertEqual(2, len(instances))
self.assertEqual('m1.tiny', instances[0].flavor['name'])
self.assertEqual('ubuntu-12.04-x86', instances[0].image['name'])
self.assertEqual(11, instances[0].kernel_id)
self.assertEqual(21, instances[0].ramdisk_id)
def test_instance_get_all(self):
with mock.patch.object(self.nv.nova_client.servers, 'list',
side_effect=self.fake_servers_list):
instances = self.nv.instance_get_all()
self.assertEqual(2, len(instances))
self.assertEqual(42, instances[0].id)
self.assertEqual(1, instances[0].flavor['id'])
self.assertEqual(1, instances[0].image['id'])
@staticmethod
def fake_servers_list_unknown_flavor(*args, **kwargs):
a = mock.MagicMock()
a.id = 42
a.flavor = {'id': 666}
a.image = {'id': 1}
return [a]
def test_instance_get_all_by_host_unknown_flavor(self):
with mock.patch.object(
self.nv.nova_client.servers, 'list',
side_effect=self.fake_servers_list_unknown_flavor):
instances = self.nv.instance_get_all_by_host('foobar')
self.assertEqual(1, len(instances))
self.assertEqual('unknown-id-666', instances[0].flavor['name'])
@staticmethod
def fake_servers_list_unknown_image(*args, **kwargs):
a = mock.MagicMock()
a.id = 42
a.flavor = {'id': 1}
a.image = {'id': 666}
return [a]
@staticmethod
def fake_servers_list_image_missing_metadata(*args, **kwargs):
a = mock.MagicMock()
a.id = 42
a.flavor = {'id': 1}
a.image = {'id': args[0]}
return [a]
@staticmethod
def fake_instance_image_missing(*args, **kwargs):
a = mock.MagicMock()
a.id = 42
a.flavor = {'id': 666}
a.image = None
return [a]
def test_instance_get_all_by_host_unknown_image(self):
with mock.patch.object(
self.nv.nova_client.servers, 'list',
side_effect=self.fake_servers_list_unknown_image):
instances = self.nv.instance_get_all_by_host('foobar')
self.assertEqual(1, len(instances))
self.assertEqual('unknown-id-666', instances[0].image['name'])
def test_with_flavor_and_image(self):
results = self.nv._with_flavor_and_image(self.fake_servers_list())
instance = results[0]
self.assertEqual(2, len(results))
self.assertEqual('ubuntu-12.04-x86', instance.image['name'])
self.assertEqual('m1.tiny', instance.flavor['name'])
self.assertEqual(11, instance.kernel_id)
self.assertEqual(21, instance.ramdisk_id)
def test_with_flavor_and_image_unknown_image(self):
instances = self.fake_servers_list_unknown_image()
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertEqual('unknown-id-666', instance.image['name'])
self.assertNotEqual(instance.flavor['name'], 'unknown-id-666')
self.assertIsNone(instance.kernel_id)
self.assertIsNone(instance.ramdisk_id)
def test_with_flavor_and_image_unknown_flavor(self):
instances = self.fake_servers_list_unknown_flavor()
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertEqual('unknown-id-666', instance.flavor['name'])
self.assertEqual(0, instance.flavor['vcpus'])
self.assertEqual(0, instance.flavor['ram'])
self.assertEqual(0, instance.flavor['disk'])
self.assertNotEqual(instance.image['name'], 'unknown-id-666')
self.assertEqual(11, instance.kernel_id)
self.assertEqual(21, instance.ramdisk_id)
def test_with_flavor_and_image_none_metadata(self):
instances = self.fake_servers_list_image_missing_metadata(3)
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertIsNone(instance.kernel_id)
self.assertIsNone(instance.ramdisk_id)
def test_with_flavor_and_image_missing_metadata(self):
instances = self.fake_servers_list_image_missing_metadata(4)
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertIsNone(instance.kernel_id)
self.assertIsNone(instance.ramdisk_id)
def test_with_flavor_and_image_missing_ramdisk(self):
instances = self.fake_servers_list_image_missing_metadata(5)
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertEqual(11, instance.kernel_id)
self.assertIsNone(instance.ramdisk_id)
def test_with_flavor_and_image_missing_kernel(self):
instances = self.fake_servers_list_image_missing_metadata(6)
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertIsNone(instance.kernel_id)
self.assertEqual(21, instance.ramdisk_id)
def test_with_flavor_and_image_no_cache(self):
results = self.nv._with_flavor_and_image(self.fake_servers_list())
self.assertEqual(2, len(results))
self.assertEqual(2, self._flavors_count)
self.assertEqual(2, self._images_count)
def test_with_flavor_and_image_cache(self):
results = self.nv._with_flavor_and_image(self.fake_servers_list() * 2)
self.assertEqual(4, len(results))
self.assertEqual(2, self._flavors_count)
self.assertEqual(2, self._images_count)
def test_with_flavor_and_image_unknown_image_cache(self):
instances = self.fake_servers_list_unknown_image()
results = self.nv._with_flavor_and_image(instances * 2)
self.assertEqual(2, len(results))
self.assertEqual(1, self._flavors_count)
self.assertEqual(1, self._images_count)
for instance in results:
self.assertEqual('unknown-id-666', instance.image['name'])
self.assertNotEqual(instance.flavor['name'], 'unknown-id-666')
self.assertIsNone(instance.kernel_id)
self.assertIsNone(instance.ramdisk_id)
def test_with_missing_image_instance(self):
instances = self.fake_instance_image_missing()
results = self.nv._with_flavor_and_image(instances)
instance = results[0]
self.assertIsNone(instance.kernel_id)
self.assertIsNone(instance.image)
self.assertIsNone(instance.ramdisk_id)
def test_with_nova_http_log_debug(self):
self.CONF.set_override("nova_http_log_debug", True)
self.nv = nova_client.Client(self.CONF)
self.assertIsNotNone(self.nv.nova_client.client.logger)
def test_with_max_timing_buffer(self):
self.CONF.set_override("max_timing_buffer", 300)
self.nv = nova_client.Client(self.CONF)
# TO DO (dbadea): remove condition after updating nova_client
if hasattr(self.nv.nova_client, 'get_timings_max_len'):
self.assertEqual(300, self.nv.nova_client.get_timings_max_len())
|
py | 1a35c1d315692ad5ca50178edba5c6584bd964ad | import os
import datetime
import sys
import logging
from flask import Flask, render_template
from logging.config import dictConfig
from werkzeug.middleware.dispatcher import DispatcherMiddleware
from prometheus_client import make_wsgi_app, Summary, Counter
dictConfig({
'version': 1,
'formatters': {'default': {
'format': '[%(asctime)s] %(levelname)s in %(module)s: %(message)s',
}},
'handlers': {'wsgi': {
'class': 'logging.StreamHandler',
'stream': 'ext://sys.stdout',
'formatter': 'default'
}},
'root': {
'level': 'INFO',
'handlers': ['wsgi']
}
})
c = Counter('my_failures', 'Description of counter')
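# Note (not part of the original app): the counter above is registered but never
# advanced; prometheus_client exposes it through the /metrics mount configured
# below, and a handler would typically call c.inc() to record a failure. With the
# defaults used here the output can be checked with `curl localhost:8080/metrics`.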
app = Flask(__name__)
@app.route('/')
def index():
app.logger.info('Request at %s ', datetime.datetime.now())
return render_template('index.html')
app.wsgi_app = DispatcherMiddleware(app.wsgi_app, {
'/metrics': make_wsgi_app()
})
if __name__ == "__main__":
app.run(debug=True,host='0.0.0.0',port=int(os.environ.get('PORT', 8080)))
|
py | 1a35c1e0b79d4eadb1d7a7225d71f487ce672434 | from src.main import mathmatic |
py | 1a35c1fd03115a212d287ec19cab4be2dcf19629 | import logging
from multiprocessing import cpu_count, Pool
from bg_backend.bitglitter.config.palettefunctions import _return_palette
from bg_backend.bitglitter.utilities.filemanipulation import create_default_output_folder
from bg_backend.bitglitter.utilities.gui.messages import write_frame_count_http, write_save_path_http
from bg_backend.bitglitter.write.render.headerencode import metadata_header_encode, custom_palette_header_encode, \
stream_header_encode
from bg_backend.bitglitter.write.render.framestategenerator import frame_state_generator
from bg_backend.bitglitter.write.render.renderutilities import draw_frame, total_frames_estimator
class RenderHandler:
def __init__(self,
# Basic Setup
stream_name, stream_description, working_dir, default_output_path, crypto_key, scrypt_n, scrypt_r,
scrypt_p,
# Stream Rendering
block_height, block_width, pixel_width, stream_palette_id, max_cpu_cores,
# Header
stream_sha256, size_in_bytes, compression_enabled, encryption_enabled, file_mask_enabled,
datetime_started, bg_version, manifest, protocol_version,
# Render Output
output_mode, output_path, stream_name_file_output,
# Statistics
save_statistics
):
self.blocks_wrote = 0
self.total_frames = 0
write_save_path_http(str(output_path))
# Pre render
logging.info('Beginning pre-render processes...')
create_default_output_folder(default_output_path)
initializer_palette = _return_palette(palette_id='1')
initializer_palette_b = _return_palette('11')
stream_palette = _return_palette(palette_id=stream_palette_id)
initializer_palette_dict = initializer_palette.return_encoder()
initializer_palette_dict_b = initializer_palette_b.return_encoder()
stream_palette_dict = stream_palette.return_encoder()
metadata_header_bytes, metadata_header_hash_bytes = metadata_header_encode(file_mask_enabled, crypto_key,
scrypt_n, scrypt_r, scrypt_p,
bg_version, stream_name,
datetime_started,
stream_description, manifest)
palette_header_bytes = b''
palette_header_hash_bytes = b''
if stream_palette.is_custom:
palette_header_bytes, palette_header_hash_bytes = custom_palette_header_encode(stream_palette)
self.total_frames = total_frames_estimator(block_height, block_width, len(metadata_header_bytes),
len(palette_header_bytes), size_in_bytes, stream_palette,
output_mode)
write_frame_count_http(self.total_frames)
stream_header = stream_header_encode(size_in_bytes, self.total_frames, compression_enabled,
encryption_enabled, file_mask_enabled, len(metadata_header_bytes),
metadata_header_hash_bytes, len(palette_header_bytes),
palette_header_hash_bytes)
logging.info('Pre-render complete.')
# Render
if max_cpu_cores == 0 or max_cpu_cores >= cpu_count():
pool_size = cpu_count()
else:
pool_size = max_cpu_cores
self.total_operations = self.total_frames * (1 + int(output_mode != 'image'))
with Pool(processes=pool_size) as worker_pool:
logging.info(f'Beginning rendering on {pool_size} CPU core(s)...')
count = 1
for frame_encode in worker_pool.imap(draw_frame, frame_state_generator(block_height, block_width,
pixel_width, protocol_version, initializer_palette, stream_palette,
output_mode, output_path, stream_name_file_output, working_dir,
self.total_frames, stream_header, metadata_header_bytes,
palette_header_bytes, stream_sha256, initializer_palette_dict,
initializer_palette_dict_b, stream_palette_dict, default_output_path,
stream_name, save_statistics, self.total_operations), chunksize=1):
pass
logging.info('Rendering frames complete.')
|
py | 1a35c3f2d1f676e3d6a8cb5737d25b61e1486815 | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from a2c_ppo_acktr.distributions import Bernoulli, Categorical, DiagGaussian
from a2c_ppo_acktr.utils import init, init_null
class Flatten(nn.Module):
def forward(self, x):
return x.view(x.size(0), -1)
class Policy(nn.Module):
def __init__(self, obs_shape, action_space, base, base_kwargs=None):
super(Policy, self).__init__()
self.base = base
if action_space.__class__.__name__ == "Discrete":
num_outputs = action_space.n
self.dist = Categorical(self.base.output_size, num_outputs)
elif action_space.__class__.__name__ == "Box":
num_outputs = action_space.shape[0]
self.dist = DiagGaussian(self.base.output_size, num_outputs)
elif action_space.__class__.__name__ == "MultiBinary":
num_outputs = action_space.shape[0]
self.dist = Bernoulli(self.base.output_size, num_outputs)
else:
raise NotImplementedError
@property
def is_recurrent(self):
return self.base.is_recurrent
@property
def recurrent_hidden_state_size(self):
"""Size of rnn_hx."""
return self.base.recurrent_hidden_state_size
def forward(self, inputs, rnn_hxs, masks):
raise NotImplementedError
def act(self, inputs, rnn_hxs, masks, deterministic=False, eps=0., rand_action_mask=None,
rand_actions=None):
value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
dist = self.dist(actor_features)
if deterministic:
action = dist.mode()
else:
action = dist.sample()
if rand_action_mask is not None and rand_action_mask.sum() > 0:
if rand_actions is None:
action[rand_action_mask] = torch.randint(0, dist.probs.size(1),
(rand_action_mask.sum(),),
device=action.device)
else:
action[rand_action_mask] = rand_actions
elif eps > 0:
rand_act = torch.rand(dist.probs.size(0), 1) < eps
action[rand_act] = torch.randint(0, dist.probs.size(1), (rand_act.sum(),),
device=action.device)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action, action_log_probs, rnn_hxs
def get_value(self, inputs, rnn_hxs, masks):
value, _, _ = self.base(inputs, rnn_hxs, masks)
return value
def evaluate_actions(self, inputs, rnn_hxs, masks, action):
value, actor_features, rnn_hxs = self.base(inputs, rnn_hxs, masks)
dist = self.dist(actor_features)
action_log_probs = dist.log_probs(action)
dist_entropy = dist.entropy().mean()
return value, action_log_probs, dist_entropy, rnn_hxs
class NNBase(nn.Module):
def __init__(self, recurrent, recurrent_input_size, hidden_size):
super(NNBase, self).__init__()
self._hidden_size = hidden_size
self._recurrent = recurrent
if recurrent:
self.gru = nn.GRU(recurrent_input_size, hidden_size)
for name, param in self.gru.named_parameters():
if 'bias' in name:
nn.init.constant_(param, 0)
elif 'weight' in name:
nn.init.orthogonal_(param)
@property
def is_recurrent(self):
return self._recurrent
@property
def recurrent_hidden_state_size(self):
if self._recurrent:
return self._hidden_size
return 1
@property
def output_size(self):
return self._hidden_size
def _forward_gru(self, x, hxs, masks):
if x.size(0) == hxs.size(0):
x, hxs = self.gru(x.unsqueeze(0), (hxs * masks).unsqueeze(0))
x = x.squeeze(0)
hxs = hxs.squeeze(0)
else:
            # x is a (T, N, -1) tensor that has been flattened to (T * N, -1)
N = hxs.size(0)
T = int(x.size(0) / N)
# unflatten
x = x.view(T, N, x.size(1))
# Same deal with masks
masks = masks.view(T, N)
# Let's figure out which steps in the sequence have a zero for any agent
# We will always assume t=0 has a zero in it as that makes the logic cleaner
has_zeros = ((masks[1:] == 0.0) \
.any(dim=-1)
.nonzero()
.squeeze()
.cpu())
# +1 to correct the masks[1:]
if has_zeros.dim() == 0:
# Deal with scalar
has_zeros = [has_zeros.item() + 1]
else:
has_zeros = (has_zeros + 1).numpy().tolist()
# add t=0 and t=T to the list
has_zeros = [0] + has_zeros + [T]
hxs = hxs.unsqueeze(0)
outputs = []
for i in range(len(has_zeros) - 1):
# We can now process steps that don't have any zeros in masks together!
# This is much faster
start_idx = has_zeros[i]
end_idx = has_zeros[i + 1]
rnn_scores, hxs = self.gru(
x[start_idx:end_idx],
hxs * masks[start_idx].view(1, -1, 1))
outputs.append(rnn_scores)
# assert len(outputs) == T
# x is a (T, N, -1) tensor
x = torch.cat(outputs, dim=0)
# flatten
x = x.view(T * N, -1)
hxs = hxs.squeeze(0)
return x, hxs
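    # Illustrative walk-through of the masked rollout above (not from the original
    # repo): with T=4 steps and N=2 environments where environment 1 resets at
    # t=2, masks.view(T, N) is [[1, 1], [1, 1], [1, 0], [1, 1]], so has_zeros
    # ends up as [0, 2, 4] and the GRU is run once over steps [0, 2) and once
    # over [2, 4), zeroing the hidden state of the reset environment at t=2.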
class CNNBase(NNBase):
def __init__(self, cfg, obs_space, action_space):
num_inputs = obs_space[0]
recurrent = cfg.recurrent
hidden_size = cfg.hidden_size
use_init = cfg.use_init
super(CNNBase, self).__init__(recurrent, hidden_size, hidden_size)
if use_init:
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), nn.init.calculate_gain('relu'))
else:
init_ = lambda m: init_null(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), nn.init.calculate_gain('relu'))
self.main = nn.Sequential(
init_(nn.Conv2d(num_inputs, 32, 8, stride=4)), nn.ReLU(),
init_(nn.Conv2d(32, 64, 4, stride=2)), nn.ReLU(),
init_(nn.Conv2d(64, 32, 3, stride=1)), nn.ReLU(), Flatten(),
init_(nn.Linear(32 * 7 * 7, hidden_size)), nn.ReLU())
if use_init:
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0))
else:
init_ = lambda m: init_null(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0))
self.critic_linear = init_(nn.Linear(hidden_size, 1))
self.train()
def forward(self, inputs, rnn_hxs, masks):
x = self.main(inputs / 255.0)
if self.is_recurrent:
x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)
return self.critic_linear(x), x, rnn_hxs
class MLPBase(NNBase):
def __init__(self, cfg, obs_space, action_space):
num_inputs = obs_space[0]
recurrent = cfg.recurrent
hidden_size = cfg.hidden_size
super(MLPBase, self).__init__(recurrent, num_inputs, hidden_size)
if recurrent:
num_inputs = hidden_size
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), np.sqrt(2))
self.actor = nn.Sequential(
init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())
self.critic = nn.Sequential(
init_(nn.Linear(num_inputs, hidden_size)), nn.Tanh(),
init_(nn.Linear(hidden_size, hidden_size)), nn.Tanh())
self.critic_linear = init_(nn.Linear(hidden_size, 1))
self.train()
def forward(self, inputs, rnn_hxs, masks):
x = inputs
if self.is_recurrent:
x, rnn_hxs = self._forward_gru(x, rnn_hxs, masks)
hidden_critic = self.critic(x)
hidden_actor = self.actor(x)
return self.critic_linear(hidden_critic), hidden_actor, rnn_hxs
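# Illustrative usage sketch (not part of the original module). The cfg fields
# below (recurrent, hidden_size) are the ones MLPBase actually reads; the
# Discrete stand-in only mimics the attribute layout of gym.spaces.Discrete,
# so treat this as a sketch rather than canonical usage of this repository.
if __name__ == "__main__":
    from types import SimpleNamespace

    class Discrete:  # minimal stand-in for gym.spaces.Discrete
        def __init__(self, n):
            self.n = n

    cfg = SimpleNamespace(recurrent=False, hidden_size=64)
    obs_space, action_space = (4,), Discrete(2)
    policy = Policy(obs_space, action_space, MLPBase(cfg, obs_space, action_space))

    obs = torch.zeros(8, 4)  # batch of 8 observations
    rnn_hxs = torch.zeros(8, policy.recurrent_hidden_state_size)
    masks = torch.ones(8, 1)
    value, action, action_log_probs, rnn_hxs = policy.act(obs, rnn_hxs, masks)
    print(value.shape, action.shape)  # torch.Size([8, 1]) torch.Size([8, 1])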
|
py | 1a35c44e34a5f9139865a248dbb25f3ae2324db1 | from .activity import Activity, CashPayment, Trade, TradeFlags
from .balance import AccountBalance
from .cash import Currency, Cash
from .instrument import (
Instrument,
Stock,
Bond,
Option,
OptionType,
FutureOption,
Future,
Forex,
)
from .quote import Quote
from .position import Position
from . import converter
__all__ = [
"AccountBalance",
"Activity",
"Bond",
"Cash",
"CashPayment",
"converter",
"Currency",
"Forex",
"Future",
"FutureOption",
"Instrument",
"Option",
"OptionType",
"Position",
"Quote",
"Stock",
"Trade",
"TradeFlags",
]
|
py | 1a35c4510611dfd4f9f493f1365fadfcbd017a74 | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
"""Tests pertaining to line/branch test coverage for the Firecracker code base.
# TODO
- The coverage target should be put in `s3://spec.firecracker` and updated automatically.
"""
import os
import platform
import re
import pytest
import framework.utils as utils
import host_tools.cargo_build as host # pylint: disable=import-error
import host_tools.proc as proc
# AMD has a slightly different coverage due to
# the appearance of the brand string. On Intel,
# this contains the frequency while on AMD it does not.
# Checkout the cpuid crate. In the future other
# differences may appear.
COVERAGE_DICT = {"Intel": 85.12, "AMD": 84.35, "ARM": 83.18}
PROC_MODEL = proc.proc_type()
COVERAGE_MAX_DELTA = 0.05
CARGO_KCOV_REL_PATH = os.path.join(host.CARGO_BUILD_REL_PATH, 'kcov')
KCOV_COVERAGE_FILE = 'index.js'
"""kcov will aggregate coverage data in this file."""
KCOV_COVERED_LINES_REGEX = r'"covered_lines":"(\d+)"'
"""Regex for extracting number of total covered lines found by kcov."""
KCOV_TOTAL_LINES_REGEX = r'"total_lines" : "(\d+)"'
"""Regex for extracting number of total executable lines found by kcov."""
@pytest.mark.timeout(400)
def test_coverage(test_session_root_path, test_session_tmp_path):
"""Test line coverage with kcov.
The result is extracted from the $KCOV_COVERAGE_FILE file created by kcov
after a coverage run.
"""
proc_model = [item for item in COVERAGE_DICT if item in PROC_MODEL]
assert len(proc_model) == 1, "Could not get processor model!"
coverage_target_pct = COVERAGE_DICT[proc_model[0]]
exclude_pattern = (
'${CARGO_HOME:-$HOME/.cargo/},'
'build/,'
'tests/,'
'usr/lib/gcc,'
'lib/x86_64-linux-gnu/,'
'test_utils.rs,'
# The following files/directories are auto-generated
'bootparam.rs,'
'elf.rs,'
'mpspec.rs,'
'msr_index.rs,'
'_gen'
)
exclude_region = '\'mod tests {\''
target = "{}-unknown-linux-musl".format(platform.machine())
cmd = (
'RUSTFLAGS="{}" CARGO_TARGET_DIR={} cargo kcov --all '
'--target {} --output {} -- '
'--exclude-pattern={} '
'--exclude-region={} --verify'
).format(
host.get_rustflags(),
os.path.join(test_session_root_path, CARGO_KCOV_REL_PATH),
target,
test_session_tmp_path,
exclude_pattern,
exclude_region
)
# By default, `cargo kcov` passes `--exclude-pattern=$CARGO_HOME --verify`
# to kcov. To pass others arguments, we need to include the defaults.
utils.run_cmd(cmd)
coverage_file = os.path.join(test_session_tmp_path, KCOV_COVERAGE_FILE)
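    # Illustrative shape of the kcov output parsed below (numbers made up here):
    #   ... "covered_lines":"8452" ... "total_lines" : "9930" ...
    # which would give coverage = 8452 / 9930 * 100, i.e. roughly 85.12%.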
with open(coverage_file) as cov_output:
contents = cov_output.read()
covered_lines = int(re.findall(KCOV_COVERED_LINES_REGEX, contents)[0])
total_lines = int(re.findall(KCOV_TOTAL_LINES_REGEX, contents)[0])
coverage = covered_lines / total_lines * 100
print("Number of executable lines: {}".format(total_lines))
print("Number of covered lines: {}".format(covered_lines))
print("Thus, coverage is: {:.2f}%".format(coverage))
coverage_low_msg = (
'Current code coverage ({:.2f}%) is below the target ({}%).'
.format(coverage, coverage_target_pct)
)
min_coverage = coverage_target_pct - COVERAGE_MAX_DELTA
assert coverage >= min_coverage, coverage_low_msg
# Get the name of the variable that needs updating.
namespace = globals()
cov_target_name = [name for name in namespace if namespace[name]
is COVERAGE_DICT][0]
coverage_high_msg = (
'Current code coverage ({:.2f}%) is above the target ({}%).\n'
'Please update the value of {}.'
.format(coverage, coverage_target_pct, cov_target_name)
)
assert coverage - coverage_target_pct <= COVERAGE_MAX_DELTA,\
coverage_high_msg
|
py | 1a35c5ff595bde34e5839651fb1975b9a2dd329d | from thespian.system.utilis import withPossibleInitArgs
class NoArgs(object):
def __init__(self):
self.ready = True
class ReqArgs(object):
def __init__(self, requirements):
self.ready = True
self.reqs = requirements
class PossibleReqArgs(object):
def __init__(self, requirements=None):
self.ready = True
self.reqs = requirements
class CapArgs(object):
def __init__(self, capabilities):
self.ready = True
self.caps = capabilities
class PossibleCapArgs(object):
def __init__(self, capabilities=None):
self.ready = True
self.caps = capabilities
class CapReqArgs(object):
def __init__(self, capabilities, requirements):
self.ready = True
self.reqs = requirements
self.caps = capabilities
class CapPossibleReqArgs(object):
def __init__(self, capabilities, requirements=None):
self.ready = True
self.reqs = requirements
self.caps = capabilities
class PossibleCapPossibleReqArgs(object):
def __init__(self, requirements=None, capabilities=None):
self.ready = True
self.reqs = requirements
self.caps = capabilities
class CapFooArgs(object):
def __init__(self, foo=None, capabilities=None):
self.ready = True
self.caps = capabilities
class ReqCapFooArgs(object):
def __init__(self, requirements=None, foo=None, capabilities=None):
self.ready = True
self.reqs = requirements
self.caps = capabilities
class ReqFooArgs(object):
def __init__(self, requirements=None, foo=None):
self.ready = True
self.reqs = requirements
wpa = withPossibleInitArgs(capabilities={'caps':'here', 'capa':'bilities'},
requirements={'reqs':'requirements', 'r':True})
def test_noargs():
obj = wpa.create(NoArgs)
assert obj
assert not hasattr(obj, 'caps')
assert not hasattr(obj, 'reqs')
def test_reqargs():
obj = wpa.create(ReqArgs)
assert obj
assert not hasattr(obj, 'caps')
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
def test_possiblereqargs():
obj = wpa.create(PossibleReqArgs)
assert obj
assert not hasattr(obj, 'caps')
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
def test_reqfooargs():
obj = wpa.create(ReqFooArgs)
assert obj
assert not hasattr(obj, 'caps')
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
def test_capargs():
obj = wpa.create(CapArgs)
assert obj
assert not hasattr(obj, 'reqs')
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
def test_possiblecapargs():
obj = wpa.create(PossibleCapArgs)
assert obj
assert not hasattr(obj, 'reqs')
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
def test_capfooargs():
obj = wpa.create(CapFooArgs)
assert obj
assert not hasattr(obj, 'reqs')
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
def test_capreqargs():
obj = wpa.create(CapReqArgs)
assert obj
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
def test_cappossiblereqargs():
obj = wpa.create(CapPossibleReqArgs)
assert obj
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
def test_possiblecappossiblereqargs():
    obj = wpa.create(PossibleCapPossibleReqArgs)
assert obj
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
def test_reqcapfooargs():
obj = wpa.create(ReqCapFooArgs)
assert obj
assert obj.caps['caps'] == 'here'
assert obj.caps['capa'] == 'bilities'
assert obj.reqs['r']
assert obj.reqs['reqs'] == 'requirements'
|
py | 1a35c6551ac870d532fa57b3c063577de2499633 | # Generated by Django 2.0.6 on 2018-12-10 23:42
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('university', '0001_initial'),
]
operations = [
migrations.AlterModelOptions(
name='university',
options={'verbose_name': 'Facultad', 'verbose_name_plural': 'Facultades'},
),
]
|
py | 1a35c7dc802666a3de19fbb1c9c6de3aac07b039 | #! /usr/bin/env python
# coding=utf-8
import os
import pandas as pd
import urllib
import xml.etree.ElementTree as ET
import io
import itertools as IT
# Copyright © 2016 Joachim Muth <[email protected]>
#
# Distributed under terms of the MIT license.
class Scraper:
"""
Scraper for parlament.ch
    scraper.get(table_name): get the table, write it to a csv file, and return a pandas.data_frame
"""
def __init__(self, time_out=10, language='FR'):
self.tables = {'party': 'Party',
'person': 'Person',
'member_council': 'MemberCouncil',
'council': 'Council'}
self.url_base = "https://ws.parlament.ch/odata.svc/"
self.url_count = "$count"
self.url_lang_filter = "$filter=Language%20eq%20'" + language + "'"
self.folder = "data"
self.time_out = time_out
self.limit_api = 1000
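    # Example usage (illustrative; table names follow the parlament.ch OData
    # schema that the rest of this class assumes):
    #   scraper = Scraper(time_out=10, language='FR')
    #   parties = scraper.get('Party')       # writes data/Party.csv, returns a DataFrame
    #   n_people = scraper.count('Person')   # number of rows in the Person table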
def get(self, table_name):
"""
Load the table_name from parlament.ch
Write a csv file in self.folder / table_name
:return (pandas.data_frame): table
"""
table_size = self.count(table_name)
if table_name == 'BusinessRole':
df = self._inner_get_business_role(table_name)
elif table_name == 'BusinessStatus':
df = self._inner_get_big_table_skip(table_name)
elif table_size > 10000:
df = self._inner_get_big_table_ids(table_name)
elif table_size > 900:
df = self._inner_get_big_table_skip(table_name)
else:
df = self._inner_get_small_table(table_name)
self._inner_write_file(df, table_name)
return df
def count(self, table_name):
"""
Count request for parlament.ch server
:param table_name:
:return: number of entries in table_name
"""
url = self.url_base + table_name + "/$count?$filter=Language%20eq%20'FR'"
with urllib.request.urlopen(url) as response:
n = response.read()
# get the number from the bytes
n = int(str(n).split("'")[1])
return n
def _inner_get_and_parse(self, url):
"""
Send GET request to parlament.ch and parse the return XML file to a pandas.data_frame
:param url: (str) GET url request
:return: (pandas.data_frame) parsed XML
"""
print("GET:", url)
with urllib.request.urlopen(url) as url:
s = url.read()
# root = ET.fromstring(s)
root = self._inner_error_handling_xmlfromstring(s)
dict_ = {}
base = "{http://www.w3.org/2005/Atom}"
# base = self.base_url
for child in root.iter(base + 'entry'):
for children in child.iter(base + 'content'):
for properties in children:
for subject in properties:
# print(subject.text)
s = subject.tag.split('}')
if s[1] in dict_:
dict_[s[1]].append(subject.text)
else:
dict_[s[1]] = [subject.text]
data = pd.DataFrame(dict_)
return data
def _inner_error_handling_xmlfromstring(self, content):
""" Print XML if error while parsing (mainly due to server API timeout)"""
try:
tree = ET.fromstring(content)
except ET.ParseError as err:
lineno, column = err.position
line = next(IT.islice(io.BytesIO(content), lineno))
caret = '{:=>{}}'.format('^', column)
err.msg = '{}\n{}\n{}'.format(err, line, caret)
raise
return tree
def _inner_write_file(self, table, table_name):
""" Write table in csv file inside self.folder / table_name"""
self._inner_check_folder()
table.to_csv(self.folder + '/' + table_name + '.csv')
def _inner_get_big_table_skip(self, table_name):
"""
        Loop URL requests over the table in steps of 1000 and load data until it reaches the end.
        Times out after self.time_out iterations.
        :param table_name: Name of the desired table
:return: (pandas.data_frame) table
"""
# url
base = self.url_base
language = self.url_lang_filter
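        # Illustrative shape of the requests issued below ($top/$skip paging,
        # values assumed):
        #   .../TableName?$top=1000&$filter=Language eq 'FR'&$skip=0
        #   .../TableName?$top=1000&$filter=Language eq 'FR'&$skip=1000
        # and so on, until an empty page comes back or self.time_out iterations pass.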
# loop parameters
limit_api = self.limit_api
data_frames = []
i = 0
top = 1000
skip = 0
while True:
url = base + table_name + '?' + "$top=" + str(top) + \
'&' + language + \
'&' + "$skip=" + str(skip)
df = self._inner_get_and_parse(url)
# stop when we reach the end of the data
if df.shape == (0, 0):
break
# stop after 10 iteration to avoid swiss police to knock at our door
if i > self.time_out:
print("Loader timed out after ", i, " iterations. Data frame IDs are greater than ", top)
break
data_frames.append(df)
# top += limit_api
skip += limit_api
i += 1
# concat all downloaded tables
df = pd.concat(data_frames, ignore_index=True)
# check if we really download the whole table
self._inner_check_size(df, table_name)
return df
    def _inner_get_big_table_ids(self, table_name):
        """
        The "skip" OData attribute leads the parlament.ch server to time out, so here we use IDs to fetch
        intervals of items directly.
        Less safe than the "skip" version, because it could stop early if a large ID interval is unused
        (normally not the case).
        Loop URL requests over the table in steps of 1000 IDs and load data until it reaches the end.
        Times out after self.time_out iterations.
        :param table_name: Name of the desired table
:return: (pandas.data_frame) table
"""
# url
base = self.url_base
language = self.url_lang_filter
id_from = "ID%20ge%20"
id_to = "%20and%20ID%20lt%20"
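        # Illustrative shape of the requests issued below (assuming the smallest
        # ID is 1000 and limit_api is 1000):
        #   ...?$filter=Language eq 'FR' and ID ge 1000 and ID lt 2000
        #   ...?$filter=Language eq 'FR' and ID ge 2000 and ID lt 3000
        # and so on, until the downloaded row count reaches the table's count().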
# loop parameters
limit_api = self.limit_api
data_frames = []
id_ = self._inner_get_smaller_id(table_name)
i = 0
n_downloaded = 0
expected_size = self.count(table_name)
while True:
url = base + table_name + '?' + language + '%20and%20' + id_from + str(id_) + id_to + str(id_ + limit_api)
df = self._inner_get_and_parse(url)
# stop when we reach the end of the data
# if df.shape == (0, 0):
# break
# add number of elements downloaded
n_downloaded += df.shape[0]
# stop when downloaded the whole table
if n_downloaded >= expected_size:
break
# stop after 10 iteration to avoid swiss police to knock at our door
if i > self.time_out:
print("Loader timed out after ", i, " iterations. Data frame IDs are greater than ", id_)
break
data_frames.append(df)
id_ += limit_api
i += 1
# concat all downloaded tables
df = pd.concat(data_frames, ignore_index=True)
# check if we really download the whole table
self._inner_check_size(df, table_name)
return df
def _inner_get_small_table(self, table_name):
"""
Simple get request with language filer
:param table_name:
:return:
"""
url = self.url_base + table_name + '?' + self.url_lang_filter
df = self._inner_get_and_parse(url)
self._inner_check_size(df, table_name)
return df
def _inner_check_size(self, df, table_name):
expected_size = self.count(table_name)
if df.shape[0] != expected_size:
print("[ERROR] in scraping table", table_name, "expected size of", expected_size, "but is", df.shape[0])
else:
print("[OK] table " + table_name + " correctly scraped, df.shape = ", df.shape[0], "as expected")
def _inner_check_folder(self):
""" check if folder exists to avoid error and create it if not """
if not os.path.exists(self.folder):
os.makedirs(self.folder)
def _inner_get_smaller_id(self, table_name):
url = self.url_base + table_name + '?' + self.url_lang_filter
df = self._inner_get_and_parse(url)
return int(df.ID[0])
def _inner_get_business_role(self, table_name):
"""
Special case of Table BusinessRole which has non-trivial ID.
        Filter results based on BusinessNumber (an arbitrarily chosen attribute) and iterate over it.
        At each iteration, apply the same paging logic as _inner_get_big_table_skip.
        Times out after self.time_out iterations.
        :param table_name: Name of the desired table
:return: (pandas.data_frame) table
"""
# url
base = self.url_base
language = "$filter=Language%20eq%20%27FR%27"
id_ = 19000000
step_id = 10000
# id filter (too long)
id_from = "BusinessNumber%20ge%20"
id_to = "%20and%20BusinessNumber%20lt%20"
# loop parameters
data_frames = []
i = 0
top = 1000
skip = 0
limit_api = self.limit_api
while True:
while True:
url = base + table_name + '?' + "$top=" + str(top) + \
'&' + language + \
'%20and%20' + id_from + str(id_) + id_to + str(id_ + step_id) + \
'&' + "$skip=" + str(skip)
df = self._inner_get_and_parse(url)
# stop when we reach the end of the data
if df.shape == (0, 0):
break
# # stop when we reach the end of the data
# url_count = base + table_name + "/$count?" + "$top=" + str(top) + \
# '&' + language + \
# '&' + id_from + str(id_) + id_to + str(id_ + step_id) + \
# '&' + "$skip=" + str(skip)
# print(self._inner_url_count(url_count))
# if self._inner_url_count(url_count) == 0:
# break
# stop after 10 iteration to avoid swiss police to knock at our door
if i > self.time_out:
print("Loader timed out after ", i, " iterations. Data frame IDs are greater than ", top)
break
data_frames.append(df)
# top += limit_api
skip += limit_api
i += 1
if id_ > 22000000:
break
id_ += step_id
skip = 0
# concat all downloaded tables
df = pd.concat(data_frames, ignore_index=True)
# check if we really download the whole table
self._inner_check_size(df, table_name)
return df
|
py | 1a35c834fc6adee3e8a31134371ba4153a90e791 | ### Define a class to receive the characteristics of each line detection
import numpy as np
class Line( ):
def __init__(self):
# was the line detected in the last iteration?
self.detected = False
# x values of the last n fits of the line
self.recent_xfitted = [ ]
#average x values of the fitted line over the last n iterations
self.bestx = None
#polynomial coefficients averaged over the last n iterations
self.best_fit = None
#polynomial coefficients for the most recent fit
self.current_fit = [ np.array( [ False ] ) ]
#self.current_fit = None
#radius of curvature of the line in some units
self.radius_of_curvature = None
#distance in meters of vehicle center from the line
self.line_base_pos = None
#difference in fit coefficients between last and new fits
self.diff = np.array( [ 0, 0, 0 ], dtype='float' )
#number of detected pixels
self.px_count = None
def add_fitted_line( self, fit, indices ):
# add a new fit to the line, up to n
if fit is not None:
if self.best_fit is not None:
# if a best fit, compare to previous
self.diff = abs( fit - self.best_fit )
if ( self.diff[0] > 0.001 or self.diff[1] > 1.0 or self.diff[2] > 100. ) \
and len( self.current_fit ) > 0:
# break if bad fit unless no current fits
self.detected = False
else:
self.detected = True
self.px_count = np.count_nonzero( indices )
# keep most recent fits
if len( self.current_fit ) > 5:
self.current_fit = self.current_fit[len( self.current_fit )-5:]
# clear out initial false entries
if self.current_fit == [ ] or len( self.current_fit[0] ) != 1:
self.current_fit.append( fit )
else:
self.current_fit[0] = fit
self.best_fit = np.average( self.current_fit, axis=0 )
else:
# or remove one from the history, if not found
self.detected = False
if len( self.current_fit ) > 0:
# throw out oldest fit
self.current_fit = self.current_fit[ :len( self.current_fit ) - 1 ]
if len( self.current_fit ) > 0:
# if there are still any fits in the queue, best_fit is their average
self.best_fit = np.average( self.current_fit, axis=0 )
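    # Typical per-frame usage (illustrative only): `fit` would be the coefficient
    # array returned by np.polyfit and `indices` the detected lane-pixel mask.
    #   left_line = Line()
    #   left_line.add_fitted_line( fit, indices )
    #   smoothed_fit = left_line.best_fit   # averaged over up to the last 5 good fits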
### |
py | 1a35c9459b23d202d799ad5e26bed74a3dedcbfd | # coding=utf-8
# Copyright 2018 The Tensor2Tensor Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Data generators for translation data-sets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tarfile
from tensor2tensor.data_generators import generator_utils
from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.data_generators import text_problems
from tensor2tensor.utils import bleu_hook
from tensor2tensor.utils import mlperf_log
import tensorflow as tf
FLAGS = tf.flags.FLAGS
class TranslateProblem(text_problems.Text2TextProblem):
"""Base class for translation problems."""
def is_generate_per_split(self):
return True
@property
def approx_vocab_size(self):
return 2**15
def source_data_files(self, dataset_split):
"""Files to be passed to compile_data."""
raise NotImplementedError()
def vocab_data_files(self):
"""Files to be passed to get_or_generate_vocab."""
return self.source_data_files(problem.DatasetSplit.TRAIN)
def generate_samples(self, data_dir, tmp_dir, dataset_split):
datasets = self.source_data_files(dataset_split)
tag = "train" if dataset_split == problem.DatasetSplit.TRAIN else "dev"
data_path = compile_data(tmp_dir, datasets, "%s-compiled-%s" % (self.name,
tag))
return text_problems.text2text_txt_iterator(data_path + ".lang1",
data_path + ".lang2")
def generate_text_for_vocab(self, data_dir, tmp_dir):
return generator_utils.generate_lines_for_vocab(tmp_dir,
self.vocab_data_files())
@property
def decode_hooks(self):
return [compute_bleu_summaries]
def compute_bleu_summaries(hook_args):
"""Compute BLEU core summaries using the decoder output.
Args:
hook_args: DecodeHookArgs namedtuple
Returns:
A list of tf.Summary values if hook_args.hparams contains the
reference file and the translated file.
"""
decode_hparams = hook_args.decode_hparams
if (decode_hparams.decode_reference is None or
decode_hparams.decode_to_file is None):
return None
values = []
bleu = 100 * bleu_hook.bleu_wrapper(
decode_hparams.decode_reference, decode_hparams.decode_to_file)
values.append(tf.Summary.Value(tag="BLEU", simple_value=bleu))
tf.logging.info("%s: BLEU = %6.2f" % (decode_hparams.decode_to_file, bleu))
if hook_args.hparams.mlperf_mode:
current_step = decode_hparams.mlperf_decode_step
mlperf_log.transformer_print(
key=mlperf_log.EVAL_TARGET, value=decode_hparams.mlperf_threshold)
mlperf_log.transformer_print(
key=mlperf_log.EVAL_ACCURACY,
value={
"epoch": max(current_step // decode_hparams.iterations_per_loop - 1,
0),
"value": bleu
})
mlperf_log.transformer_print(key=mlperf_log.EVAL_STOP)
if bleu >= decode_hparams.mlperf_threshold:
decode_hparams.set_hparam("mlperf_success", True)
return values
def _preprocess_sgm(line, is_sgm):
"""Preprocessing to strip tags in SGM files."""
if not is_sgm:
return line
# In SGM files, remove <srcset ...>, <p>, <doc ...> lines.
if line.startswith("<srcset") or line.startswith("</srcset"):
return ""
if line.startswith("<doc") or line.startswith("</doc"):
return ""
if line.startswith("<p>") or line.startswith("</p>"):
return ""
# Strip <seg> tags.
line = line.strip()
if line.startswith("<seg") and line.endswith("</seg>"):
i = line.index(">")
return line[i + 1:-6] # Strip first <seg ...> and last </seg>.
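# Illustrative behaviour of _preprocess_sgm (example lines made up for clarity):
#   _preprocess_sgm('<seg id="1">Hello world.</seg>', True)  -> 'Hello world.'
#   _preprocess_sgm('<doc docid="x">', True)                 -> ''
#   _preprocess_sgm('plain text line', False)                -> 'plain text line'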
def compile_data(tmp_dir, datasets, filename):
"""Concatenate all `datasets` and save to `filename`."""
filename = os.path.join(tmp_dir, filename)
lang1_fname = filename + ".lang1"
lang2_fname = filename + ".lang2"
if tf.gfile.Exists(lang1_fname) and tf.gfile.Exists(lang2_fname):
tf.logging.info("Skipping compile data, found files:\n%s\n%s", lang1_fname,
lang2_fname)
return filename
with tf.gfile.GFile(lang1_fname, mode="w") as lang1_resfile:
with tf.gfile.GFile(lang2_fname, mode="w") as lang2_resfile:
for dataset in datasets:
url = dataset[0]
compressed_filename = os.path.basename(url)
compressed_filepath = os.path.join(tmp_dir, compressed_filename)
if url.startswith("http"):
generator_utils.maybe_download(tmp_dir, compressed_filename, url)
if dataset[1][0] == "tsv":
_, src_column, trg_column, glob_pattern = dataset[1]
filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern))
if not filenames:
# Capture *.tgz and *.tar.gz too.
mode = "r:gz" if compressed_filepath.endswith("gz") else "r"
with tarfile.open(compressed_filepath, mode) as corpus_tar:
corpus_tar.extractall(tmp_dir)
filenames = tf.gfile.Glob(os.path.join(tmp_dir, glob_pattern))
for tsv_filename in filenames:
if tsv_filename.endswith(".gz"):
new_filename = tsv_filename.strip(".gz")
generator_utils.gunzip_file(tsv_filename, new_filename)
tsv_filename = new_filename
with tf.gfile.Open(tsv_filename) as tsv_file:
for line in tsv_file:
if line and "\t" in line:
parts = line.split("\t")
source, target = parts[src_column], parts[trg_column]
source, target = source.strip(), target.strip()
if source and target:
lang1_resfile.write(source)
lang1_resfile.write("\n")
lang2_resfile.write(target)
lang2_resfile.write("\n")
else:
lang1_filename, lang2_filename = dataset[1]
lang1_filepath = os.path.join(tmp_dir, lang1_filename)
lang2_filepath = os.path.join(tmp_dir, lang2_filename)
is_sgm = (
lang1_filename.endswith("sgm") and lang2_filename.endswith("sgm"))
if not (tf.gfile.Exists(lang1_filepath) and
tf.gfile.Exists(lang2_filepath)):
# For .tar.gz and .tgz files, we read compressed.
mode = "r:gz" if compressed_filepath.endswith("gz") else "r"
with tarfile.open(compressed_filepath, mode) as corpus_tar:
corpus_tar.extractall(tmp_dir)
if lang1_filepath.endswith(".gz"):
new_filepath = lang1_filepath.strip(".gz")
generator_utils.gunzip_file(lang1_filepath, new_filepath)
lang1_filepath = new_filepath
if lang2_filepath.endswith(".gz"):
new_filepath = lang2_filepath.strip(".gz")
generator_utils.gunzip_file(lang2_filepath, new_filepath)
lang2_filepath = new_filepath
for example in text_problems.text2text_txt_iterator(
lang1_filepath, lang2_filepath):
line1res = _preprocess_sgm(example["inputs"], is_sgm)
line2res = _preprocess_sgm(example["targets"], is_sgm)
if line1res and line2res:
lang1_resfile.write(line1res)
lang1_resfile.write("\n")
lang2_resfile.write(line2res)
lang2_resfile.write("\n")
return filename
class TranslateDistillProblem(TranslateProblem):
"""Base class for translation problems."""
def is_generate_per_split(self):
return True
def example_reading_spec(self):
data_fields = {"dist_targets": tf.VarLenFeature(tf.int64)}
if self.has_inputs:
data_fields["inputs"] = tf.VarLenFeature(tf.int64)
# hack: ignoring true targets and putting dist_targets in targets
data_items_to_decoders = {
"inputs": tf.contrib.slim.tfexample_decoder.Tensor("inputs"),
"targets": tf.contrib.slim.tfexample_decoder.Tensor("dist_targets"),
}
return (data_fields, data_items_to_decoders)
def get_or_create_vocab(self, data_dir, tmp_dir, force_get=False):
"""Get vocab for distill problems."""
    # We assume that the vocab file is present in the data_dir directory where
    # the generated data will be stored.
vocab_filepath = os.path.join(data_dir, self.vocab_filename)
encoder = text_encoder.SubwordTextEncoder(vocab_filepath)
return encoder
def generate_encoded_samples(self, data_dir, tmp_dir, dataset_split):
generator = self.generate_samples(data_dir, tmp_dir, dataset_split)
vocab = self.get_or_create_vocab(data_dir, tmp_dir)
# For each example, encode the text and append EOS ID.
for sample in generator:
if self.has_inputs:
sample["inputs"] = vocab.encode(sample["inputs"])
sample["inputs"].append(text_encoder.EOS_ID)
sample["targets"] = vocab.encode(sample["targets"])
sample["targets"].append(text_encoder.EOS_ID)
sample["dist_targets"] = vocab.encode(sample["dist_targets"])
sample["dist_targets"].append(text_encoder.EOS_ID)
yield sample
def generate_samples(self, data_dir, tmp_dir, dataset_split):
data_path = self.source_data_files(dataset_split)
assert tf.gfile.Exists(data_path)
return text_problems.text2text_distill_iterator(data_path + "inputs",
data_path + "gold",
data_path + "prediction")
|
py | 1a35c977f6d113ad0dd28ecccd4e6ea7065061f9 | """
Types used to represent a full function/module as an Abstract Syntax Tree.
Most types are small, and are merely used as tokens in the AST. A tree diagram
has been included below to illustrate the relationships between the AST types.
AST Type Tree
-------------
::
*Basic*
|
|
CodegenAST
|
|--->AssignmentBase
| |--->Assignment
| |--->AugmentedAssignment
| |--->AddAugmentedAssignment
| |--->SubAugmentedAssignment
| |--->MulAugmentedAssignment
| |--->DivAugmentedAssignment
| |--->ModAugmentedAssignment
|
|--->CodeBlock
|
|
|--->Token
|--->Attribute
|--->For
|--->String
| |--->QuotedString
| |--->Comment
|--->Type
| |--->IntBaseType
| | |--->_SizedIntType
| | |--->SignedIntType
| | |--->UnsignedIntType
| |--->FloatBaseType
| |--->FloatType
| |--->ComplexBaseType
| |--->ComplexType
|--->Node
| |--->Variable
| | |---> Pointer
| |--->FunctionPrototype
| |--->FunctionDefinition
|--->Element
|--->Declaration
|--->While
|--->Scope
|--->Stream
|--->Print
|--->FunctionCall
|--->BreakToken
|--->ContinueToken
|--->NoneToken
|--->Return
Predefined types
----------------
A number of ``Type`` instances are provided in the ``sympy.codegen.ast`` module
for convenience. Perhaps the two most common ones for code-generation (of numeric
codes) are ``float32`` and ``float64`` (known as single and double precision respectively).
There are also precision generic versions of Types (for which the codeprinters selects the
underlying data type at time of printing): ``real``, ``integer``, ``complex_``, ``bool_``.
The other ``Type`` instances defined are:
- ``intc``: Integer type used by C's "int".
- ``intp``: Integer type used by C's "unsigned".
- ``int8``, ``int16``, ``int32``, ``int64``: n-bit integers.
- ``uint8``, ``uint16``, ``uint32``, ``uint64``: n-bit unsigned integers.
- ``float80``: known as "extended precision" on modern x86/amd64 hardware.
- ``complex64``: Complex number represented by two ``float32`` numbers
- ``complex128``: Complex number represented by two ``float64`` numbers
Using the nodes
---------------
It is possible to construct simple algorithms using the AST nodes. Let's construct a loop applying
Newton's method::
>>> from sympy import symbols, cos
>>> from sympy.codegen.ast import While, Assignment, aug_assign, Print
>>> t, dx, x = symbols('tol delta val')
>>> expr = cos(x) - x**3
>>> whl = While(abs(dx) > t, [
... Assignment(dx, -expr/expr.diff(x)),
... aug_assign(x, '+', dx),
... Print([x])
... ])
>>> from sympy import pycode
>>> py_str = pycode(whl)
>>> print(py_str)
while (abs(delta) > tol):
delta = (val**3 - math.cos(val))/(-3*val**2 - math.sin(val))
val += delta
print(val)
>>> import math
>>> tol, val, delta = 1e-5, 0.5, float('inf')
>>> exec(py_str)
1.1121416371
0.909672693737
0.867263818209
0.865477135298
0.865474033111
>>> print('%3.1g' % (math.cos(val) - val**3))
-3e-11
If we want to generate Fortran code for the same while loop we simple call ``fcode``::
>>> from sympy import fcode
>>> print(fcode(whl, standard=2003, source_format='free'))
do while (abs(delta) > tol)
delta = (val**3 - cos(val))/(-3*val**2 - sin(val))
val = val + delta
print *, val
end do
There is a function constructing a loop (or a complete function) like this in
:mod:`sympy.codegen.algorithms`.
"""
from typing import Any, Dict as tDict, List
from collections import defaultdict
from sympy.core.relational import (Ge, Gt, Le, Lt)
from sympy.core import Symbol, Tuple, Dummy
from sympy.core.basic import Basic
from sympy.core.expr import Expr, Atom
from sympy.core.numbers import Float, Integer, oo
from sympy.core.sympify import _sympify, sympify, SympifyError
from sympy.utilities.iterables import (iterable, topological_sort,
numbered_symbols, filter_symbols)
def _mk_Tuple(args):
"""
Create a SymPy Tuple object from an iterable, converting Python strings to
AST strings.
Parameters
==========
args: iterable
Arguments to :class:`sympy.Tuple`.
Returns
=======
sympy.Tuple
"""
args = [String(arg) if isinstance(arg, str) else arg for arg in args]
return Tuple(*args)
class CodegenAST(Basic):
pass
class Token(CodegenAST):
""" Base class for the AST types.
Explanation
===========
Defining fields are set in ``__slots__``. Attributes (defined in __slots__)
are only allowed to contain instances of Basic (unless atomic, see
``String``). The arguments to ``__new__()`` correspond to the attributes in
    the order defined in ``__slots__``. The ``defaults`` class attribute is a
dictionary mapping attribute names to their default values.
Subclasses should not need to override the ``__new__()`` method. They may
define a class or static method named ``_construct_<attr>`` for each
attribute to process the value passed to ``__new__()``. Attributes listed
in the class attribute ``not_in_args`` are not passed to :class:`~.Basic`.
"""
__slots__ = ()
defaults = {} # type: tDict[str, Any]
not_in_args = [] # type: List[str]
indented_args = ['body']
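    # A minimal illustrative subclass (hypothetical, not part of this module):
    #
    #   class MyNode(Token):
    #       __slots__ = ('name', 'value')
    #       defaults = {'value': none}
    #       _construct_name = String   # coerce the first argument to String
    #
    #   MyNode('x')                    # name becomes String('x'), value defaults to none
    #   MyNode('x', value=Integer(1))  # keyword arguments follow __slots__ order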
@property
def is_Atom(self):
return len(self.__slots__) == 0
@classmethod
def _get_constructor(cls, attr):
""" Get the constructor function for an attribute by name. """
return getattr(cls, '_construct_%s' % attr, lambda x: x)
@classmethod
def _construct(cls, attr, arg):
""" Construct an attribute value from argument passed to ``__new__()``. """
        # arg may be ``NoneToken()``, so the comparison is done using == instead of the ``is`` operator
if arg == None:
return cls.defaults.get(attr, none)
else:
if isinstance(arg, Dummy): # SymPy's replace uses Dummy instances
return arg
else:
return cls._get_constructor(attr)(arg)
def __new__(cls, *args, **kwargs):
# Pass through existing instances when given as sole argument
if len(args) == 1 and not kwargs and isinstance(args[0], cls):
return args[0]
if len(args) > len(cls.__slots__):
raise ValueError("Too many arguments (%d), expected at most %d" % (len(args), len(cls.__slots__)))
attrvals = []
# Process positional arguments
for attrname, argval in zip(cls.__slots__, args):
if attrname in kwargs:
raise TypeError('Got multiple values for attribute %r' % attrname)
attrvals.append(cls._construct(attrname, argval))
# Process keyword arguments
for attrname in cls.__slots__[len(args):]:
if attrname in kwargs:
argval = kwargs.pop(attrname)
elif attrname in cls.defaults:
argval = cls.defaults[attrname]
else:
raise TypeError('No value for %r given and attribute has no default' % attrname)
attrvals.append(cls._construct(attrname, argval))
if kwargs:
raise ValueError("Unknown keyword arguments: %s" % ' '.join(kwargs))
# Parent constructor
basic_args = [
val for attr, val in zip(cls.__slots__, attrvals)
if attr not in cls.not_in_args
]
obj = CodegenAST.__new__(cls, *basic_args)
# Set attributes
for attr, arg in zip(cls.__slots__, attrvals):
setattr(obj, attr, arg)
return obj
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
for attr in self.__slots__:
if getattr(self, attr) != getattr(other, attr):
return False
return True
def _hashable_content(self):
return tuple([getattr(self, attr) for attr in self.__slots__])
def __hash__(self):
return super().__hash__()
def _joiner(self, k, indent_level):
return (',\n' + ' '*indent_level) if k in self.indented_args else ', '
def _indented(self, printer, k, v, *args, **kwargs):
il = printer._context['indent_level']
def _print(arg):
if isinstance(arg, Token):
return printer._print(arg, *args, joiner=self._joiner(k, il), **kwargs)
else:
return printer._print(arg, *args, **kwargs)
if isinstance(v, Tuple):
joined = self._joiner(k, il).join([_print(arg) for arg in v.args])
if k in self.indented_args:
return '(\n' + ' '*il + joined + ',\n' + ' '*(il - 4) + ')'
else:
return ('({0},)' if len(v.args) == 1 else '({0})').format(joined)
else:
return _print(v)
def _sympyrepr(self, printer, *args, joiner=', ', **kwargs):
from sympy.printing.printer import printer_context
exclude = kwargs.get('exclude', ())
values = [getattr(self, k) for k in self.__slots__]
indent_level = printer._context.get('indent_level', 0)
arg_reprs = []
for i, (attr, value) in enumerate(zip(self.__slots__, values)):
if attr in exclude:
continue
# Skip attributes which have the default value
if attr in self.defaults and value == self.defaults[attr]:
continue
ilvl = indent_level + 4 if attr in self.indented_args else 0
with printer_context(printer, indent_level=ilvl):
indented = self._indented(printer, attr, value, *args, **kwargs)
arg_reprs.append(('{1}' if i == 0 else '{0}={1}').format(attr, indented.lstrip()))
return "{}({})".format(self.__class__.__name__, joiner.join(arg_reprs))
_sympystr = _sympyrepr
def __repr__(self): # sympy.core.Basic.__repr__ uses sstr
from sympy.printing import srepr
return srepr(self)
def kwargs(self, exclude=(), apply=None):
""" Get instance's attributes as dict of keyword arguments.
Parameters
==========
exclude : collection of str
Collection of keywords to exclude.
apply : callable, optional
Function to apply to all values.
"""
kwargs = {k: getattr(self, k) for k in self.__slots__ if k not in exclude}
if apply is not None:
return {k: apply(v) for k, v in kwargs.items()}
else:
return kwargs
class BreakToken(Token):
""" Represents 'break' in C/Python ('exit' in Fortran).
Use the premade instance ``break_`` or instantiate manually.
Examples
========
>>> from sympy import ccode, fcode
>>> from sympy.codegen.ast import break_
>>> ccode(break_)
'break'
>>> fcode(break_, source_format='free')
'exit'
"""
break_ = BreakToken()
class ContinueToken(Token):
""" Represents 'continue' in C/Python ('cycle' in Fortran)
Use the premade instance ``continue_`` or instantiate manually.
Examples
========
>>> from sympy import ccode, fcode
>>> from sympy.codegen.ast import continue_
>>> ccode(continue_)
'continue'
>>> fcode(continue_, source_format='free')
'cycle'
"""
continue_ = ContinueToken()
class NoneToken(Token):
""" The AST equivalence of Python's NoneType
The corresponding instance of Python's ``None`` is ``none``.
Examples
========
>>> from sympy.codegen.ast import none, Variable
>>> from sympy import pycode
>>> print(pycode(Variable('x').as_Declaration(value=none)))
x = None
"""
def __eq__(self, other):
return other is None or isinstance(other, NoneToken)
def _hashable_content(self):
return ()
def __hash__(self):
return super().__hash__()
none = NoneToken()
class AssignmentBase(CodegenAST):
""" Abstract base class for Assignment and AugmentedAssignment.
Attributes:
===========
op : str
Symbol for assignment operator, e.g. "=", "+=", etc.
"""
def __new__(cls, lhs, rhs):
lhs = _sympify(lhs)
rhs = _sympify(rhs)
cls._check_args(lhs, rhs)
return super().__new__(cls, lhs, rhs)
@property
def lhs(self):
return self.args[0]
@property
def rhs(self):
return self.args[1]
@classmethod
def _check_args(cls, lhs, rhs):
""" Check arguments to __new__ and raise exception if any problems found.
Derived classes may wish to override this.
"""
from sympy.matrices.expressions.matexpr import (
MatrixElement, MatrixSymbol)
from sympy.tensor.indexed import Indexed
# Tuple of things that can be on the lhs of an assignment
assignable = (Symbol, MatrixSymbol, MatrixElement, Indexed, Element, Variable)
if not isinstance(lhs, assignable):
raise TypeError("Cannot assign to lhs of type %s." % type(lhs))
# Indexed types implement shape, but don't define it until later. This
# causes issues in assignment validation. For now, matrices are defined
# as anything with a shape that is not an Indexed
lhs_is_mat = hasattr(lhs, 'shape') and not isinstance(lhs, Indexed)
rhs_is_mat = hasattr(rhs, 'shape') and not isinstance(rhs, Indexed)
# If lhs and rhs have same structure, then this assignment is ok
if lhs_is_mat:
if not rhs_is_mat:
raise ValueError("Cannot assign a scalar to a matrix.")
elif lhs.shape != rhs.shape:
raise ValueError("Dimensions of lhs and rhs do not align.")
elif rhs_is_mat and not lhs_is_mat:
raise ValueError("Cannot assign a matrix to a scalar.")
class Assignment(AssignmentBase):
"""
Represents variable assignment for code generation.
Parameters
==========
lhs : Expr
SymPy object representing the lhs of the expression. These should be
singular objects, such as one would use in writing code. Notable types
include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that
subclass these types are also supported.
rhs : Expr
SymPy object representing the rhs of the expression. This can be any
type, provided its shape corresponds to that of the lhs. For example,
a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as
the dimensions will not align.
Examples
========
>>> from sympy import symbols, MatrixSymbol, Matrix
>>> from sympy.codegen.ast import Assignment
>>> x, y, z = symbols('x, y, z')
>>> Assignment(x, y)
Assignment(x, y)
>>> Assignment(x, 0)
Assignment(x, 0)
>>> A = MatrixSymbol('A', 1, 3)
>>> mat = Matrix([x, y, z]).T
>>> Assignment(A, mat)
Assignment(A, Matrix([[x, y, z]]))
>>> Assignment(A[0, 1], x)
Assignment(A[0, 1], x)
"""
op = ':='
class AugmentedAssignment(AssignmentBase):
"""
Base class for augmented assignments.
Attributes:
===========
binop : str
Symbol for binary operation being applied in the assignment, such as "+",
"*", etc.
"""
binop = None # type: str
@property
def op(self):
return self.binop + '='
class AddAugmentedAssignment(AugmentedAssignment):
binop = '+'
class SubAugmentedAssignment(AugmentedAssignment):
binop = '-'
class MulAugmentedAssignment(AugmentedAssignment):
binop = '*'
class DivAugmentedAssignment(AugmentedAssignment):
binop = '/'
class ModAugmentedAssignment(AugmentedAssignment):
binop = '%'
# Mapping from binary op strings to AugmentedAssignment subclasses
augassign_classes = {
cls.binop: cls for cls in [
AddAugmentedAssignment, SubAugmentedAssignment, MulAugmentedAssignment,
DivAugmentedAssignment, ModAugmentedAssignment
]
}
def aug_assign(lhs, op, rhs):
"""
Create 'lhs op= rhs'.
Explanation
===========
Represents augmented variable assignment for code generation. This is a
convenience function. You can also use the AugmentedAssignment classes
directly, like AddAugmentedAssignment(x, y).
Parameters
==========
lhs : Expr
SymPy object representing the lhs of the expression. These should be
singular objects, such as one would use in writing code. Notable types
include Symbol, MatrixSymbol, MatrixElement, and Indexed. Types that
subclass these types are also supported.
op : str
Operator (+, -, /, \\*, %).
rhs : Expr
SymPy object representing the rhs of the expression. This can be any
type, provided its shape corresponds to that of the lhs. For example,
a Matrix type can be assigned to MatrixSymbol, but not to Symbol, as
the dimensions will not align.
Examples
========
>>> from sympy import symbols
>>> from sympy.codegen.ast import aug_assign
>>> x, y = symbols('x, y')
>>> aug_assign(x, '+', y)
AddAugmentedAssignment(x, y)
"""
if op not in augassign_classes:
raise ValueError("Unrecognized operator %s" % op)
return augassign_classes[op](lhs, rhs)
class CodeBlock(CodegenAST):
"""
Represents a block of code.
Explanation
===========
For now only assignments are supported. This restriction will be lifted in
the future.
Useful attributes on this object are:
``left_hand_sides``:
Tuple of left-hand sides of assignments, in order.
``left_hand_sides``:
Tuple of right-hand sides of assignments, in order.
``free_symbols``: Free symbols of the expressions in the right-hand sides
which do not appear in the left-hand side of an assignment.
Useful methods on this object are:
``topological_sort``:
Class method. Return a CodeBlock with assignments
sorted so that variables are assigned before they
are used.
``cse``:
Return a new CodeBlock with common subexpressions eliminated and
pulled out as assignments.
Examples
========
>>> from sympy import symbols, ccode
>>> from sympy.codegen.ast import CodeBlock, Assignment
>>> x, y = symbols('x y')
>>> c = CodeBlock(Assignment(x, 1), Assignment(y, x + 1))
>>> print(ccode(c))
x = 1;
y = x + 1;
"""
def __new__(cls, *args):
left_hand_sides = []
right_hand_sides = []
for i in args:
if isinstance(i, Assignment):
lhs, rhs = i.args
left_hand_sides.append(lhs)
right_hand_sides.append(rhs)
obj = CodegenAST.__new__(cls, *args)
obj.left_hand_sides = Tuple(*left_hand_sides)
obj.right_hand_sides = Tuple(*right_hand_sides)
return obj
def __iter__(self):
return iter(self.args)
def _sympyrepr(self, printer, *args, **kwargs):
il = printer._context.get('indent_level', 0)
joiner = ',\n' + ' '*il
joined = joiner.join(map(printer._print, self.args))
return ('{}(\n'.format(' '*(il-4) + self.__class__.__name__,) +
' '*il + joined + '\n' + ' '*(il - 4) + ')')
_sympystr = _sympyrepr
@property
def free_symbols(self):
return super().free_symbols - set(self.left_hand_sides)
@classmethod
def topological_sort(cls, assignments):
"""
Return a CodeBlock with topologically sorted assignments so that
variables are assigned before they are used.
        Explanation
        ===========
The existing order of assignments is preserved as much as possible.
This function assumes that variables are assigned to only once.
This is a class constructor so that the default constructor for
CodeBlock can error when variables are used before they are assigned.
Examples
========
>>> from sympy import symbols
>>> from sympy.codegen.ast import CodeBlock, Assignment
>>> x, y, z = symbols('x y z')
>>> assignments = [
... Assignment(x, y + z),
... Assignment(y, z + 1),
... Assignment(z, 2),
... ]
>>> CodeBlock.topological_sort(assignments)
CodeBlock(
Assignment(z, 2),
Assignment(y, z + 1),
Assignment(x, y + z)
)
"""
if not all(isinstance(i, Assignment) for i in assignments):
# Will support more things later
raise NotImplementedError("CodeBlock.topological_sort only supports Assignments")
if any(isinstance(i, AugmentedAssignment) for i in assignments):
raise NotImplementedError("CodeBlock.topological_sort does not yet work with AugmentedAssignments")
# Create a graph where the nodes are assignments and there is a directed edge
# between nodes that use a variable and nodes that assign that
# variable, like
# [(x := 1, y := x + 1), (x := 1, z := y + z), (y := x + 1, z := y + z)]
# If we then topologically sort these nodes, they will be in
# assignment order, like
# x := 1
# y := x + 1
# z := y + z
# A = The nodes
#
# enumerate keeps nodes in the same order they are already in if
# possible. It will also allow us to handle duplicate assignments to
# the same variable when those are implemented.
A = list(enumerate(assignments))
# var_map = {variable: [nodes for which this variable is assigned to]}
# like {x: [(1, x := y + z), (4, x := 2 * w)], ...}
var_map = defaultdict(list)
for node in A:
i, a = node
var_map[a.lhs].append(node)
# E = Edges in the graph
E = []
for dst_node in A:
i, a = dst_node
for s in a.rhs.free_symbols:
for src_node in var_map[s]:
E.append((src_node, dst_node))
ordered_assignments = topological_sort([A, E])
# De-enumerate the result
return cls(*[a for i, a in ordered_assignments])
def cse(self, symbols=None, optimizations=None, postprocess=None,
order='canonical'):
"""
Return a new code block with common subexpressions eliminated.
Explanation
===========
See the docstring of :func:`sympy.simplify.cse_main.cse` for more
information.
Examples
========
>>> from sympy import symbols, sin
>>> from sympy.codegen.ast import CodeBlock, Assignment
>>> x, y, z = symbols('x y z')
>>> c = CodeBlock(
... Assignment(x, 1),
... Assignment(y, sin(x) + 1),
... Assignment(z, sin(x) - 1),
... )
...
>>> c.cse()
CodeBlock(
Assignment(x, 1),
Assignment(x0, sin(x)),
Assignment(y, x0 + 1),
Assignment(z, x0 - 1)
)
"""
from sympy.simplify.cse_main import cse
# Check that the CodeBlock only contains assignments to unique variables
if not all(isinstance(i, Assignment) for i in self.args):
# Will support more things later
raise NotImplementedError("CodeBlock.cse only supports Assignments")
if any(isinstance(i, AugmentedAssignment) for i in self.args):
raise NotImplementedError("CodeBlock.cse does not yet work with AugmentedAssignments")
for i, lhs in enumerate(self.left_hand_sides):
if lhs in self.left_hand_sides[:i]:
raise NotImplementedError("Duplicate assignments to the same "
"variable are not yet supported (%s)" % lhs)
# Ensure new symbols for subexpressions do not conflict with existing
existing_symbols = self.atoms(Symbol)
if symbols is None:
symbols = numbered_symbols()
symbols = filter_symbols(symbols, existing_symbols)
replacements, reduced_exprs = cse(list(self.right_hand_sides),
symbols=symbols, optimizations=optimizations, postprocess=postprocess,
order=order)
new_block = [Assignment(var, expr) for var, expr in
zip(self.left_hand_sides, reduced_exprs)]
new_assignments = [Assignment(var, expr) for var, expr in replacements]
return self.topological_sort(new_assignments + new_block)
class For(Token):
"""Represents a 'for-loop' in the code.
Expressions are of the form:
"for target in iter:
body..."
Parameters
==========
target : symbol
iter : iterable
body : CodeBlock or iterable
        When passed an iterable it is used to instantiate a CodeBlock.
Examples
========
>>> from sympy import symbols, Range
>>> from sympy.codegen.ast import aug_assign, For
>>> x, i, j, k = symbols('x i j k')
>>> for_i = For(i, Range(10), [aug_assign(x, '+', i*j*k)])
>>> for_i # doctest: -NORMALIZE_WHITESPACE
For(i, iterable=Range(0, 10, 1), body=CodeBlock(
AddAugmentedAssignment(x, i*j*k)
))
>>> for_ji = For(j, Range(7), [for_i])
>>> for_ji # doctest: -NORMALIZE_WHITESPACE
For(j, iterable=Range(0, 7, 1), body=CodeBlock(
For(i, iterable=Range(0, 10, 1), body=CodeBlock(
AddAugmentedAssignment(x, i*j*k)
))
))
>>> for_kji =For(k, Range(5), [for_ji])
>>> for_kji # doctest: -NORMALIZE_WHITESPACE
For(k, iterable=Range(0, 5, 1), body=CodeBlock(
For(j, iterable=Range(0, 7, 1), body=CodeBlock(
For(i, iterable=Range(0, 10, 1), body=CodeBlock(
AddAugmentedAssignment(x, i*j*k)
))
))
))
"""
__slots__ = ('target', 'iterable', 'body')
_construct_target = staticmethod(_sympify)
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
@classmethod
def _construct_iterable(cls, itr):
if not iterable(itr):
raise TypeError("iterable must be an iterable")
if isinstance(itr, list): # _sympify errors on lists because they are mutable
itr = tuple(itr)
return _sympify(itr)
class String(Atom, Token):
""" SymPy object representing a string.
Atomic object which is not an expression (as opposed to Symbol).
Parameters
==========
text : str
Examples
========
>>> from sympy.codegen.ast import String
>>> f = String('foo')
>>> f
foo
>>> str(f)
'foo'
>>> f.text
'foo'
>>> print(repr(f))
String('foo')
"""
__slots__ = ('text',)
not_in_args = ['text']
is_Atom = True
@classmethod
def _construct_text(cls, text):
if not isinstance(text, str):
raise TypeError("Argument text is not a string type.")
return text
def _sympystr(self, printer, *args, **kwargs):
return self.text
def kwargs(self, exclude = (), apply = None):
return {}
#to be removed when Atom is given a suitable func
@property
def func(self):
return lambda: self
def _latex(self, printer):
from sympy.printing.latex import latex_escape
return r'\texttt{{"{}"}}'.format(latex_escape(self.text))
class QuotedString(String):
""" Represents a string which should be printed with quotes. """
class Comment(String):
""" Represents a comment. """
class Node(Token):
""" Subclass of Token, carrying the attribute 'attrs' (Tuple)
Examples
========
>>> from sympy.codegen.ast import Node, value_const, pointer_const
>>> n1 = Node([value_const])
>>> n1.attr_params('value_const') # get the parameters of attribute (by name)
()
>>> from sympy.codegen.fnodes import dimension
>>> n2 = Node([value_const, dimension(5, 3)])
>>> n2.attr_params(value_const) # get the parameters of attribute (by Attribute instance)
()
>>> n2.attr_params('dimension') # get the parameters of attribute (by name)
(5, 3)
>>> n2.attr_params(pointer_const) is None
True
"""
__slots__ = ('attrs',)
defaults = {'attrs': Tuple()} # type: tDict[str, Any]
_construct_attrs = staticmethod(_mk_Tuple)
def attr_params(self, looking_for):
""" Returns the parameters of the Attribute with name ``looking_for`` in self.attrs """
for attr in self.attrs:
if str(attr.name) == str(looking_for):
return attr.parameters
class Type(Token):
""" Represents a type.
Explanation
===========
The naming is a super-set of NumPy naming. Type has a classmethod
``from_expr`` which offer type deduction. It also has a method
``cast_check`` which casts the argument to its type, possibly raising an
exception if rounding error is not within tolerances, or if the value is not
representable by the underlying data type (e.g. unsigned integers).
Parameters
==========
name : str
Name of the type, e.g. ``object``, ``int16``, ``float16`` (where the latter two
would use the ``Type`` sub-classes ``IntType`` and ``FloatType`` respectively).
If a ``Type`` instance is given, the said instance is returned.
Examples
========
>>> from sympy.codegen.ast import Type
>>> t = Type.from_expr(42)
>>> t
integer
>>> print(repr(t))
IntBaseType(String('integer'))
>>> from sympy.codegen.ast import uint8
>>> uint8.cast_check(-1) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Minimum value for data type bigger than new value.
>>> from sympy.codegen.ast import float32
>>> v6 = 0.123456
>>> float32.cast_check(v6)
0.123456
>>> v10 = 12345.67894
>>> float32.cast_check(v10) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Casting gives a significantly different value.
>>> boost_mp50 = Type('boost::multiprecision::cpp_dec_float_50')
>>> from sympy import cxxcode
>>> from sympy.codegen.ast import Declaration, Variable
>>> cxxcode(Declaration(Variable('x', type=boost_mp50)))
'boost::multiprecision::cpp_dec_float_50 x'
References
==========
.. [1] https://docs.scipy.org/doc/numpy/user/basics.types.html
"""
__slots__ = ('name',)
_construct_name = String
def _sympystr(self, printer, *args, **kwargs):
return str(self.name)
@classmethod
def from_expr(cls, expr):
""" Deduces type from an expression or a ``Symbol``.
Parameters
==========
expr : number or SymPy object
The type will be deduced from type or properties.
Examples
========
>>> from sympy.codegen.ast import Type, integer, complex_
>>> Type.from_expr(2) == integer
True
>>> from sympy import Symbol
>>> Type.from_expr(Symbol('z', complex=True)) == complex_
True
>>> Type.from_expr(sum) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Could not deduce type from expr.
Raises
======
ValueError when type deduction fails.
"""
if isinstance(expr, (float, Float)):
return real
if isinstance(expr, (int, Integer)) or getattr(expr, 'is_integer', False):
return integer
if getattr(expr, 'is_real', False):
return real
if isinstance(expr, complex) or getattr(expr, 'is_complex', False):
return complex_
if isinstance(expr, bool) or getattr(expr, 'is_Relational', False):
return bool_
else:
raise ValueError("Could not deduce type from expr.")
def _check(self, value):
pass
def cast_check(self, value, rtol=None, atol=0, precision_targets=None):
""" Casts a value to the data type of the instance.
Parameters
==========
value : number
rtol : floating point number
Relative tolerance. (will be deduced if not given).
atol : floating point number
Absolute tolerance (in addition to ``rtol``).
type_aliases : dict
Maps substitutions for Type, e.g. {integer: int64, real: float32}
Examples
========
>>> from sympy.codegen.ast import integer, float32, int8
>>> integer.cast_check(3.0) == 3
True
>>> float32.cast_check(1e-40) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Minimum value for data type bigger than new value.
>>> int8.cast_check(256) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Maximum value for data type smaller than new value.
>>> v10 = 12345.67894
>>> float32.cast_check(v10) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Casting gives a significantly different value.
>>> from sympy.codegen.ast import float64
>>> float64.cast_check(v10)
12345.67894
>>> from sympy import Float
>>> v18 = Float('0.123456789012345646')
>>> float64.cast_check(v18)
Traceback (most recent call last):
...
ValueError: Casting gives a significantly different value.
>>> from sympy.codegen.ast import float80
>>> float80.cast_check(v18)
0.123456789012345649
"""
val = sympify(value)
ten = Integer(10)
exp10 = getattr(self, 'decimal_dig', None)
if rtol is None:
rtol = 1e-15 if exp10 is None else 2.0*ten**(-exp10)
def tol(num):
return atol + rtol*abs(num)
new_val = self.cast_nocheck(value)
self._check(new_val)
delta = new_val - val
if abs(delta) > tol(val): # rounding, e.g. int(3.5) != 3.5
raise ValueError("Casting gives a significantly different value.")
return new_val
def _latex(self, printer):
from sympy.printing.latex import latex_escape
type_name = latex_escape(self.__class__.__name__)
name = latex_escape(self.name.text)
return r"\text{{{}}}\left(\texttt{{{}}}\right)".format(type_name, name)
class IntBaseType(Type):
""" Integer base type, contains no size information. """
__slots__ = ('name',)
cast_nocheck = lambda self, i: Integer(int(i))
class _SizedIntType(IntBaseType):
__slots__ = ('name', 'nbits',)
_construct_nbits = Integer
def _check(self, value):
if value < self.min:
raise ValueError("Value is too small: %d < %d" % (value, self.min))
if value > self.max:
raise ValueError("Value is too big: %d > %d" % (value, self.max))
class SignedIntType(_SizedIntType):
""" Represents a signed integer type. """
@property
def min(self):
return -2**(self.nbits-1)
@property
def max(self):
return 2**(self.nbits-1) - 1
class UnsignedIntType(_SizedIntType):
""" Represents an unsigned integer type. """
@property
def min(self):
return 0
@property
def max(self):
return 2**self.nbits - 1
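# Illustrative note (added, not part of the original source): with the classes
# above, SignedIntType('int8', 8) yields min == -128 and max == 127, while
# UnsignedIntType('uint8', 8) yields min == 0 and max == 255.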
two = Integer(2)
class FloatBaseType(Type):
""" Represents a floating point number type. """
cast_nocheck = Float
class FloatType(FloatBaseType):
""" Represents a floating point type with fixed bit width.
Base 2 & one sign bit is assumed.
Parameters
==========
name : str
Name of the type.
nbits : integer
Number of bits used (storage).
nmant : integer
Number of bits used to represent the mantissa.
nexp : integer
        Number of bits used to represent the exponent.
Examples
========
>>> from sympy import S
>>> from sympy.codegen.ast import FloatType
>>> half_precision = FloatType('f16', nbits=16, nmant=10, nexp=5)
>>> half_precision.max
65504
>>> half_precision.tiny == S(2)**-14
True
>>> half_precision.eps == S(2)**-10
True
>>> half_precision.dig == 3
True
>>> half_precision.decimal_dig == 5
True
>>> half_precision.cast_check(1.0)
1.0
>>> half_precision.cast_check(1e5) # doctest: +ELLIPSIS
Traceback (most recent call last):
...
ValueError: Maximum value for data type smaller than new value.
"""
__slots__ = ('name', 'nbits', 'nmant', 'nexp',)
_construct_nbits = _construct_nmant = _construct_nexp = Integer
@property
def max_exponent(self):
""" The largest positive number n, such that 2**(n - 1) is a representable finite value. """
# cf. C++'s ``std::numeric_limits::max_exponent``
return two**(self.nexp - 1)
@property
def min_exponent(self):
""" The lowest negative number n, such that 2**(n - 1) is a valid normalized number. """
# cf. C++'s ``std::numeric_limits::min_exponent``
return 3 - self.max_exponent
@property
def max(self):
""" Maximum value representable. """
return (1 - two**-(self.nmant+1))*two**self.max_exponent
@property
def tiny(self):
""" The minimum positive normalized value. """
# See C macros: FLT_MIN, DBL_MIN, LDBL_MIN
# or C++'s ``std::numeric_limits::min``
# or numpy.finfo(dtype).tiny
return two**(self.min_exponent - 1)
@property
def eps(self):
""" Difference between 1.0 and the next representable value. """
return two**(-self.nmant)
@property
def dig(self):
""" Number of decimal digits that are guaranteed to be preserved in text.
When converting text -> float -> text, you are guaranteed that at least ``dig``
number of digits are preserved with respect to rounding or overflow.
"""
from sympy.functions import floor, log
return floor(self.nmant * log(2)/log(10))
@property
def decimal_dig(self):
""" Number of digits needed to store & load without loss.
Explanation
===========
Number of decimal digits needed to guarantee that two consecutive conversions
        (float -> text -> float) are idempotent. This is useful when one does not want
        to lose precision due to rounding errors when storing a floating point value
as text.
"""
from sympy.functions import ceiling, log
return ceiling((self.nmant + 1) * log(2)/log(10) + 1)
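    # Worked example (added for illustration): for IEEE 754 binary64 (nmant=52),
    # decimal_dig = ceiling(53*log(2)/log(10) + 1) = ceiling(16.95...) = 17, i.e.
    # 17 significant decimal digits are enough to round-trip a double exactly.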
def cast_nocheck(self, value):
""" Casts without checking if out of bounds or subnormal. """
if value == oo: # float(oo) or oo
return float(oo)
elif value == -oo: # float(-oo) or -oo
return float(-oo)
return Float(str(sympify(value).evalf(self.decimal_dig)), self.decimal_dig)
def _check(self, value):
if value < -self.max:
raise ValueError("Value is too small: %d < %d" % (value, -self.max))
if value > self.max:
raise ValueError("Value is too big: %d > %d" % (value, self.max))
if abs(value) < self.tiny:
raise ValueError("Smallest (absolute) value for data type bigger than new value.")
class ComplexBaseType(FloatBaseType):
def cast_nocheck(self, value):
""" Casts without checking if out of bounds or subnormal. """
from sympy.functions import re, im
return (
super().cast_nocheck(re(value)) +
super().cast_nocheck(im(value))*1j
)
def _check(self, value):
from sympy.functions import re, im
super()._check(re(value))
super()._check(im(value))
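    # Illustrative note (added): for e.g. complex128, cast_nocheck casts the real
    # and imaginary parts separately via FloatType.cast_nocheck and recombines
    # them as re + im*1j; _check validates both parts in the same way.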
class ComplexType(ComplexBaseType, FloatType):
""" Represents a complex floating point number. """
# NumPy types:
intc = IntBaseType('intc')
intp = IntBaseType('intp')
int8 = SignedIntType('int8', 8)
int16 = SignedIntType('int16', 16)
int32 = SignedIntType('int32', 32)
int64 = SignedIntType('int64', 64)
uint8 = UnsignedIntType('uint8', 8)
uint16 = UnsignedIntType('uint16', 16)
uint32 = UnsignedIntType('uint32', 32)
uint64 = UnsignedIntType('uint64', 64)
float16 = FloatType('float16', 16, nexp=5, nmant=10) # IEEE 754 binary16, Half precision
float32 = FloatType('float32', 32, nexp=8, nmant=23) # IEEE 754 binary32, Single precision
float64 = FloatType('float64', 64, nexp=11, nmant=52) # IEEE 754 binary64, Double precision
float80 = FloatType('float80', 80, nexp=15, nmant=63) # x86 extended precision (1 integer part bit), "long double"
float128 = FloatType('float128', 128, nexp=15, nmant=112) # IEEE 754 binary128, Quadruple precision
float256 = FloatType('float256', 256, nexp=19, nmant=236) # IEEE 754 binary256, Octuple precision
complex64 = ComplexType('complex64', nbits=64, **float32.kwargs(exclude=('name', 'nbits')))
complex128 = ComplexType('complex128', nbits=128, **float64.kwargs(exclude=('name', 'nbits')))
# Generic types (precision may be chosen by code printers):
untyped = Type('untyped')
real = FloatBaseType('real')
integer = IntBaseType('integer')
complex_ = ComplexBaseType('complex')
bool_ = Type('bool')
class Attribute(Token):
""" Attribute (possibly parametrized)
For use with :class:`sympy.codegen.ast.Node` (which takes instances of
``Attribute`` as ``attrs``).
Parameters
==========
name : str
parameters : Tuple
Examples
========
>>> from sympy.codegen.ast import Attribute
>>> volatile = Attribute('volatile')
>>> volatile
volatile
>>> print(repr(volatile))
Attribute(String('volatile'))
>>> a = Attribute('foo', [1, 2, 3])
>>> a
foo(1, 2, 3)
>>> a.parameters == (1, 2, 3)
True
"""
__slots__ = ('name', 'parameters')
defaults = {'parameters': Tuple()}
_construct_name = String
_construct_parameters = staticmethod(_mk_Tuple)
def _sympystr(self, printer, *args, **kwargs):
result = str(self.name)
if self.parameters:
result += '(%s)' % ', '.join(map(lambda arg: printer._print(
arg, *args, **kwargs), self.parameters))
return result
value_const = Attribute('value_const')
pointer_const = Attribute('pointer_const')
class Variable(Node):
""" Represents a variable.
Parameters
==========
symbol : Symbol
type : Type (optional)
Type of the variable.
attrs : iterable of Attribute instances
Will be stored as a Tuple.
Examples
========
>>> from sympy import Symbol
>>> from sympy.codegen.ast import Variable, float32, integer
>>> x = Symbol('x')
>>> v = Variable(x, type=float32)
>>> v.attrs
()
>>> v == Variable('x')
False
>>> v == Variable('x', type=float32)
True
>>> v
Variable(x, type=float32)
One may also construct a ``Variable`` instance with the type deduced from
assumptions about the symbol using the ``deduced`` classmethod:
>>> i = Symbol('i', integer=True)
>>> v = Variable.deduced(i)
>>> v.type == integer
True
>>> v == Variable('i')
False
>>> from sympy.codegen.ast import value_const
>>> value_const in v.attrs
False
>>> w = Variable('w', attrs=[value_const])
>>> w
Variable(w, attrs=(value_const,))
>>> value_const in w.attrs
True
>>> w.as_Declaration(value=42)
Declaration(Variable(w, value=42, attrs=(value_const,)))
"""
__slots__ = ('symbol', 'type', 'value') + Node.__slots__
defaults = Node.defaults.copy()
defaults.update({'type': untyped, 'value': none})
_construct_symbol = staticmethod(sympify)
_construct_value = staticmethod(sympify)
@classmethod
def deduced(cls, symbol, value=None, attrs=Tuple(), cast_check=True):
""" Alt. constructor with type deduction from ``Type.from_expr``.
Deduces type primarily from ``symbol``, secondarily from ``value``.
Parameters
==========
symbol : Symbol
value : expr
(optional) value of the variable.
attrs : iterable of Attribute instances
cast_check : bool
Whether to apply ``Type.cast_check`` on ``value``.
Examples
========
>>> from sympy import Symbol
>>> from sympy.codegen.ast import Variable, complex_
>>> n = Symbol('n', integer=True)
>>> str(Variable.deduced(n).type)
'integer'
>>> x = Symbol('x', real=True)
>>> v = Variable.deduced(x)
>>> v.type
real
>>> z = Symbol('z', complex=True)
>>> Variable.deduced(z).type == complex_
True
"""
if isinstance(symbol, Variable):
return symbol
try:
type_ = Type.from_expr(symbol)
except ValueError:
type_ = Type.from_expr(value)
if value is not None and cast_check:
value = type_.cast_check(value)
return cls(symbol, type=type_, value=value, attrs=attrs)
def as_Declaration(self, **kwargs):
""" Convenience method for creating a Declaration instance.
Explanation
===========
        If the Declaration needs to wrap a modified variable,
        keyword arguments may be passed (overriding e.g.
the ``value`` of the Variable instance).
Examples
========
>>> from sympy.codegen.ast import Variable, NoneToken
>>> x = Variable('x')
>>> decl1 = x.as_Declaration()
>>> # value is special NoneToken() which must be tested with == operator
>>> decl1.variable.value is None # won't work
False
>>> decl1.variable.value == None # not PEP-8 compliant
True
>>> decl1.variable.value == NoneToken() # OK
True
>>> decl2 = x.as_Declaration(value=42.0)
>>> decl2.variable.value == 42
True
"""
kw = self.kwargs()
kw.update(kwargs)
return Declaration(self.func(**kw))
def _relation(self, rhs, op):
try:
rhs = _sympify(rhs)
except SympifyError:
raise TypeError("Invalid comparison %s < %s" % (self, rhs))
return op(self, rhs, evaluate=False)
__lt__ = lambda self, other: self._relation(other, Lt)
__le__ = lambda self, other: self._relation(other, Le)
__ge__ = lambda self, other: self._relation(other, Ge)
__gt__ = lambda self, other: self._relation(other, Gt)
class Pointer(Variable):
""" Represents a pointer. See ``Variable``.
Examples
========
Can create instances of ``Element``:
>>> from sympy import Symbol
>>> from sympy.codegen.ast import Pointer
>>> i = Symbol('i', integer=True)
>>> p = Pointer('x')
>>> p[i+1]
Element(x, indices=(i + 1,))
"""
def __getitem__(self, key):
try:
return Element(self.symbol, key)
except TypeError:
return Element(self.symbol, (key,))
class Element(Token):
""" Element in (a possibly N-dimensional) array.
Examples
========
>>> from sympy.codegen.ast import Element
>>> elem = Element('x', 'ijk')
>>> elem.symbol.name == 'x'
True
>>> elem.indices
(i, j, k)
>>> from sympy import ccode
>>> ccode(elem)
'x[i][j][k]'
>>> ccode(Element('x', 'ijk', strides='lmn', offset='o'))
'x[i*l + j*m + k*n + o]'
"""
__slots__ = ('symbol', 'indices', 'strides', 'offset')
defaults = {'strides': none, 'offset': none}
_construct_symbol = staticmethod(sympify)
_construct_indices = staticmethod(lambda arg: Tuple(*arg))
_construct_strides = staticmethod(lambda arg: Tuple(*arg))
_construct_offset = staticmethod(sympify)
class Declaration(Token):
""" Represents a variable declaration
Parameters
==========
variable : Variable
Examples
========
>>> from sympy.codegen.ast import Declaration, NoneToken, untyped
>>> z = Declaration('z')
>>> z.variable.type == untyped
True
>>> # value is special NoneToken() which must be tested with == operator
>>> z.variable.value is None # won't work
False
>>> z.variable.value == None # not PEP-8 compliant
True
>>> z.variable.value == NoneToken() # OK
True
"""
__slots__ = ('variable',)
_construct_variable = Variable
class While(Token):
""" Represents a 'for-loop' in the code.
Expressions are of the form:
"while condition:
body..."
Parameters
==========
condition : expression convertible to Boolean
body : CodeBlock or iterable
When passed an iterable it is used to instantiate a CodeBlock.
Examples
========
>>> from sympy import symbols, Gt, Abs
>>> from sympy.codegen import aug_assign, Assignment, While
>>> x, dx = symbols('x dx')
>>> expr = 1 - x**2
>>> whl = While(Gt(Abs(dx), 1e-9), [
... Assignment(dx, -expr/expr.diff(x)),
... aug_assign(x, '+', dx)
... ])
"""
__slots__ = ('condition', 'body')
_construct_condition = staticmethod(lambda cond: _sympify(cond))
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
class Scope(Token):
""" Represents a scope in the code.
Parameters
==========
body : CodeBlock or iterable
When passed an iterable it is used to instantiate a CodeBlock.
"""
__slots__ = ('body',)
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
class Stream(Token):
""" Represents a stream.
There are two predefined Stream instances ``stdout`` & ``stderr``.
Parameters
==========
name : str
Examples
========
>>> from sympy import pycode, Symbol
>>> from sympy.codegen.ast import Print, stderr, QuotedString
>>> print(pycode(Print(['x'], file=stderr)))
print(x, file=sys.stderr)
>>> x = Symbol('x')
>>> print(pycode(Print([QuotedString('x')], file=stderr))) # print literally "x"
print("x", file=sys.stderr)
"""
__slots__ = ('name',)
_construct_name = String
stdout = Stream('stdout')
stderr = Stream('stderr')
class Print(Token):
""" Represents print command in the code.
Parameters
==========
formatstring : str
*args : Basic instances (or convertible to such through sympify)
Examples
========
>>> from sympy.codegen.ast import Print
>>> from sympy import pycode
>>> print(pycode(Print('x y'.split(), "coordinate: %12.5g %12.5g")))
print("coordinate: %12.5g %12.5g" % (x, y))
"""
__slots__ = ('print_args', 'format_string', 'file')
defaults = {'format_string': none, 'file': none}
_construct_print_args = staticmethod(_mk_Tuple)
_construct_format_string = QuotedString
_construct_file = Stream
class FunctionPrototype(Node):
""" Represents a function prototype
    Allows the user to generate forward declarations in e.g. C/C++.
Parameters
==========
return_type : Type
name : str
parameters: iterable of Variable instances
attrs : iterable of Attribute instances
Examples
========
>>> from sympy import ccode, symbols
>>> from sympy.codegen.ast import real, FunctionPrototype
>>> x, y = symbols('x y', real=True)
>>> fp = FunctionPrototype(real, 'foo', [x, y])
>>> ccode(fp)
'double foo(double x, double y)'
"""
__slots__ = ('return_type', 'name', 'parameters', 'attrs')
_construct_return_type = Type
_construct_name = String
@staticmethod
def _construct_parameters(args):
def _var(arg):
if isinstance(arg, Declaration):
return arg.variable
elif isinstance(arg, Variable):
return arg
else:
return Variable.deduced(arg)
return Tuple(*map(_var, args))
@classmethod
def from_FunctionDefinition(cls, func_def):
if not isinstance(func_def, FunctionDefinition):
raise TypeError("func_def is not an instance of FunctionDefiniton")
return cls(**func_def.kwargs(exclude=('body',)))
class FunctionDefinition(FunctionPrototype):
""" Represents a function definition in the code.
Parameters
==========
return_type : Type
name : str
parameters: iterable of Variable instances
body : CodeBlock or iterable
attrs : iterable of Attribute instances
Examples
========
>>> from sympy import ccode, symbols
>>> from sympy.codegen.ast import real, FunctionPrototype
>>> x, y = symbols('x y', real=True)
>>> fp = FunctionPrototype(real, 'foo', [x, y])
>>> ccode(fp)
'double foo(double x, double y)'
>>> from sympy.codegen.ast import FunctionDefinition, Return
>>> body = [Return(x*y)]
>>> fd = FunctionDefinition.from_FunctionPrototype(fp, body)
>>> print(ccode(fd))
double foo(double x, double y){
return x*y;
}
"""
__slots__ = FunctionPrototype.__slots__[:-1] + ('body', 'attrs')
@classmethod
def _construct_body(cls, itr):
if isinstance(itr, CodeBlock):
return itr
else:
return CodeBlock(*itr)
@classmethod
def from_FunctionPrototype(cls, func_proto, body):
if not isinstance(func_proto, FunctionPrototype):
raise TypeError("func_proto is not an instance of FunctionPrototype")
return cls(body=body, **func_proto.kwargs())
class Return(Token):
""" Represents a return command in the code.
Parameters
==========
return : Basic
Examples
========
>>> from sympy.codegen.ast import Return
>>> from sympy.printing.pycode import pycode
>>> from sympy import Symbol
>>> x = Symbol('x')
>>> print(pycode(Return(x)))
return x
"""
__slots__ = ('return',)
_construct_return=staticmethod(_sympify)
class FunctionCall(Token, Expr):
""" Represents a call to a function in the code.
Parameters
==========
name : str
function_args : Tuple
Examples
========
>>> from sympy.codegen.ast import FunctionCall
>>> from sympy import pycode
>>> fcall = FunctionCall('foo', 'bar baz'.split())
>>> print(pycode(fcall))
foo(bar, baz)
"""
__slots__ = ('name', 'function_args')
_construct_name = String
_construct_function_args = staticmethod(lambda args: Tuple(*args))
|
py | 1a35c98ff177c834775e82b83ab74448d4af68d2 | import json
class Config:
@staticmethod
def get():
with open('config.json') as conf:
result = json.load(conf)
return result
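# Minimal usage sketch (added for illustration, not part of the original file);
# it assumes a `config.json` file exists in the working directory, and the
# `debug` key below is a hypothetical example.
if __name__ == '__main__':
    settings = Config.get()
    print(settings.get('debug', False))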
|
py | 1a35caeb3275ca427f709af538d11090d3512554 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Prepare dataset for keras model benchmark."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from official.utils.misc import model_helpers # pylint: disable=g-bad-import-order
# Default values for dataset.
_NUM_CHANNELS = 3
_NUM_CLASSES = 1000
def _get_default_image_size(model):
"""Provide default image size for each model."""
image_size = (224, 224)
if model in ["inceptionv3", "xception", "inceptionresnetv2"]:
image_size = (299, 299)
elif model in ["nasnetlarge"]:
image_size = (331, 331)
return image_size
def generate_synthetic_input_dataset(model, batch_size):
"""Generate synthetic dataset."""
image_size = _get_default_image_size(model)
image_shape = (batch_size,) + image_size + (_NUM_CHANNELS,)
label_shape = (batch_size, _NUM_CLASSES)
dataset = model_helpers.generate_synthetic_data(
input_shape=tf.TensorShape(image_shape),
label_shape=tf.TensorShape(label_shape),
)
return dataset
class Cifar10Dataset(object):
"""CIFAR10 dataset, including train and test set.
Each sample consists of a 32x32 color image, and label is from 10 classes.
"""
def __init__(self, batch_size):
"""Initializes train/test datasets.
Args:
batch_size: int, the number of batch size.
"""
self.input_shape = (32, 32, 3)
self.num_classes = 10
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
y_train, y_test = y_train.astype(np.int64), y_test.astype(np.int64)
y_train = tf.keras.utils.to_categorical(y_train, self.num_classes)
y_test = tf.keras.utils.to_categorical(y_test, self.num_classes)
self.train_dataset = tf.data.Dataset.from_tensor_slices(
(x_train, y_train)).shuffle(2000).batch(batch_size).repeat()
self.test_dataset = tf.data.Dataset.from_tensor_slices(
(x_test, y_test)).shuffle(2000).batch(batch_size).repeat()
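# Minimal usage sketch (added for illustration, not part of the original file).
# "resnet50" is an arbitrary model name that falls back to the default 224x224
# image size, and instantiating Cifar10Dataset downloads CIFAR-10 on first use.
if __name__ == "__main__":
    synthetic = generate_synthetic_input_dataset("resnet50", batch_size=32)
    print(synthetic)
    cifar = Cifar10Dataset(batch_size=32)
    print(cifar.input_shape, cifar.num_classes)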
|
py | 1a35cafdf7564f25be613eb315e17fa4e3f629a6 | # Copyright 2022 The jax3d Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for geometry_utils."""
import jax
import jax.numpy as jnp
from jax3d.projects.nesf.nerfstatic.utils import geometry_utils as geom
from jax3d.projects.nesf.nerfstatic.utils import types
import numpy as np
def _make_rays(origin, direction):
n, _ = origin.shape
return types.Rays(scene_id=jnp.zeros((n, 1), dtype=jnp.int32),
origin=origin,
direction=direction)
def test_scale():
transform = geom.Scale(scale=jnp.array([1, 2, 3]))
rays = _make_rays(origin=np.array([[1, 1, 0]]),
direction=np.array([[1/np.sqrt(2), -1/np.sqrt(2), 0]]))
rays2 = transform.forward(rays)
rays3 = transform.backward(rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays3)
np.testing.assert_allclose(rays2.origin[0, 1], 2) # pytype: disable=attribute-error
def test_rotate():
transform = geom.Rotate(axis=jnp.array([0, 0, 1]), radians=np.pi/2)
rays = _make_rays(origin=np.array([[1, 1, 0]]),
direction=np.array([[1/np.sqrt(2), -1/np.sqrt(2), 0]]))
rays2 = transform.forward(rays)
rays3 = transform.backward(rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays3)
np.testing.assert_allclose(rays2.origin[0], np.array([-1, 1, 0])) # pytype: disable=attribute-error
np.testing.assert_allclose(rays2.direction[0],
np.array([1/np.sqrt(2), 1/np.sqrt(2), 0]))
def test_translate():
transform = geom.Translate(offset=jnp.array([1, 2, 3]))
rays = _make_rays(origin=np.array([[1, 1, 0]]),
direction=np.array([[1/np.sqrt(2), -1/np.sqrt(2), 0]]))
rays2 = transform.forward(rays)
rays3 = transform.backward(rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays3)
np.testing.assert_allclose(rays2.origin[0], np.array([2, 3, 3])) # pytype: disable=attribute-error
np.testing.assert_allclose(rays2.direction[0],
np.array([1/np.sqrt(2), -1/np.sqrt(2), 0]))
def test_compose():
transform = geom.Compose(transforms=[
geom.Scale(scale=np.array([1, 2, 1])),
geom.Rotate(axis=np.array([0, 0, 1]), radians=np.pi/2),
geom.Translate(offset=jnp.array([1, -1, 3])),
])
rays = _make_rays(origin=np.array([[1, 0.5, -3]]),
direction=np.array([[1/np.sqrt(2), -1/np.sqrt(2), 0]]))
rays2 = transform.forward(rays)
rays3 = transform.backward(rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays3)
np.testing.assert_allclose(rays2.origin[0], np.zeros(3)) # pytype: disable=attribute-error
np.testing.assert_allclose(rays2.direction[0],
np.array([2/np.sqrt(2), 1/np.sqrt(2), 0]))
def test_inverse():
transform = geom.Inverse(transform=geom.Compose(transforms=[
geom.Scale(scale=np.array([1, 2, 1])),
geom.Rotate(axis=np.array([0, 0, 1]), radians=np.pi/2),
geom.Translate(offset=jnp.array([1, -1, 3])),
]))
rays = _make_rays(origin=np.array([[0, 0, 0]]),
direction=np.array([[1.4142135, 0.70710677, 0.]]))
rays2 = transform.forward(rays)
rays3 = transform.backward(rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays3)
np.testing.assert_allclose(rays2.origin[0], np.array([1, 0.5, -3])) # pytype: disable=attribute-error
np.testing.assert_allclose(rays2.direction[0],
np.array([1/np.sqrt(2), -1/np.sqrt(2), 0]))
def test_identity():
transform = geom.Identity()
rays = _make_rays(origin=np.array([[0, 0, 0]]),
direction=np.array([[1.4142135, 0.70710677, 0.]]))
rays2 = transform.forward(rays)
rays3 = transform.backward(rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays2)
jax.tree_map(np.testing.assert_allclose, rays, rays3)
def test_sample_points():
n = 20
k = 7
sample_points = types.SamplePoints(
scene_id=np.random.randint(0, 5, size=(n, 1)),
position=np.random.randn(n, k, 3),
direction=np.random.randn(n, 3))
rays = geom._sample_points_to_rays(sample_points)
sample_points2 = geom._rays_to_sample_points(rays, sample_points.batch_shape)
jax.tree_map(np.testing.assert_allclose, sample_points, sample_points2)
|
py | 1a35cc2ddf31ee92e558873eeced7869352ee748 | from src.if_else import if_else
from src.secint import secint as s
def maximum(quotients):
"""
Returns both the maximum quotient and the index of the maximum in an
oblivious sequence.
Only works for quotients that have positive numerator and denominator.
"""
def max(previous, current):
(maximum, index_of_maximum, index) = previous
is_new_maximum = ge_quotient(current, maximum)
index_of_maximum = if_else(is_new_maximum, index, index_of_maximum)
maximum = tuple(if_else(is_new_maximum,
list(current),
list(maximum)))
return (maximum, index_of_maximum, index + 1)
neutral = (s(0), s(0))
initial = (neutral, s(0), s(0))
maximum, index_of_maximum, _ = quotients.reduce(neutral, max, initial)
return maximum, index_of_maximum
def ge_quotient(left, right):
"""
    Returns whether the left quotient is greater than or equal to the right
quotient.
Only works for quotients that have positive numerator and denominator.
"""
(a, b) = left
(c, d) = right
return a * d >= b * c
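# Worked example with plain integers (added for illustration; real callers pass
# secret-shared values): ge_quotient((3, 4), (5, 8)) evaluates 3*8 >= 4*5,
# i.e. 24 >= 20, so 3/4 >= 5/8 is decided without performing any division.
# The cross-multiplication is only valid because numerators and denominators
# are positive, as both docstrings require.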
|
py | 1a35cdb4aa12a6dd730ed371ee25522957273dc9 | # a file
|
py | 1a35cf48d3ce437e4f88997dc01b8e0245cc61aa | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
from aliyunsdkecs.endpoint import endpoint_data
class DescribeSnapshotLinksRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Ecs', '2014-05-26', 'DescribeSnapshotLinks','ecs')
self.set_method('POST')
if hasattr(self, "endpoint_map"):
setattr(self, "endpoint_map", endpoint_data.getEndpointMap())
if hasattr(self, "endpoint_regional"):
setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional())
def get_ResourceOwnerId(self): # Long
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self, ResourceOwnerId): # Long
self.add_query_param('ResourceOwnerId', ResourceOwnerId)
def get_PageNumber(self): # Integer
return self.get_query_params().get('PageNumber')
def set_PageNumber(self, PageNumber): # Integer
self.add_query_param('PageNumber', PageNumber)
def get_PageSize(self): # Integer
return self.get_query_params().get('PageSize')
def set_PageSize(self, PageSize): # Integer
self.add_query_param('PageSize', PageSize)
def get_DiskIds(self): # String
return self.get_query_params().get('DiskIds')
def set_DiskIds(self, DiskIds): # String
self.add_query_param('DiskIds', DiskIds)
def get_ResourceOwnerAccount(self): # String
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self, ResourceOwnerAccount): # String
self.add_query_param('ResourceOwnerAccount', ResourceOwnerAccount)
def get_OwnerAccount(self): # String
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self, OwnerAccount): # String
self.add_query_param('OwnerAccount', OwnerAccount)
def get_SnapshotLinkIds(self): # String
return self.get_query_params().get('SnapshotLinkIds')
def set_SnapshotLinkIds(self, SnapshotLinkIds): # String
self.add_query_param('SnapshotLinkIds', SnapshotLinkIds)
def get_OwnerId(self): # Long
return self.get_query_params().get('OwnerId')
def set_OwnerId(self, OwnerId): # Long
self.add_query_param('OwnerId', OwnerId)
def get_InstanceId(self): # String
return self.get_query_params().get('InstanceId')
def set_InstanceId(self, InstanceId): # String
self.add_query_param('InstanceId', InstanceId)
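# Minimal usage sketch (added for illustration, not part of the generated SDK
# file); the credentials and region below are placeholders.
if __name__ == "__main__":
    from aliyunsdkcore.client import AcsClient
    client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
    request = DescribeSnapshotLinksRequest()
    request.set_PageSize(10)
    print(client.do_action_with_exception(request))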
|
py | 1a35cfe987b62cf393a7d80d3ca95cf8588c4e19 | """objects.
This submodule includes the unidimensional object classes.
"""
from .object import Object
from .system import SystemObjects
from .single import SingleObject
__all__ = ['Object', 'SystemObjects', 'SingleObject']
|
py | 1a35cfef35c7dcd07557fec9edb4a331f16409d2 | # -*- coding: utf-8 -*-
import asyncio
import os
import sys
root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(root + '/python')
import ccxt.async as ccxt # noqa: E402
def get_active_symbols(exchange):
return [symbol for symbol in exchange.symbols if is_active_symbol(exchange, symbol)]
def is_active_symbol(exchange, symbol):
return ('.' not in symbol) and (('active' not in exchange.markets[symbol]) or (exchange.markets[symbol]['active']))
async def fetch_ticker(exchange, symbol):
ticker = await exchange.fetchTicker(symbol)
print(exchange.id, symbol, ticker)
return ticker
async def fetch_tickers(exchange):
await exchange.load_markets()
print(exchange.id, 'fetching all tickers by simultaneous multiple concurrent requests')
symbols_to_load = get_active_symbols(exchange)
input_coroutines = [fetch_ticker(exchange, symbol) for symbol in symbols_to_load]
tickers = await asyncio.gather(*input_coroutines, return_exceptions=True)
for ticker, symbol in zip(tickers, symbols_to_load):
if not isinstance(ticker, dict):
print(exchange.id, symbol, 'error')
else:
print(exchange.id, symbol, 'ok')
print(exchange.id, 'fetched', len(list(tickers)), 'tickers')
asyncio.get_event_loop().run_until_complete(fetch_tickers(ccxt.bitfinex({
'enableRateLimit': True, # this option enables the built-in rate limiter
})))
|
py | 1a35d15741f06a9a35256b399d57623ca1f86661 | # -*- coding: utf-8 -*-
###########################################################################
## Python code generated with wxFormBuilder (version Jun 17 2015)
## http://www.wxformbuilder.org/
##
## PLEASE DO "NOT" EDIT THIS FILE!
###########################################################################
import wx
import wx.xrc
import win32gui
from MyConfigParser import *
from LoginFrame import *
from CardHit import *
###########################################################################
## Class MainFrame
###########################################################################
isLogin = False
class MainFrame(wx.Frame):
def __init__(self, parent):
self.windowTitle = '沪牌拍卖系统'
wx.Frame.__init__(self, parent, id=wx.ID_ANY, title=self.windowTitle, pos=wx.DefaultPosition,
size=wx.Size(480, 481), style=wx.DEFAULT_FRAME_STYLE | wx.TAB_TRAVERSAL)
self.cardHit = CardHit()
self.file = "conf\\config.ini"
self.conf = MyConfigParser()
self.conf.read(self.file)
outSizer = wx.FlexGridSizer(0, 2, 0, 0)
outSizer.SetFlexibleDirection(wx.BOTH)
outSizer.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
self.title = wx.StaticText(self, wx.ID_ANY, u"沪牌拍卖系统", wx.DefaultPosition, wx.DefaultSize, 0)
self.title.Wrap(-1)
self.title.SetFont(wx.Font(30, 70, 90, 90, False, wx.EmptyString))
outSizer.Add(self.title, 0, wx.ALL, 5)
self.loginBtn = wx.Button(self, wx.ID_ANY, u"登录", wx.DefaultPosition, wx.DefaultSize, 0)
outSizer.Add(self.loginBtn, 0, wx.ALL, 5)
strategySizer = wx.BoxSizer(wx.VERTICAL)
straSizer1 = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, u"策略1"), wx.VERTICAL)
self.comm1 = wx.StaticText(straSizer1.GetStaticBox(), wx.ID_ANY, u"说明:设置固定金额,最低价加300大于等于该固定金额时出价",
wx.DefaultPosition, wx.DefaultSize, 0)
self.comm1.Wrap(-1)
straSizer1.Add(self.comm1, 0, wx.ALL, 5)
dtlInfoSizer1 = wx.GridSizer(0, 2, 0, 0)
self.seq1 = wx.StaticText(straSizer1.GetStaticBox(), wx.ID_ANY, u"序号:", wx.DefaultPosition, wx.DefaultSize, 0)
self.seq1.Wrap(-1)
dtlInfoSizer1.Add(self.seq1, 0, wx.ALL, 5)
seqList1Choices = [u"1", u"2"]
self.seqList1 = wx.Choice(straSizer1.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, seqList1Choices,
0)
self.seqList1.SetSelection(0)
dtlInfoSizer1.Add(self.seqList1, 0, wx.ALL, 5)
self.priceLabel = wx.StaticText(straSizer1.GetStaticBox(), wx.ID_ANY, u"价格:", wx.DefaultPosition, wx.DefaultSize, 0)
self.priceLabel.Wrap(-1)
dtlInfoSizer1.Add(self.priceLabel, 0, wx.ALL, 5)
self.price = wx.TextCtrl(straSizer1.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition, wx.DefaultSize,
0)
dtlInfoSizer1.Add(self.price, 0, wx.ALL, 5)
straSizer1.Add(dtlInfoSizer1, 1, wx.EXPAND, 5)
btnSizer1 = wx.GridBagSizer(0, 0)
btnSizer1.SetFlexibleDirection(wx.BOTH)
btnSizer1.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
self.saveBtn1 = wx.Button(straSizer1.GetStaticBox(), wx.ID_ANY, u"保存", wx.DefaultPosition, wx.DefaultSize, 0)
btnSizer1.Add(self.saveBtn1, wx.GBPosition(0, 0), wx.GBSpan(1, 1), wx.ALL, 5)
self.resetBtn1 = wx.Button(straSizer1.GetStaticBox(), wx.ID_ANY, u"重置", wx.DefaultPosition, wx.DefaultSize, 0)
btnSizer1.Add(self.resetBtn1, wx.GBPosition(0, 1), wx.GBSpan(1, 1), wx.ALL, 5)
self.runBtn1 = wx.Button(straSizer1.GetStaticBox(), wx.ID_ANY, u"运行", wx.DefaultPosition, wx.DefaultSize, 0)
btnSizer1.Add(self.runBtn1, wx.GBPosition(0, 2), wx.GBSpan(1, 1), wx.ALL, 5)
straSizer1.Add(btnSizer1, 1, wx.EXPAND, 5)
strategySizer.Add(straSizer1, 1, wx.EXPAND, 5)
straSizer2 = wx.StaticBoxSizer(wx.StaticBox(self, wx.ID_ANY, u"策略2"), wx.VERTICAL)
self.comm2 = wx.StaticText(straSizer2.GetStaticBox(), wx.ID_ANY, u"说明:指定时间在最低价基础上增加指定金额出价", wx.DefaultPosition,
wx.DefaultSize, 0)
self.comm2.Wrap(-1)
straSizer2.Add(self.comm2, 0, wx.ALL, 5)
dtlInfoSizer2 = wx.GridSizer(0, 2, 0, 0)
self.seq2 = wx.StaticText(straSizer2.GetStaticBox(), wx.ID_ANY, u"序号:", wx.DefaultPosition, wx.DefaultSize, 0)
self.seq2.Wrap(-1)
dtlInfoSizer2.Add(self.seq2, 0, wx.ALL, 5)
seqList2Choices = [u"1", u"2"]
self.seqList2 = wx.Choice(straSizer2.GetStaticBox(), wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, seqList2Choices,
0)
self.seqList2.SetSelection(0)
dtlInfoSizer2.Add(self.seqList2, 0, wx.ALL, 5)
self.offTimeLabel = wx.StaticText(straSizer2.GetStaticBox(), wx.ID_ANY, u"出价时间:", wx.DefaultPosition,
wx.DefaultSize, 0)
self.offTimeLabel.Wrap(-1)
dtlInfoSizer2.Add(self.offTimeLabel, 0, wx.ALL, 5)
gbSizer3 = wx.GridBagSizer(0, 0)
gbSizer3.SetFlexibleDirection(wx.BOTH)
gbSizer3.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
hourPickChoices = [u"11"]
self.hourPick = wx.ComboBox(straSizer2.GetStaticBox(), wx.ID_ANY, u"11", wx.DefaultPosition, wx.DefaultSize,
hourPickChoices, 0)
self.hourPick.SetSelection(0)
gbSizer3.Add(self.hourPick, wx.GBPosition(0, 0), wx.GBSpan(1, 1), wx.ALL, 5)
minuPickChoices = [u"00", u"01", u"02", u"03", u"04", u"05", u"06", u"07", u"08", u"09", u"10", u"11", u"12", u"13",
u"14", u"15", u"16", u"17", u"18", u"19", u"20", u"21", u"22", u"23", u"24", u"25", u"26", u"27",
u"28", u"29", u"30"]
self.minuPick = wx.ComboBox(straSizer2.GetStaticBox(), wx.ID_ANY, u"00", wx.DefaultPosition, wx.DefaultSize,
minuPickChoices, 0)
self.minuPick.SetSelection(0)
gbSizer3.Add(self.minuPick, wx.GBPosition(0, 1), wx.GBSpan(1, 1), wx.ALL, 5)
secPickChoices = [u"00", u"01", u"02", u"03", u"04", u"05", u"06", u"07", u"08", u"09", u"10", u"11", u"12", u"13",
u"14", u"15", u"16", u"17", u"18", u"19", u"20", u"21", u"22", u"23", u"24", u"25", u"26", u"27",
u"28", u"29", u"30", u"31", u"32", u"33", u"34", u"35", u"36", u"37", u"38", u"39", u"40", u"41",
u"42", u"43", u"44", u"45", u"46", u"47", u"48", u"49", u"50", u"51", u"52", u"53", u"54", u"55",
u"56", u"57", u"58", u"59"]
self.secPick = wx.ComboBox(straSizer2.GetStaticBox(), wx.ID_ANY, u"00", wx.DefaultPosition, wx.DefaultSize,
secPickChoices, 0)
self.secPick.SetSelection(0)
gbSizer3.Add(self.secPick, wx.GBPosition(0, 2), wx.GBSpan(1, 1), wx.ALL, 5)
dtlInfoSizer2.Add(gbSizer3, 1, wx.EXPAND, 5)
self.addPriceLabel = wx.StaticText(straSizer2.GetStaticBox(), wx.ID_ANY, u"加价金额:", wx.DefaultPosition,
wx.DefaultSize, 0)
self.addPriceLabel.Wrap(-1)
dtlInfoSizer2.Add(self.addPriceLabel, 0, wx.ALL, 5)
self.addPrice = wx.TextCtrl(straSizer2.GetStaticBox(), wx.ID_ANY, wx.EmptyString, wx.DefaultPosition,
wx.Size(150, -1), 0)
dtlInfoSizer2.Add(self.addPrice, 0, wx.ALL, 5)
straSizer2.Add(dtlInfoSizer2, 1, wx.EXPAND, 5)
btnSizer2 = wx.GridBagSizer(0, 0)
btnSizer2.SetFlexibleDirection(wx.BOTH)
btnSizer2.SetNonFlexibleGrowMode(wx.FLEX_GROWMODE_SPECIFIED)
self.saveBtn2 = wx.Button(straSizer2.GetStaticBox(), wx.ID_ANY, u"保存", wx.DefaultPosition, wx.DefaultSize, 0)
btnSizer2.Add(self.saveBtn2, wx.GBPosition(0, 0), wx.GBSpan(1, 1), wx.ALL, 5)
self.resetBtn2 = wx.Button(straSizer2.GetStaticBox(), wx.ID_ANY, u"重置", wx.DefaultPosition, wx.DefaultSize, 0)
btnSizer2.Add(self.resetBtn2, wx.GBPosition(0, 1), wx.GBSpan(1, 1), wx.ALL, 5)
self.runBtn2 = wx.Button(straSizer2.GetStaticBox(), wx.ID_ANY, u"运行", wx.DefaultPosition, wx.DefaultSize, 0)
btnSizer2.Add(self.runBtn2, wx.GBPosition(0, 2), wx.GBSpan(1, 1), wx.ALL, 5)
straSizer2.Add(btnSizer2, 1, wx.EXPAND, 5)
strategySizer.Add(straSizer2, 1, wx.EXPAND, 5)
outSizer.Add(strategySizer, 1, wx.EXPAND, 5)
compSizer = wx.BoxSizer(wx.VERTICAL)
self.compRunBtn = wx.Button(self, wx.ID_ANY, u"运行", wx.Point(-1, -1), wx.Size(-1, -1), 0)
compSizer.Add(self.compRunBtn, 0, wx.ALL, 5)
self.compComment = wx.StaticText(self, wx.ID_ANY, u"说明:此处针对组\n合策略模式,点击\n运行前必须要选择\n策略中的序号,将\n按照序号依次判断\n执行",
wx.DefaultPosition, wx.DefaultSize, 0)
self.compComment.Wrap(-1)
compSizer.Add(self.compComment, 0, wx.ALL, 5)
outSizer.Add(compSizer, 1, wx.EXPAND, 5)
self.SetSizer(outSizer)
self.Layout()
self.Centre(wx.BOTH)
# Connect Events
self.loginBtn.Bind(wx.EVT_BUTTON, self.popLoginWdw)
self.saveBtn1.Bind(wx.EVT_BUTTON, self.saveStra1)
self.resetBtn1.Bind(wx.EVT_BUTTON, self.resetStra1)
self.runBtn1.Bind(wx.EVT_BUTTON, self.runStra1)
self.saveBtn2.Bind(wx.EVT_BUTTON, self.saveStra2)
self.resetBtn2.Bind(wx.EVT_BUTTON, self.resetStra2)
self.runBtn2.Bind(wx.EVT_BUTTON, self.runStra2)
self.compRunBtn.Bind(wx.EVT_BUTTON, self.compRun)
def __del__(self):
pass
def setLogInfo(self,loginFlag,userName):
global isLogin
isLogin = loginFlag
self.title.SetLabel(userName + "登录成功")
# Virtual event handlers, overide them in your derived class
def popLoginWdw(self, event):
loginFrame = LoginFrame(self)
loginFrame.ShowModal()
def saveStra1(self, event):
self.clearStrategy()
self.conf.add_section('strategy=pointPrice')
self.conf.set('strategy=pointPrice','pointPrice',self.price.Value)
self.showSuccess(event)
def resetStra1(self, event):
self.price.Clear()
def runStra1(self, event):
if self.checkLogin() == True:
self.saveStra1(None)
self.cardHit.executeStrategy()
def saveStra2(self, event):
self.clearStrategy()
self.conf.add_section('strategy=pointTimeAndAddPrice')
self.conf.set('strategy=pointTimeAndAddPrice', 'time', self.hourPick.Value+':'+self.minuPick.Value+':'+self.secPick.Value)
self.conf.set('strategy=pointTimeAndAddPrice', 'add', self.addPrice.Value)
self.showSuccess(event)
def resetStra2(self, event):
self.hourPick.Value='11'
self.minuPick.Value='00'
self.secPick.Value='00'
self.addPrice.Clear()
def runStra2(self, event):
if self.checkLogin() == True:
self.saveStra2(None)
self.cardHit.executeStrategy()
def loopList(self,list,seq):
for val in list:
if int(val[0]) == seq:
return val[1]
def saveCompStra1(self):
self.conf.set('strategy=compose', 'stra1', 'pointPrice')
self.conf.set('strategy=compose', 'pointPrice', self.price.Value)
def saveCompStra2(self):
self.conf.set('strategy=compose', 'stra2', 'pointTimeAndAddPrice')
self.conf.set('strategy=compose', 'time',
self.hourPick.Value + ':' + self.minuPick.Value + ':' + self.secPick.Value)
self.conf.set('strategy=compose', 'add', self.addPrice.Value)
def saveCompStra(self,event):
self.clearStrategy()
self.conf.add_section('strategy=compose')
idx = 1
list = []
list.append((self.seqList1.GetString(self.seqList1.GetSelection()),'stra1'))
list.append((self.seqList2.GetString(self.seqList2.GetSelection()),'stra2'))
while idx<=len(list):
if self.loopList(list, idx) == 'stra1':
self.saveCompStra1()
if self.loopList(list, idx) == 'stra2':
self.saveCompStra2()
idx = idx + 1
self.showSuccess(event)
def compRun(self, event):
if self.checkLogin() == True:
self.saveCompStra(None)
self.cardHit.executeStrategy()
def clearStrategy(self):
self.conf.remove_section('strategy=pointTimeAndAddPrice')
self.conf.remove_section('strategy=pointPrice')
self.conf.remove_section('strategy=compose')
def showSuccess(self,event):
self.conf.write(open(self.file,"w"))
        # Do not pop up this dialog when the strategy is run directly (event is None)
if event != None:
dlg = wx.MessageDialog(None, u"保存成功", u"提示", wx.OK)
if dlg.ShowModal() == wx.ID_OK:
dlg.Destroy()
def checkLogin(self):
global isLogin
if isLogin == False:
dlg = wx.MessageDialog(None, u"登录成功后才可以运行", u"提示", wx.OK|wx.CANCEL)
if dlg.ShowModal() == wx.ID_OK:
self.popLoginWdw(None)
dlg.Destroy()
return isLogin
if __name__ == "__main__":
app = wx.App()
mainFrame = MainFrame(None)
mainFrame.Show()
app.MainLoop()
|
py | 1a35d1a817e51e8f6ec067d8eaf7e014a64b3447 | """
A CapitalT class and methods that use the Cross class.
Authors: David Mutchler, Dave Fisher, Valerie Galluzzi, Amanda Stouder,
their colleagues and Jun Fan.
""" # DONE: 1. PUT YOUR NAME IN THE ABOVE LINE.
import rosegraphics as rg
def main():
""" Calls the test functions. As you implement CapitalT method uncomment the appropriate tests. """
# --------------------------------------------------------------
# Uncomment only 1 test at a time as you develop your code.
# --------------------------------------------------------------
print('Un-comment the calls in MAIN one by one')
print(' to run the testing code as you complete the TODOs.')
run_test_simple_t()
run_test_set_colors()
run_test_move_by()
run_test_clone()
def run_test_simple_t():
""" Tests for the __init__ method and attach_to method. See the simple_t PDF for expected output. """
print()
print('--------------------------------------------------')
print('Testing __init__ and attach_to ')
print('--------------------------------------------------')
window = rg.RoseWindow(600, 400, 'Test 1 - Simple Ts')
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
print("Expected: Point(250.0, 40.0) Point(350.0, 60.0)")
print("Actual: ", t1.h_rect.get_upper_left_corner(), t1.h_rect.get_lower_right_corner())
print("Expected: Point(290.0, 40.0) Point(310.0, 240.0)")
print("Actual: ", t1.v_rect.get_upper_left_corner(), t1.v_rect.get_lower_right_corner())
t1.attach_to(window)
t2 = CapitalT(rg.Point(150, 150), 100, 150, 40)
t2.attach_to(window)
t3 = CapitalT(rg.Point(450, 150), 10, 15, 4)
t3.attach_to(window)
window.render()
print("See graphics window and compare to the simple_t PDF")
window.close_on_mouse_click()
def run_test_set_colors():
""" Tests for the set_colors method. See the set_colors PDF for expected output. """
window = rg.RoseWindow(600, 400, 'Test 2 - Colorful Ts')
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.set_colors('red', 'magenta')
t1.attach_to(window)
t2 = CapitalT(rg.Point(150, 150), 100, 150, 40)
t2.set_colors('green', 'purple')
t2.attach_to(window)
t3 = CapitalT(rg.Point(450, 150), 10, 15, 4)
t3.set_colors('blue', 'gray')
t3.attach_to(window)
window.render()
window.close_on_mouse_click()
def run_test_move_by():
""" Tests for the move_by method. See the move_by PDF for expected output. """
window = rg.RoseWindow(600, 400, 'Test 3 - Moving T')
little_red_t = CapitalT(rg.Point(300, 50), 60, 80, 5)
little_red_t.set_colors('red', 'gray')
little_red_t.attach_to(window)
window.render(0.5)
little_red_t.move_by(0, 100)
window.render(0.5)
little_red_t.move_by(0, 100)
window.render(0.5)
for k in range(40):
little_red_t.move_by(5, -2)
window.render(0.05)
window.close_on_mouse_click()
def run_test_clone():
""" Tests for the clone method. See the clone PDF for expected output. """
window = rg.RoseWindow(650, 400, 'Test 4 - Cloning Ts')
first_t = CapitalT(rg.Point(75, 50), 80, 80, 40)
first_t.set_colors('blue', 'cyan')
for k in range(6):
t = first_t.clone()
if k < 2:
t.set_colors('white', 'black')
t.move_by(100 * k, 20 * k)
t.attach_to(window)
first_t.move_by(0, 200)
first_t.attach_to(window)
window.render()
window.close_on_mouse_click()
########################################################################
# The CapitalT class (and its methods) begins here.
########################################################################
class CapitalT(object):
""" Manages a CapitalT graphics object which is made up of two rectangles. """
def __init__(self, intersection_center, width, height, letter_thickness):
"""
What comes in:
-- self
-- an rg.Point for the intersection center of the CapitalT
-- This point is also center of the horizontal rectangle.
        -- an int for the width of the CapitalT (the width of the horizontal rectangle)
        -- an int for the height of the CapitalT (the height of the vertical rectangle)
        -- an int for the thickness of each rectangle (the letter's thickness)
What goes out: Nothing (i.e., None).
Side effects: Sets two instance variables named:
-- h_rect (to represent the horizontal rectangle in the T, the top bar)
-- v_rect (to represent the vertical rectangle in the T, the | part of the T)
*** See the dimensions PDF for the exact placement of the rectangles in the T. ***
Each rectangle is an rg.Rectangle. Unlike prior modules you are NOT
allowed to make any other instance variables. You may only use
exactly these two and must figure out how to do the problem with ONLY
those two instance variables.
Example:
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
-- t1.h_rect would have an upper left corner of (250, 40)
-- t1.h_rect would have an lower right corner of (350, 60)
-- t1.v_rect would have an upper left corner of (290, 40)
-- t1.v_rect would have an lower right corner of (310, 240)
Type hints:
:type intersection_center: rg.Point
:type width: int
:type height: int
:type letter_thickness: int
"""
# --------------------------------------------------------------
# DONE: 3.
# READ the above specification, including the Example.
# Implement this method
# Note: you will need to also implement attach_to before testing
# --------------------------------------------------------------
self.intersection = intersection_center.clone()
self.width = width
self.height = height
self.thickness = letter_thickness
self.h_rect = rg.Rectangle(rg.Point(self.intersection.x + 0.5 * self.width,
self.intersection.y + 0.5 * self.thickness),
rg.Point(self.intersection.x - 0.5 * self.width,
self.intersection.y - 0.5 * self.thickness))
self.v_rect = rg.Rectangle(rg.Point(self.intersection.x - 0.5 * self.thickness,
self.intersection.y - 0.5 * self.thickness),
rg.Point(self.intersection.x + 0.5 * self.thickness,
self.intersection.y + self.height - 0.5 * self.thickness))
def attach_to(self, window):
"""
What comes in:
-- self
-- an rg.RoseWindow
What goes out: Nothing (i.e., None).
Side effects:
-- Attaches both instance rectangles to the given window.
-- Hint: Attach h_rect second to make it draw in front of v_rect
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.attach_to(window)
Type hints:
:type window: rg.RoseWindow
"""
# --------------------------------------------------------------
# DONE: 4.
# READ the above specification, including the Example.
# Implement and test this method by looking at the console and
# the graphics window (compare it to simple_t.pdf)
# --------------------------------------------------------------
self.v_rect.attach_to(window)
self.h_rect.attach_to(window)
window.render()
def set_colors(self, fill_color, outline_color):
"""
What comes in:
-- self
-- a string that represents a valid rosegraphics color
-- a string that represents a valid rosegraphics color
What goes out: Nothing (i.e., None).
Side effects:
-- sets the fill_color of both rectangles to the given fill color
-- sets the outline_color of both rectangles to the given outline color
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
            t1.set_colors('red', 'blue')
Type hints:
:type fill_color: str
:type outline_color: str
"""
# --------------------------------------------------------------
# DONE: 5.
# READ the above specification, including the Example.
# Implement and test this method by uncommenting the appropriate
# run_test method in main. Compare the graphics window to
# set_colors.pdf.
# --------------------------------------------------------------
self.h_rect.fill_color = fill_color
self.h_rect.outline_color = outline_color
self.v_rect.fill_color = fill_color
self.v_rect.outline_color = outline_color
def move_by(self, dx, dy):
"""
What comes in:
-- self
-- an int amount to move in the x direction
-- an int amount to move in the y direction
What goes out: Nothing (i.e., None).
Side effects:
-- Moves both h_rect and v_rect the specified dx and dy amounts.
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
t1.attach_to(window)
window.render(0.5)
t1.move_by(100, 200) # Moves the T 100 pixels right and 200 down.
window.render() # necessary to see the change
Type hints:
:type dx: int
:type dy: int
"""
# --------------------------------------------------------------
# DONE: 6.
# READ the above specification, including the Example.
# Implement and test this method by uncommenting the appropriate
# run_test method in main. Compare the graphics window to
# move_by.pdf. Note: the pdf shows the different locations
# that the T moves through, but there is only 1 T at any moment.
# --------------------------------------------------------------
        self.intersection.move_by(dx, dy)  # keep the stored center consistent for clone()
        self.h_rect.corner_1.move_by(dx, dy)
        self.v_rect.corner_1.move_by(dx, dy)
        self.h_rect.corner_2.move_by(dx, dy)
        self.v_rect.corner_2.move_by(dx, dy)
def clone(self):
"""
What comes in:
-- self
What goes out:
-- Returns a new CapitalT that is located in the same position as
this CapitalT with the same colors for the rectangles.
Side effects:
-- None
Example:
window = rg.RoseWindow()
t1 = CapitalT(rg.Point(300, 50), 100, 200, 20)
            t1.set_colors('red', 'blue')
t2 = t1.clone() # t2 is at the same location WITH THE SAME COLORS
Type hints:
:rtype: CapitalT
"""
# --------------------------------------------------------------
# DONE: 7.
# READ the above specification, including the Example.
# Implement and test this method by uncommenting the appropriate
# run_test method in main. Compare the graphics window to
# clone.pdf.
# --------------------------------------------------------------
a = CapitalT(self.intersection.clone(), self.width, self.height, self.thickness)
a.h_rect.fill_color = self.h_rect.fill_color
a.h_rect.outline_color = self.h_rect.outline_color
a.v_rect.fill_color = self.h_rect.fill_color
a.v_rect.outline_color = self.h_rect.outline_color
return a
# ----------------------------------------------------------------------
# If this module is running at the top level (as opposed to being
# imported by another module), then call the 'main' function.
# ----------------------------------------------------------------------
if __name__ == '__main__':
main()
|
py | 1a35d1bfc02268220fe6b8c601a05e2afb36280c | import RPi.GPIO as ir
print "PIN 18 Low"
ir.setwarnings(False)
ir.setmode(ir.BOARD)
ir.setup(18,ir.OUT)
ir.output(18,ir.LOW)
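# Optional cleanup sketch (added for illustration): releasing the GPIO state when
# the script is done avoids "channel already in use" warnings on the next run,
# but it also resets the pin, so it is left commented out here.
# ir.cleanup()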
|
py | 1a35d1c02da1b89974b9ca5265082a3664b1be59 | # coding: utf-8
"""
APIs RISKAMERICA
    Below you will find the documentation for the **RiskAmerica** **APIs**, a paid service offered by RiskAmerica that is contracted separately from our other software products. Some considerations to keep in mind when using the APIs: - The APIKEY or Token can be obtained by requesting it from the RiskAmerica sales team - Requests must be sent with the header **Accept:** **application/json** so that the response is returned in **JSON** format (otherwise the response will be in **XML** format) - All services are **REST** and their parameters can be sent via either **POST** or **GET** - Use of the APIs may carry an associated charge as agreed in the commercial contract, so we recommend using them carefully to avoid unnecessary overcharges. - RiskAmerica uses an **IP** **WhiteList** mechanism for API queries. To enable or modify the list of allowed IPs, contact **[email protected]**.  # noqa: E501
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import riam_api_client
from riam_api_client.models.inline_response20025_message import InlineResponse20025Message # noqa: E501
from riam_api_client.rest import ApiException
class TestInlineResponse20025Message(unittest.TestCase):
"""InlineResponse20025Message unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testInlineResponse20025Message(self):
"""Test InlineResponse20025Message"""
# FIXME: construct object with mandatory attributes with example values
# model = riam_api_client.models.inline_response20025_message.InlineResponse20025Message() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
py | 1a35d333701b225e040f2e290c578c390a929707 | from xception_model import XceptionModel
from glob import glob
import sys
sys.path.append('../')
# Main
def main():
# Setup parameters
data_dir = "../../data/"
images_dir = data_dir + "assets/images/"
checkpoint_dir = data_dir + "saved_models/"
weights_path = data_dir + "saved_models/best_xception_model.hdf5"
breeds_path = data_dir + "assets/dog_breeds.txt"
bottleneck_features_path = data_dir + "assets/bottleneck_features/DogXceptionData.npz"
xception_model = XceptionModel()
# Train model
# xception_model.learn(images_dir,
# bottleneck_file=None,
# checkpoint_dir=checkpoint_dir)
# Load Pretrained weights
xception_model.load_pretrained_model(weights_path, breeds_path)
img_path1 = "/Users/irvinodjuana/Desktop/rosie.png"
img_path2 = "/Users/irvinodjuana/Downloads/cat2.jpeg"
img_path3 = "/Users/irvinodjuana/Downloads/linkedin_pic.png"
# Test breed predictions
predictions = xception_model.predict_file(img_path1)
print(predictions)
# Test dog detection
print("Rosie is a dog: ", xception_model.detect_dog(img_path1)) # True
print("Cat is a dog: ", xception_model.detect_dog(img_path2)) # False
print("Irvino is a dog: ", xception_model.detect_dog(img_path3)) # False
# count = 0
# dogs = 0
# for file in glob(images_dir + "test/**/*.jpg")[:20]:
# count += 1
# if xception_model.detect_dog(file):
# dogs += 1
# print(f"Percentage of dogs detected in train: {dogs}/{count}")
if __name__ == "__main__":
main()
|
py | 1a35d43fd623775ae53786e5a9fc9b9b50ccacfa | # Define here the models for your spider middleware
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
from scrapy import signals
# useful for handling different item types with a single interface
from itemadapter import is_item, ItemAdapter
class QuotesOrcSpiderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(self, response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(self, response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, or item objects.
for i in result:
yield i
def process_spider_exception(self, response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Request or item objects.
pass
def process_start_requests(self, start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
class QuotesOrcDownloaderMiddleware:
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the downloader middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_request(self, request, spider):
# Called for each request that goes through the downloader
# middleware.
# Must either:
# - return None: continue processing this request
# - or return a Response object
# - or return a Request object
# - or raise IgnoreRequest: process_exception() methods of
# installed downloader middleware will be called
return None
def process_response(self, request, response, spider):
# Called with the response returned from the downloader.
# Must either:
# - return a Response object
# - return a Request object
# - or raise IgnoreRequest
return response
def process_exception(self, request, exception, spider):
# Called when a download handler or a process_request()
# (from other downloader middleware) raises an exception.
# Must either:
# - return None: continue processing this exception
# - return a Response object: stops process_exception() chain
# - return a Request object: stops process_exception() chain
pass
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
|
py | 1a35d4ae893952565944634c59e25697acb11582 | import logging
import growattServer
import datetime
logger = logging.getLogger(__name__.rsplit(".")[-1])
class Growatt:
# Growatt EMS Module
# Fetches Consumption and Generation details from Growatt API
import requests
import time
cacheTime = 10
config = None
configConfig = None
configGrowatt = None
batterySOC = 0
consumedW = 0
fetchFailed = False
generatedW = 0
lastFetch = 0
master = None
password = None
session = None
status = False
timeout = 2
username = None
useBatteryAt = None
useBatteryTill = None
batteryMaxOutput = None
dischargingTill = None
useBatteryBefore = None
now = None
def __init__(self, master):
self.master = master
self.config = master.config
self.configConfig = master.config.get("config", {})
self.configGrowatt = master.config["sources"].get("Growatt", {})
self.password = self.configGrowatt.get("password", "")
self.status = self.configGrowatt.get("enabled", False)
self.username = self.configGrowatt.get("username", "")
self.useBatteryAt = float(self.configGrowatt.get("useBatteryAt", 0))
self.useBatteryTill = float(self.configGrowatt.get("useBatteryTill", 0))
self.batteryMaxOutput = float(self.configGrowatt.get("batteryMaxOutput", 0))
timestring = self.configGrowatt.get("useBatteryBefore", "00:00")
timelist = timestring.split(":")
self.useBatteryBefore = datetime.time(int(timelist[0]), int(timelist[1]))
self.dischargingTill = self.useBatteryAt
self.now = datetime.datetime.now().time()
# Unload if this module is disabled or misconfigured
if (not self.status) or (not self.username or not self.password):
self.master.releaseModule("lib.TWCManager.EMS", "Growatt")
return None
def getConsumption(self): # gets called by TWCManager.py
if not self.status:
logger.debug("EMS Module Disabled. Skipping getConsumption")
return 0
# Perform updates if necessary
self.update()
# Return consumption value
return self.consumedW
def getGeneration(self): # gets called by TWCManager.py
if not self.status:
logger.debug("EMS Module Disabled. Skipping getGeneration")
return 0
# Perform updates if necessary
self.update()
# Return generation value
return self.generatedW
def getGenerationValues(self):
if not self.status:
logger.debug("EMS Module Disabled. Skipping getGeneration")
return 0
api = growattServer.GrowattApi()
try:
logger.debug("Fetching Growatt EMS sensor values")
login_response = api.login(self.username, self.password)
except Exception as e:
logger.log(
logging.INFO4, "Error connecting to Growatt to fetch sensor values"
)
logger.debug(str(e))
self.fetchFailed = True
return False
if not login_response:
logger.log(logging.INFO4, "Empty Response from Growatt API")
return False
if login_response:
plant_list = api.plant_list(login_response["userId"])["data"][0]
plant_ID = plant_list["plantId"]
inverter = api.device_list(plant_ID)[0]
deviceAilas = inverter["deviceAilas"]
status = api.mix_system_status(deviceAilas, plant_ID)
plant_info = api.plant_info(plant_ID)
device = plant_info["deviceList"][0]
device_sn = device["deviceSn"]
mix_status = api.mix_system_status(device_sn, plant_ID)
self.batterySOC = float(mix_status["SOC"])
gen_calc = float(status["pPv1"]) + float(status["pPv2"])
gen_calc *= 1000
gen_api = float(status["ppv"]) * 1000
inTime = (
self.now > datetime.time(00, 00) and self.now < self.useBatteryBefore
)
if self.dischargingTill < self.batterySOC and inTime:
self.dischargingTill = self.useBatteryTill
self.generatedW = gen_api + self.batteryMaxOutput
else:
self.dischargingTill = self.useBatteryAt
self.generatedW = gen_api
self.consumedW = float(status["pLocalLoad"]) * 1000
else:
logger.log(logging.INFO4, "No response from Growatt API")
def setCacheTime(self, cacheTime):
self.cacheTime = cacheTime
def setTimeout(self, timeout):
self.timeout = timeout
def update(self):
# Update function - determine if an update is required
self.now = datetime.datetime.now().time()
if (int(self.time.time()) - self.lastFetch) > self.cacheTime:
# Cache has expired. Fetch values from Growatt.
self.getGenerationValues()
# Update last fetch time
if self.fetchFailed is not True:
self.lastFetch = int(self.time.time())
return True
else:
# Cache time has not elapsed since last fetch, serve from cache.
return False
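# --- Added illustration (not part of TWCManager) ---
# The constructor above pulls its settings from master.config; a minimal
# configuration fragment satisfying those .get() calls would look roughly like
# the sketch below. Key names come from __init__ above; the values are
# placeholder examples only.
_EXAMPLE_CONFIG_SKETCH = {
    "config": {},
    "sources": {
        "Growatt": {
            "enabled": True,
            "username": "user@example.com",
            "password": "secret",
            "useBatteryAt": 20,
            "useBatteryTill": 50,
            "batteryMaxOutput": 3000,
            "useBatteryBefore": "17:00",
        }
    },
}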
|
py | 1a35d544e72c7088c912216a9155e6a733f9f752 | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
try:
from PIL import Image
except ImportError:
Image = None
@python_2_unicode_compatible
class CaseTestModel(models.Model):
integer = models.IntegerField()
integer2 = models.IntegerField(null=True)
string = models.CharField(max_length=100, default='')
big_integer = models.BigIntegerField(null=True)
binary = models.BinaryField(default=b'')
boolean = models.BooleanField(default=False)
comma_separated_integer = models.CommaSeparatedIntegerField(max_length=100, default='')
date = models.DateField(null=True, db_column='date_field')
date_time = models.DateTimeField(null=True)
decimal = models.DecimalField(max_digits=2, decimal_places=1, null=True, db_column='decimal_field')
duration = models.DurationField(null=True)
email = models.EmailField(default='')
file = models.FileField(null=True, db_column='file_field')
file_path = models.FilePathField(null=True)
float = models.FloatField(null=True, db_column='float_field')
if Image:
image = models.ImageField(null=True)
generic_ip_address = models.GenericIPAddressField(null=True)
null_boolean = models.NullBooleanField()
positive_integer = models.PositiveIntegerField(null=True)
positive_small_integer = models.PositiveSmallIntegerField(null=True)
slug = models.SlugField(default='')
small_integer = models.SmallIntegerField(null=True)
text = models.TextField(default='')
time = models.TimeField(null=True, db_column='time_field')
url = models.URLField(default='')
uuid = models.UUIDField(null=True)
fk = models.ForeignKey('self', null=True)
def __str__(self):
return "%i, %s" % (self.integer, self.string)
@python_2_unicode_compatible
class O2OCaseTestModel(models.Model):
o2o = models.OneToOneField(CaseTestModel, related_name='o2o_rel')
integer = models.IntegerField()
def __str__(self):
return "%i, %s" % (self.id, self.o2o)
@python_2_unicode_compatible
class FKCaseTestModel(models.Model):
fk = models.ForeignKey(CaseTestModel, related_name='fk_rel')
integer = models.IntegerField()
def __str__(self):
return "%i, %s" % (self.id, self.fk)
@python_2_unicode_compatible
class Client(models.Model):
REGULAR = 'R'
GOLD = 'G'
PLATINUM = 'P'
ACCOUNT_TYPE_CHOICES = (
(REGULAR, 'Regular'),
(GOLD, 'Gold'),
(PLATINUM, 'Platinum'),
)
name = models.CharField(max_length=50)
registered_on = models.DateField()
account_type = models.CharField(
max_length=1,
choices=ACCOUNT_TYPE_CHOICES,
default=REGULAR,
)
def __str__(self):
return self.name
|
py | 1a35d64fa773f5ef043d65b6aaa53a44688efa60 | import os
import time
from argparse import ArgumentParser
from django.conf import settings
from django.core.management import call_command
from rest_base.commands import BaseCommand
from rest_base.settings import base_settings
class Command(BaseCommand):
help = (
'Load predefined model instances'
)
def add_arguments(self, parser: ArgumentParser):
parser.add_argument('model', type=str, help='Specifies the model to load in the format of app_label.ModelName')
parser.add_argument(
'-f', '--filename', nargs='?', type=str, help='Specifies the file name of dumps (default: ModelName.json)')
def handle(self, *args, **options):
model: str = options['model']
filename: str = options['filename']
if filename is None:
filename = f"{model.split('.')[-1]}.json"
try:
base_dir = settings.BASE_DIR
except AttributeError as e:
raise AttributeError('BASE_DIR must be defined in Django settings') from e
path = os.path.join(base_dir, base_settings.PREDEFINED_ROOT, filename)
t = time.time()
self.log(f'load {model} instances from:')
self.log(path)
call_command('loaddata', path)
self.log(f'done ({time.time() - t:.2f} s)')
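# --- Added usage note (illustration only) ---
# Django names a management command after this module's file, which is not
# shown here. Assuming the file is saved as management/commands/load_predefined.py,
# a hypothetical invocation would be:
#   python manage.py load_predefined myapp.MyModel -f MyModel.json
# which loads <BASE_DIR>/<PREDEFINED_ROOT>/MyModel.json via Django's loaddata.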
|
py | 1a35d6b44479b9571686bede7dfd588f4b439136 | from rest_framework import serializers
from .models import *
class ProjectSerializer(serializers.ModelSerializer):
class Meta:
model = Projects
fields = ('project_title', 'project_description', 'project_image', 'Author', 'pub_date', 'link', 'country', 'author_profile')
class ProfileSerializer(serializers.ModelSerializer):
class Meta:
model = Profile
fields = ('user', 'bio', 'photo',)
|
py | 1a35d6ed35a762b8cc19f1f588a496253991bc9d | from behave import *
from behave_pandas import table_to_dataframe
use_step_matcher("parse")
@given('a gherkin table as input')
def step_impl(context, ):
context.input = context.table
@when('converted to a data frame using {column_levels:d} row as column names and {index_levels:d} column as index')
def step_impl(context, column_levels, index_levels):
context.parsed = table_to_dataframe(context.input, column_levels=column_levels, index_levels=index_levels)
@when("attempting to convert to a data frame using "
"{column_levels:d} row as column names and {index_levels:d} column as index")
def step_impl(context, column_levels, index_levels):
try:
context.parsed = table_to_dataframe(context.input, column_levels=column_levels, index_levels=index_levels)
except Exception as e:
context.exception = e
|
py | 1a35d6fa592968cacea06358b68bd147bb63426c | from pathlib import Path
import mlflow
import tensorflow as tf
import yaml
from loguru import logger
from tensorflow.keras.models import load_model
from utils import get_sorted_runs
with open("configs/params.yaml") as reproducibility_params:
mlflow_config = yaml.safe_load(reproducibility_params)["mlflow"]
experiment_name = mlflow_config["experiment_name"]
def load_model_artifact() -> tf.keras.Model:
"""Load the Keras model from the best MLflow run.
The best run is the one with the lowest validation loss within
`experiment_name`; its saved model artifact is then loaded from disk.
Returns:
The trained Keras model, ready for inference.
"""
all_runs = get_sorted_runs(
experiment_name=experiment_name,
order_by=["metrics.val_loss ASC"],
)
print(
all_runs[
[
"run_id",
"tags.mlflow.runName",
"metrics.val_categorical_accuracy",
"metrics.val_loss",
]
],
)
best_run = all_runs.iloc[0]["run_id"]
logger.info(f"Best run id is : {best_run}")
# Load model
run = mlflow.get_run(run_id=best_run)
homedir = Path(run.info.artifact_uri).parent.parent.parent.parent
root = Path(run.info.artifact_uri).relative_to(homedir)
model_url = Path(root) / Path("model/data") / "model.h5"
model = load_model(model_url)
logger.info(f"Model loaded from {run.info}")
return model
if __name__ == "__main__":
load_model_artifact()
|
py | 1a35d8cc8b3bbe1e20e56d99ce52c09b6a9e3b82 | from typing import Dict
from .base import APIObject, APIList, Session, getSessionType, DEFAULT_URL, q
from .kv import KV
from . import users
from . import objects
from .notifications import Notifications
from functools import partial
class App(APIObject):
props = {"name", "description", "icon", "settings", "settings_schema"}
def __init__(
self, access_token: str, url: str = DEFAULT_URL, session="sync", cached_data={}
):
appid = "self"
if isinstance(session, Session):
# Treat the session as already initialized, meaning that the access token is actually
# the app id
appid = access_token
super().__init__(
f"api/apps/{q(appid)}", {"app": appid}, session, cached_data=cached_data
)
else:
# Initialize the app object as a direct API
s = getSessionType(session, "self", url)
s.setAccessToken(access_token)
super().__init__("api/apps/self", {"app": appid}, s)
# The objects belonging to the app
self.objects = objects.Objects({"app": appid}, self.session)
# Key-value store associated with the app
self.kv = KV(f"api/kv/apps/{q(appid)}", self.session)
@property
def owner(self):
return self.session.f(
self.read(), lambda x: users.User(x["owner"], self.session)
)
class Apps(APIList):
def __init__(self, constraints: Dict, session: Session):
super().__init__("api/apps", constraints, session)
def __getitem__(self, item):
return self._getitem(
item, f=lambda x: App(x["id"], session=self.session, cached_data=x)
)
def __call__(self, **kwargs):
return self._call(
f=lambda x: [
App(xx["id"], session=self.session, cached_data=xx) for xx in x
],
**kwargs,
)
def create(self, name, **kwargs):
return self._create(
f=lambda x: App(x["id"], session=self.session, cached_data=x),
**{"name": name, **kwargs},
)
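# --- Added usage sketch (illustration only; not part of this client module) ---
# Assumes a server reachable at DEFAULT_URL and a valid access token;
# "MY_TOKEN" is a placeholder. Only names defined above are used.
if __name__ == "__main__":
    app = App("MY_TOKEN")  # direct API access over a synchronous session
    print(app.kv)          # key-value store scoped to this app
    print(app.objects)     # objects belonging to this app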
|
py | 1a35d8d864d6966c3ccb734f636611b77660db95 | # Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from cli_tools.pinboard.pinboard import Main
|
py | 1a35d915ac759a3a318ce54633b828dd1e9f5664 | # coding=utf-8
from subprocess import PIPE, DEVNULL, STDOUT, check_output, check_call, CalledProcessError
from utilities import mongolog, command_success, command_error, filedel
import os
import re
import datetime
from pprint import pprint
import inspect
#import urllib.parse
externalreposdir = "/etc/apt/sources.list.d/"
def aptupdate():
logid = mongolog( locals() )
try:
command = ['apt-get', 'update']
check_call(command)
except CalledProcessError as e:
return command_error( e, command, logid )
return command_success( logid=logid )
#If summary is True, the returned list also includes each package's summary information
def listinstalled( summary=False ):
options = '-f=${binary:Package};${Version};${Architecture}' + ( ';${binary:Summary}\n' if summary else '\n' )
command = ['dpkg-query', options, '-W']
try:
output = check_output(command, stderr=PIPE, universal_newlines=True).splitlines()
except CalledProcessError as e:
return command_error( e, command )
except FileNotFoundError as e:
return command_error( e, command )
#Keys for the package information
keys = ['name', 'version', 'architecture']
if summary: keys.append('summary')
#Will hold a list of dicts, one per package installed on the system
pkgs = list()
#Populate the list with the parsed values
for i in output:
appinfo = i.split(';')
pkgs.append( dict( zip(keys, appinfo) ) )
return command_success( data=pkgs )
#Search for a package. If namesonly is True (default) the search only matches package names
def aptsearch( pkgname, namesonly=True ):
#Cannot search on empty string
if not pkgname:
return command_error( returncode=255, stderr='Empty search string not allowed' )
command = ['apt-cache', 'search', pkgname]
if namesonly: command.append('--names-only')
try:
output = check_output(command, stderr=PIPE, universal_newlines=True).splitlines()
except CalledProcessError as e:
return command_error( e, command )
keys = ['name', 'desc']
pkgs = list()
for i in output:
appinfo = i.split(' - ')
pkgs.append( dict( zip(keys, appinfo) ) )
return command_success( data=pkgs )
#onlydependencies option is used from other functions in this same file
#Shows package information
#Returns: List
def aptshow(pkgname, onlydependences=False):
mode = 'depends' if onlydependences else 'show'
try:
command = ['apt-cache', mode, pkgname]
output = check_output(command, stderr=PIPE, universal_newlines=True)
except CalledProcessError as e:
return command_error( e, command )
if onlydependences:
#Remove the first line (header)
toreturn = re.sub('^.*\\n', '', output)
else:
#On multiple results only keep the first one
output = output.split('\n\n')[0]
output = output.splitlines() #<-- We use splitlines() here because onlydependences does not need a split-lined output
#Check whether the package is installed or not
isinstalled = None
try:
command = ['dpkg', '-s', pkgname]
check_call(command)
except CalledProcessError as e:
isinstalled = False
if isinstalled is None: isinstalled = True
#Removing useless lines
linestomantain = ['Package:', 'Version:', 'Priority:', 'Section:', 'Origin:', 'Installed-Size:', 'Depends:', 'Description', ' ']
output = list( filter( lambda line: any( line.startswith(s) for s in linestomantain), output ) )
#Merging all of descrition lines
i = 0
n = len(output)
while i < n:
if output[i].startswith(' '):
output[i-1] = output[i-1] + output[i] #<-- Merge lines
del output[i] #<-- Delete current line
n -= 1
else:
i += 1
#Converting list to dictionary
toreturn = dict()
for line in output:
dictelems = line.split(':', maxsplit=1)
toreturn.update({ dictelems[0] : dictelems[1] })
#Is this package installed?
toreturn.update({ 'Installed' : 1 if isinstalled else 0 })
return command_success( data=toreturn )
def aptinstall(pkgname):
logid = mongolog( locals(), {'dependencies' : aptshow(pkgname,onlydependences=True)} )
command = ['apt-get', 'install', '-y', pkgname]
environ = {'DEBIAN_FRONTEND': 'noninteractive', 'PATH': os.environ.get('PATH')}
try:
check_call( command, env=environ ) #, stdout=open(os.devnull, 'wb'), stderr=STDOUT)
except CalledProcessError:
return command_error( returncode=14, stderr='Package installation error. Package name: "'+pkgname+'"', logid=logid )
return command_success( logid=logid )
#Allows user to remove system packages using apt-get remove.
#If purge == True then launch "apt-get remove --purge" instead
def aptremove(pkgname, purge=False):
logid = mongolog( locals(), {'dependencies' : aptshow(pkgname,onlydependences=True)} )
command = ['apt-get', 'purge' if purge else 'remove', '-y', pkgname]
environ = {'DEBIAN_FRONTEND': 'noninteractive', 'PATH': os.environ.get('PATH')}
try:
check_call( command, env=environ ) #stdout=open(os.devnull, 'wb'), stderr=STDOUT)
except CalledProcessError as e:
return command_error( e, command, logid )
return command_success( logid=logid )
#Returns external repos added to system in folder /etc/apt/sources.list.d/
def getexternalrepos():
repospath = '/etc/apt/sources.list.d/'
reposfiles = os.listdir(repospath)
#Removing file that ends with '.save'
reposfiles = list( filter( lambda item: not item.endswith('.save'), reposfiles ) )
#List to return
repos = list()
for filename in reposfiles:
with open(repospath + filename) as opened:
repos.append({
'filename': filename,
'lines': opened.read()
})
return command_success( data=repos )
def getreponame(): return 'nomodo-' + datetime.datetime.now().strftime('%Y%m%d%H%M%S')
#returns <string> containing filename where repo is added
def addrepo( content, name ):
logid = mongolog( locals() )
filename = '/etc/apt/sources.list.d/' + name + '.list'
repofile = open( filename, 'a')
repofile.write(content + '\n')
repofile.close()
return command_success( logid=logid )
def removerepofile(filename):
result = filedel( externalreposdir + filename )
filedel( externalreposdir + filename + '.save' ) #Ignore errors if the file does not exist; return value discarded
logid = mongolog( locals() )
repospath = '/etc/apt/sources.list.d/'
try:
os.remove(repospath + filename + '.list')
os.remove(repospath + filename + '.list.save')
except FileNotFoundError:
return command_error( returncode=10, stderr='File to remove not found: "'+repospath+'"', logid=logid )
if result['returncode'] == 0:
return command_success( logid=logid )
else:
return result
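# --- Added usage sketch (illustration only) ---
# Assumes a Debian/Ubuntu host with dpkg/apt available, and assumes (as the
# code in removerepofile() above suggests) that command_success()/command_error()
# return dicts carrying a 'returncode' key plus 'data' on success.
if __name__ == '__main__':
    installed = listinstalled(summary=True)
    if installed['returncode'] == 0:
        for pkg in installed['data'][:5]:
            print(pkg['name'], pkg['version'], pkg['architecture'])
    found = aptsearch('curl')
    if found['returncode'] == 0:
        for pkg in found['data'][:3]:
            print(pkg['name'], '-', pkg['desc'])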
|
py | 1a35daa931838713843f8412f265d9399f4166da | # -*- coding: utf-8 -*-
# Created by restran on 2016/12/4
# https://github.com/RyanKung/rc4-python3/blob/master/rc4/rc4.py
__all__ = ['encrypt', 'decrypt']
def crypt(data: bytes, key: bytes) -> bytes:
"""RC4 algorithm"""
x = 0
box = list(range(256))
for i in range(256):
x = (x + int(box[i]) + int(key[i % len(key)])) % 256
box[i], box[x] = box[x], box[i]
print(len(data))
x = y = 0
out = []
for char in data:
x = (x + 1) % 256
y = (y + box[x]) % 256
box[x], box[y] = box[y], box[x]
t = char ^ box[(box[x] + box[y]) % 256]
out.append(t)
return bytes(bytearray(out))
def encrypt(data: str, key: str) -> bytes:
"""RC4 encryption of a text message; returns the ciphertext as bytes"""
data = crypt(data.encode(), key.encode())
return data
def decrypt(data: bytes, key: str) -> bytes:
"""RC4 decryption of encoded data"""
return crypt(data, key.encode())
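# --- Added illustration: RC4 is symmetric, so decrypt() inverts encrypt() ---
# (uses only the functions defined above; the message and key are arbitrary examples)
def _roundtrip_example():
    message, key = 'hello rc4', 'example-key'
    assert decrypt(encrypt(message, key), key) == message.encode()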
def main():
# Data to encrypt
data = 'UUyFTj8PCzF6geFn6xgBOYSvVTrbpNU4OF9db9wMcPD1yDbaJw=='
# Key
key = 'welcometoicqedu'
# Encrypt
encoded_data = encrypt(data=data, key=key)
print(encoded_data)
# Decrypt
decoded_data = decrypt(data=encoded_data, key=key)
print(decoded_data)
if __name__ == '__main__':
main()
|
py | 1a35daf60991f81b11f5ab0af243a24b35b7216e | import os
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
class SmoothDialog(QDialog):
def __init__(self, parent=None, flag=0):
super(SmoothDialog, self).__init__(parent)
self.flag = flag
self.setWindowTitle('smoothDialog')
# Add the widgets to the layout
layout = QVBoxLayout(self)
self.form = QFormLayout(self)
if flag == 0:
self.smoothTypeLabel = QLabel(self)
self.smoothTypeLabel.setText("滤波类型")
self.smoothTypeCB = QComboBox(self)
self.smoothTypeCB.addItem("均值滤波")
self.smoothTypeCB.addItem("方框滤波")
self.smoothTypeCB.addItem("高斯滤波")
self.smoothTypeCB.addItem("中值滤波")
self.smoothTypeCB.addItem("双边滤波")
self.smoothTypeCB.currentIndexChanged.connect(self.selectionchange)
self.ksizeLabel = QLabel(self)
self.ksizeLabel.setText("滤波核大小")
self.ksizeLabel.setFocus()
self.ksizeLine = QLineEdit(self)
self.ksizeLine.setProperty("name", "smoothKsizeLine")
self.ksizeLine.setPlaceholderText("滤波核形如(5,5)")
self.form.addRow(self.smoothTypeLabel, self.smoothTypeCB)
self.form.addRow(self.ksizeLabel, self.ksizeLine)
else:
# self.ksizeLabel = QLabel(self)
# self.ksizeLabel.setText("修正值")
# self.ksizeLine = QLineEdit(self)
self.kernelLabel = QPushButton(self)
self.kernelLabel.setText("导入卷积核")
self.kernelLabel.clicked.connect(self.importImage)
self.kernelLine = QLineEdit(self)
# self.form.addRow(self.ksizeLabel, self.ksizeLine)
self.form.addRow(self.kernelLabel, self.kernelLine)
layout.addLayout(self.form)
buttons = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel, Qt.Horizontal, self)
buttons.accepted.connect(self.accept)
buttons.rejected.connect(self.reject)
layout.addWidget(buttons)
def getData(self):
if self.flag == 0:
if self.smoothTypeCB.currentText() == "均值滤波":
return self.smoothTypeCB.currentText(), self.ksizeLine.text()
elif self.smoothTypeCB.currentText() == "方框滤波":
return self.smoothTypeCB.currentText(), self.ksizeLine.text(), self.ddepthLine.text()
elif self.smoothTypeCB.currentText() == "高斯滤波":
return self.smoothTypeCB.currentText(), self.ksizeLine.text(), self.sigmaXLine.text(), self.sigmaYLine.text()
elif self.smoothTypeCB.currentText() == "中值滤波":
return self.smoothTypeCB.currentText(), self.ksizeLine.text()
else:
return self.smoothTypeCB.currentText(), self.ksizeLine.text(), self.sigmaXLine.text(), self.sigmaYLine.text()
else:
return self.kernelLine.text()
def selectionchange(self, i):
for row in range(int(self.form.count() / 2) - 1, 1, -1):
self.form.removeRow(row)
self.ksizeLabel.setText("滤波核大小")
if i == 1:
self.ddepthLabel = QLabel(self)
self.ddepthLabel.setText("处理结果图像的图像深度")
self.ddepthLine = QLineEdit(self)
self.form.addRow(self.ddepthLabel, self.ddepthLine)
elif i == 2:
self.sigmaXLabel = QLabel(self)
self.sigmaXLabel.setText("卷积核在水平方向上的标准差")
self.sigmaXLine = QLineEdit(self)
self.sigmaXLine.setText("0")
self.sigmaYLabel = QLabel(self)
self.sigmaYLabel.setText("卷积核在垂直方向上的标准差")
self.sigmaYLine = QLineEdit(self)
self.sigmaYLine.setText("0")
self.form.addRow(self.sigmaXLabel, self.sigmaXLine)
self.form.addRow(self.sigmaYLabel, self.sigmaYLine)
elif i == 3:
self.ksizeLine.setPlaceholderText("滤波核大小,形如5")
elif i == 4:
self.ksizeLabel.setText("以当前像素点为中心点的直径")
self.ksizeLine.setPlaceholderText("空间距离参数")
self.sigmaXLabel = QLabel(self)
self.sigmaXLabel.setText("颜色差值范围")
self.sigmaXLine = QLineEdit(self)
self.sigmaYLabel = QLabel(self)
self.sigmaYLabel.setText("坐标空间中sigma值")
self.sigmaYLine = QLineEdit(self)
self.form.addRow(self.sigmaXLabel, self.sigmaXLine)
self.form.addRow(self.sigmaYLabel, self.sigmaYLine)
def importImage(self):
imgName, imgType = QFileDialog.getOpenFileName(self, "上传核", os.getcwd(), "All Files(*)")
self.kernelLine.setText(imgName)
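# --- Added usage sketch (illustration only; requires a Qt event loop) ---
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    dlg = SmoothDialog(flag=0)
    if dlg.exec_() == QDialog.Accepted:
        # For flag == 0 the first element is the selected filter type,
        # e.g. ('均值滤波', '(5,5)') for a mean filter with a 5x5 kernel.
        print(dlg.getData())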
|
py | 1a35db68bca3415a8fd07f81226af27d76003772 | import random
import string
from importlib import import_module
from typing import List
from protobuf_gen.transpiler import build, BuildProps, InputModule, OutputModule
def _load_protoc_mods(
output_files: List[InputModule],
root_autogen: str,
):
# we just need to say "map this definition module to a new one"
output_mods: List[OutputModule] = []
clear_mods = []
for of in output_files:
to_import = root_autogen + '.' + of.mod
m = import_module(to_import)
clear_mods.append(m)
output_mods += [of.to_output(m.DESCRIPTOR)]
return output_mods
def wrap(
output_dir_wrappers='./wrappers',
root_autogen='autogen',
output_files: List[InputModule] = None,
):
build(
BuildProps(
root_autogen,
_load_protoc_mods(
output_files,
root_autogen,
)
),
outdir=output_dir_wrappers
)
|
py | 1a35dbfd510df44838f0bdf9ce352bd7acde44e0 | from unittest.mock import patch
import pytest
from click.testing import CliRunner
from todoman.cli import cli
from todoman.configuration import ConfigurationException
from todoman.configuration import load_config
def test_explicit_nonexistant(runner):
result = CliRunner().invoke(
cli,
env={"TODOMAN_CONFIG": "/nonexistant"},
catch_exceptions=True,
)
assert result.exception
assert "Configuration file /nonexistant does not exist" in result.output
def test_xdg_nonexistant(runner):
with patch("xdg.BaseDirectory.xdg_config_dirs", ["/does-not-exist"]):
result = CliRunner().invoke(
cli,
catch_exceptions=True,
)
assert result.exception
assert "No configuration file found" in result.output
def test_xdg_existant(runner, tmpdir, config):
with tmpdir.mkdir("todoman").join("config.py").open("w") as f:
with config.open() as c:
f.write(c.read())
with patch("xdg.BaseDirectory.xdg_config_dirs", [str(tmpdir)]):
result = CliRunner().invoke(
cli,
catch_exceptions=True,
)
assert not result.exception
assert not result.output.strip()
def test_sane_config(config, runner, tmpdir):
config.write(
'color = "auto"\n'
'date_format = "%Y-%m-%d"\n'
f'path = "{tmpdir}"\n'
f'cache_path = "{tmpdir.join("cache.sqlite")}"\n'
)
result = runner.invoke(cli)
# This is handy for debugging breakage:
if result.exception:
print(result.output)
raise result.exception
assert not result.exception
def test_invalid_color(config, runner):
config.write('color = 12\npath = "/"\n')
result = runner.invoke(cli, ["list"])
assert result.exception
assert (
"Error: Bad color setting. Invalid type (expected str, got int)."
in result.output
)
def test_invalid_color_arg(config, runner):
config.write('path = "/"\n')
result = runner.invoke(cli, ["--color", "12", "list"])
assert result.exception
assert "Usage:" in result.output
def test_missing_path(config, runner):
config.write('color = "auto"\n')
result = runner.invoke(cli, ["list"])
assert result.exception
assert "Error: Missing 'path' setting." in result.output
@pytest.mark.xfail(reason="Not implemented")
def test_extra_entry(config, runner):
config.write("color = auto\ndate_format = %Y-%m-%d\npath = /\nblah = false\n")
result = runner.invoke(cli, ["list"])
assert result.exception
assert "Error: Invalid configuration entry" in result.output
@pytest.mark.xfail(reason="Not implemented")
def test_extra_section(config, runner):
config.write("date_format = %Y-%m-%d\npath = /\n[extra]\ncolor = auto\n")
result = runner.invoke(cli, ["list"])
assert result.exception
assert "Invalid configuration section" in result.output
def test_missing_cache_dir(config, runner, tmpdir):
cache_dir = tmpdir.join("does").join("not").join("exist")
cache_file = cache_dir.join("cache.sqlite")
config.write(f'path = "{tmpdir}/*"\ncache_path = "{cache_file}"\n')
result = runner.invoke(cli)
assert not result.exception
assert cache_dir.isdir()
assert cache_file.isfile()
def test_date_field_in_time_format(config, runner, tmpdir):
config.write('path = "/"\ntime_format = "%Y-%m-%d"\n')
result = runner.invoke(cli)
assert result.exception
assert (
"Found date component in `time_format`, please use `date_format` for that."
in result.output
)
def test_date_field_in_time(config, runner, tmpdir):
config.write('path = "/"\ndate_format = "%Y-%d-:%M"\n')
result = runner.invoke(cli)
assert result.exception
assert (
"Found time component in `date_format`, please use `time_format` for that."
in result.output
)
def test_colour_validation_auto(config):
with patch(
"todoman.configuration.find_config",
return_value=(str(config)),
):
cfg = load_config()
assert cfg["color"] == "auto"
def test_colour_validation_always(config):
config.write("color = 'always'\n", "a")
with patch(
"todoman.configuration.find_config",
return_value=(str(config)),
):
cfg = load_config()
assert cfg["color"] == "always"
def test_colour_validation_invalid(config):
config.write("color = 'on_weekends_only'\n", "a")
with patch(
"todoman.configuration.find_config",
return_value=(str(config)),
), pytest.raises(ConfigurationException):
load_config()
|
py | 1a35dc7e15161491a679983f998ea8f836b1bc08 | # coding: utf-8
"""
Ed-Fi Operational Data Store API
The Ed-Fi ODS / API enables applications to read and write education data stored in an Ed-Fi ODS through a secure REST interface. *** > *Note: Consumers of ODS / API information should sanitize all data for display and storage. The ODS / API provides reasonable safeguards against cross-site scripting attacks and other malicious content, but the platform does not and cannot guarantee that the data it contains is free of all potentially harmful content.* *** # noqa: E501
OpenAPI spec version: 3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from swagger_client.api_client import ApiClient
class SchoolsApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def get_schools(self, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schools(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param str fields: Specifies a subset of properties that should be returned for each entity (e.g. \"property1,collection1(collProp1,collProp2)\").
:param str q: Specifies a query filter expression for the request. Currently only supports range-based queries on dates and numbers (e.g. \"schoolId:[255901000...255901002]\" and \"BeginDate:[2016-03-07...2016-03-10]\").
:param int school_id: The identifier assigned to a school.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[School]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_schools_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_schools_with_http_info(**kwargs) # noqa: E501
return data
def get_schools_with_http_info(self, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schools_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param str fields: Specifies a subset of properties that should be returned for each entity (e.g. \"property1,collection1(collProp1,collProp2)\").
:param str q: Specifies a query filter expression for the request. Currently only supports range-based queries on dates and numbers (e.g. \"schoolId:[255901000...255901002]\" and \"BeginDate:[2016-03-07...2016-03-10]\").
:param int school_id: The identifier assigned to a school.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[School]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['offset', 'limit', 'fields', 'q', 'school_id', 'snapshot_identifier'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_schools" % key
)
params[key] = val
del params['kwargs']
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_schools`, must be a value less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_schools`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'q' in params:
query_params.append(('q', params['q'])) # noqa: E501
if 'school_id' in params:
query_params.append(('schoolId', params['school_id'])) # noqa: E501
header_params = {}
if 'snapshot_identifier' in params:
header_params['Snapshot-Identifier'] = params['snapshot_identifier'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/enrollment/Schools', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[School]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_schools_by_id(self, id, **kwargs): # noqa: E501
"""Retrieves a specific resource using the resource's identifier (using the \"Get By Id\" pattern). # noqa: E501
This GET operation retrieves a resource by the specified resource identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schools_by_id(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_none_match: The previously returned ETag header value, used here to prevent the unnecessary data transfer of an unchanged resource.
:param str fields: Specifies a subset of properties that should be returned for each entity (e.g. \"property1,collection1(collProp1,collProp2)\").
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: School
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_schools_by_id_with_http_info(id, **kwargs) # noqa: E501
else:
(data) = self.get_schools_by_id_with_http_info(id, **kwargs) # noqa: E501
return data
def get_schools_by_id_with_http_info(self, id, **kwargs): # noqa: E501
"""Retrieves a specific resource using the resource's identifier (using the \"Get By Id\" pattern). # noqa: E501
This GET operation retrieves a resource by the specified resource identifier. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schools_by_id_with_http_info(id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str id: A resource identifier that uniquely identifies the resource. (required)
:param str if_none_match: The previously returned ETag header value, used here to prevent the unnecessary data transfer of an unchanged resource.
:param str fields: Specifies a subset of properties that should be returned for each entity (e.g. \"property1,collection1(collProp1,collProp2)\").
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: School
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['id', 'if_none_match', 'fields', 'snapshot_identifier'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_schools_by_id" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'id' is set
if self.api_client.client_side_validation and ('id' not in params or
params['id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `id` when calling `get_schools_by_id`") # noqa: E501
collection_formats = {}
path_params = {}
if 'id' in params:
path_params['id'] = params['id'] # noqa: E501
query_params = []
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
header_params = {}
if 'if_none_match' in params:
header_params['If-None-Match'] = params['if_none_match'] # noqa: E501
if 'snapshot_identifier' in params:
header_params['Snapshot-Identifier'] = params['snapshot_identifier'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/enrollment/Schools/{id}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='School', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_schools_by_local_education_agency(self, local_education_agency_id, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schools_by_local_education_agency(local_education_agency_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str local_education_agency_id: (required)
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param str fields: Specifies a subset of properties that should be returned for each entity (e.g. \"property1,collection1(collProp1,collProp2)\").
:param str q: Specifies a query filter expression for the request. Currently only supports range-based queries on dates and numbers (e.g. \"schoolId:[255901000...255901002]\" and \"BeginDate:[2016-03-07...2016-03-10]\").
:param int school_id: The identifier assigned to a school.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[School]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_schools_by_local_education_agency_with_http_info(local_education_agency_id, **kwargs) # noqa: E501
else:
(data) = self.get_schools_by_local_education_agency_with_http_info(local_education_agency_id, **kwargs) # noqa: E501
return data
def get_schools_by_local_education_agency_with_http_info(self, local_education_agency_id, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schools_by_local_education_agency_with_http_info(local_education_agency_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str local_education_agency_id: (required)
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param str fields: Specifies a subset of properties that should be returned for each entity (e.g. \"property1,collection1(collProp1,collProp2)\").
:param str q: Specifies a query filter expression for the request. Currently only supports range-based queries on dates and numbers (e.g. \"schoolId:[255901000...255901002]\" and \"BeginDate:[2016-03-07...2016-03-10]\").
:param int school_id: The identifier assigned to a school.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[School]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['local_education_agency_id', 'offset', 'limit', 'fields', 'q', 'school_id', 'snapshot_identifier'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_schools_by_local_education_agency" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'local_education_agency_id' is set
if self.api_client.client_side_validation and ('local_education_agency_id' not in params or
params['local_education_agency_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `local_education_agency_id` when calling `get_schools_by_local_education_agency`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_schools_by_local_education_agency`, must be a value less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_schools_by_local_education_agency`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'local_education_agency_id' in params:
path_params['localEducationAgency_id'] = params['local_education_agency_id'] # noqa: E501
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'q' in params:
query_params.append(('q', params['q'])) # noqa: E501
if 'school_id' in params:
query_params.append(('schoolId', params['school_id'])) # noqa: E501
header_params = {}
if 'snapshot_identifier' in params:
header_params['Snapshot-Identifier'] = params['snapshot_identifier'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/enrollment/localEducationAgencies/{localEducationAgency_id}/schools', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[School]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_schools_by_section(self, section_id, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schools_by_section(section_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str section_id: (required)
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param str fields: Specifies a subset of properties that should be returned for each entity (e.g. \"property1,collection1(collProp1,collProp2)\").
:param str q: Specifies a query filter expression for the request. Currently only supports range-based queries on dates and numbers (e.g. \"schoolId:[255901000...255901002]\" and \"BeginDate:[2016-03-07...2016-03-10]\").
:param int school_id: The identifier assigned to a school.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[School]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_schools_by_section_with_http_info(section_id, **kwargs) # noqa: E501
else:
(data) = self.get_schools_by_section_with_http_info(section_id, **kwargs) # noqa: E501
return data
def get_schools_by_section_with_http_info(self, section_id, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schools_by_section_with_http_info(section_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str section_id: (required)
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param str fields: Specifies a subset of properties that should be returned for each entity (e.g. \"property1,collection1(collProp1,collProp2)\").
:param str q: Specifies a query filter expression for the request. Currently only supports range-based queries on dates and numbers (e.g. \"schoolId:[255901000...255901002]\" and \"BeginDate:[2016-03-07...2016-03-10]\").
:param int school_id: The identifier assigned to a school.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[School]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['section_id', 'offset', 'limit', 'fields', 'q', 'school_id', 'snapshot_identifier'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_schools_by_section" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'section_id' is set
if self.api_client.client_side_validation and ('section_id' not in params or
params['section_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `section_id` when calling `get_schools_by_section`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_schools_by_section`, must be a value less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_schools_by_section`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'section_id' in params:
path_params['section_id'] = params['section_id'] # noqa: E501
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'q' in params:
query_params.append(('q', params['q'])) # noqa: E501
if 'school_id' in params:
query_params.append(('schoolId', params['school_id'])) # noqa: E501
header_params = {}
if 'snapshot_identifier' in params:
header_params['Snapshot-Identifier'] = params['snapshot_identifier'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/enrollment/sections/{section_id}/schools', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[School]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_schools_by_staff(self, staff_id, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schools_by_staff(staff_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str staff_id: (required)
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param str fields: Specifies a subset of properties that should be returned for each entity (e.g. \"property1,collection1(collProp1,collProp2)\").
:param str q: Specifies a query filter expression for the request. Currently only supports range-based queries on dates and numbers (e.g. \"schoolId:[255901000...255901002]\" and \"BeginDate:[2016-03-07...2016-03-10]\").
:param int school_id: The identifier assigned to a school.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[School]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_schools_by_staff_with_http_info(staff_id, **kwargs) # noqa: E501
else:
(data) = self.get_schools_by_staff_with_http_info(staff_id, **kwargs) # noqa: E501
return data
def get_schools_by_staff_with_http_info(self, staff_id, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schools_by_staff_with_http_info(staff_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str staff_id: (required)
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param str fields: Specifies a subset of properties that should be returned for each entity (e.g. \"property1,collection1(collProp1,collProp2)\").
:param str q: Specifies a query filter expression for the request. Currently only supports range-based queries on dates and numbers (e.g. \"schoolId:[255901000...255901002]\" and \"BeginDate:[2016-03-07...2016-03-10]\").
:param int school_id: The identifier assigned to a school.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[School]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['staff_id', 'offset', 'limit', 'fields', 'q', 'school_id', 'snapshot_identifier'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_schools_by_staff" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'staff_id' is set
if self.api_client.client_side_validation and ('staff_id' not in params or
params['staff_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `staff_id` when calling `get_schools_by_staff`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_schools_by_staff`, must be a value less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_schools_by_staff`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'staff_id' in params:
path_params['staff_id'] = params['staff_id'] # noqa: E501
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'q' in params:
query_params.append(('q', params['q'])) # noqa: E501
if 'school_id' in params:
query_params.append(('schoolId', params['school_id'])) # noqa: E501
header_params = {}
if 'snapshot_identifier' in params:
header_params['Snapshot-Identifier'] = params['snapshot_identifier'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/enrollment/staffs/{staff_id}/schools', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[School]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_schools_by_student(self, student_id, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schools_by_student(student_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str student_id: (required)
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param str fields: Specifies a subset of properties that should be returned for each entity (e.g. \"property1,collection1(collProp1,collProp2)\").
:param str q: Specifies a query filter expression for the request. Currently only supports range-based queries on dates and numbers (e.g. \"schoolId:[255901000...255901002]\" and \"BeginDate:[2016-03-07...2016-03-10]\").
:param int school_id: The identifier assigned to a school.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[School]
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_schools_by_student_with_http_info(student_id, **kwargs) # noqa: E501
else:
(data) = self.get_schools_by_student_with_http_info(student_id, **kwargs) # noqa: E501
return data
def get_schools_by_student_with_http_info(self, student_id, **kwargs): # noqa: E501
"""Retrieves specific resources using the resource's property values (using the \"Get\" pattern). # noqa: E501
This GET operation provides access to resources using the \"Get\" search pattern. The values of any properties of the resource that are specified will be used to return all matching results (if it exists). # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_schools_by_student_with_http_info(student_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str student_id: (required)
:param int offset: Indicates how many items should be skipped before returning results.
:param int limit: Indicates the maximum number of items that should be returned in the results.
:param str fields: Specifies a subset of properties that should be returned for each entity (e.g. \"property1,collection1(collProp1,collProp2)\").
:param str q: Specifies a query filter expression for the request. Currently only supports range-based queries on dates and numbers (e.g. \"schoolId:[255901000...255901002]\" and \"BeginDate:[2016-03-07...2016-03-10]\").
:param int school_id: The identifier assigned to a school.
:param str snapshot_identifier: Indicates the Snapshot-Identifier that should be used.
:return: list[School]
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['student_id', 'offset', 'limit', 'fields', 'q', 'school_id', 'snapshot_identifier'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_schools_by_student" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'student_id' is set
if self.api_client.client_side_validation and ('student_id' not in params or
params['student_id'] is None): # noqa: E501
raise ValueError("Missing the required parameter `student_id` when calling `get_schools_by_student`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] > 500): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_schools_by_student`, must be a value less than or equal to `500`") # noqa: E501
if self.api_client.client_side_validation and ('limit' in params and params['limit'] < 0): # noqa: E501
raise ValueError("Invalid value for parameter `limit` when calling `get_schools_by_student`, must be a value greater than or equal to `0`") # noqa: E501
collection_formats = {}
path_params = {}
if 'student_id' in params:
path_params['student_id'] = params['student_id'] # noqa: E501
query_params = []
if 'offset' in params:
query_params.append(('offset', params['offset'])) # noqa: E501
if 'limit' in params:
query_params.append(('limit', params['limit'])) # noqa: E501
if 'fields' in params:
query_params.append(('fields', params['fields'])) # noqa: E501
if 'q' in params:
query_params.append(('q', params['q'])) # noqa: E501
if 'school_id' in params:
query_params.append(('schoolId', params['school_id'])) # noqa: E501
header_params = {}
if 'snapshot_identifier' in params:
header_params['Snapshot-Identifier'] = params['snapshot_identifier'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_client_credentials'] # noqa: E501
return self.api_client.call_api(
'/ed-fi/enrollment/students/{student_id}/schools', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='list[School]', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
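# --------------------------------------------------------------------------
# Illustrative usage sketch (not part of the generated client). The package
# and class names below (`swagger_client`, `SchoolsApi`) and the token setup
# are assumptions about how the generator was configured; adjust them to the
# names actually emitted for this API.
#
#     import swagger_client
#
#     configuration = swagger_client.Configuration()
#     configuration.access_token = "<oauth2 client-credentials token>"
#     api = swagger_client.SchoolsApi(swagger_client.ApiClient(configuration))
#
#     # synchronous call with paging parameters
#     schools = api.get_schools_by_staff("staff-123", offset=0, limit=100)
#
#     # asynchronous call returns a thread; block on .get() for the result
#     thread = api.get_schools_by_student("student-456", async_req=True)
#     schools = thread.get()
# --------------------------------------------------------------------------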
|
py | 1a35dcdde4713c34f619e82a31b6674daa478140 | from conans.client.generators.cmake import DepsCppCmake
from conans.model import Generator
class CMakePathsGenerator(Generator):
name = "cmake_paths"
@property
def filename(self):
return "conan_paths.cmake"
@property
def content(self):
lines = []
# The CONAN_XXX_ROOT variables are needed because the FindXXX.cmake or XXXConfig.cmake
# in a package could have been "patched" with the `cmake.patch_config_paths()`
# replacing absolute paths with CONAN_XXX_ROOT variables.
for _, dep_cpp_info in self.deps_build_info.dependencies:
var_name = "CONAN_{}_ROOT".format(dep_cpp_info.get_name(self.name).upper())
lines.append('set({} {})'.format(var_name, DepsCppCmake(dep_cpp_info,
self.name).rootpath))
# We want to prioritize the FindXXX.cmake files:
# 1. First the files found in the packages
# 2. The previously set (by default CMAKE_MODULE_PATH is empty)
# 3. The "install_folder" ones, in case there is no FindXXX.cmake, try with the install dir
# if the user used the "cmake_find_package" will find the auto-generated
# 4. The CMake installation dir/Modules ones.
deps = DepsCppCmake(self.deps_build_info, self.name)
lines.append("set(CMAKE_MODULE_PATH {deps.build_paths} ${{CMAKE_MODULE_PATH}} "
"${{CMAKE_CURRENT_LIST_DIR}})".format(deps=deps))
lines.append("set(CMAKE_PREFIX_PATH {deps.build_paths} ${{CMAKE_PREFIX_PATH}} "
"${{CMAKE_CURRENT_LIST_DIR}})".format(deps=deps))
return "\n".join(lines)
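# Illustrative sketch of the emitted conan_paths.cmake (paths below are made-up
# examples; the real content depends on the dependency graph):
#
#     set(CONAN_ZLIB_ROOT "/home/user/.conan/data/zlib/1.2.11/_/_/package/<pkg_id>")
#     set(CMAKE_MODULE_PATH "<dep build paths>" ${CMAKE_MODULE_PATH} ${CMAKE_CURRENT_LIST_DIR})
#     set(CMAKE_PREFIX_PATH "<dep build paths>" ${CMAKE_PREFIX_PATH} ${CMAKE_CURRENT_LIST_DIR})
#
# Consumers typically load it either as a toolchain file
# (cmake .. -DCMAKE_TOOLCHAIN_FILE=conan_paths.cmake) or with
# include(${CMAKE_BINARY_DIR}/conan_paths.cmake) before any find_package() call,
# so the package-provided FindXXX.cmake files win over the system ones.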
|
py | 1a35de980e93c80c611fc11bfcbceb66be3a3fde | #!/usr/bin/env python
from setuptools import setup
VERSION = "0.1.2"
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="target-couchbase",
version=VERSION,
description="Load data on Couchbase via singer.io",
long_description=long_description,
long_description_content_type="text/markdown",
author="Daigo Tanaka, Anelen Co., LLC",
url="https://github.com/anelendata/target_couchbase",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Operating System :: MacOS :: MacOS X",
"Operating System :: POSIX :: Linux",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
install_requires=[
"singer-python>=5.2.0",
"couchbase>=3.0.8",
"simplejson==3.11.1",
"setuptools>=40.3.0",
],
entry_points="""
[console_scripts]
target-couchbase=target_couchbase:main
""",
packages=["target_couchbase"],
package_data={
# Use MANIFEST.ini
},
include_package_data=True
)
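# Illustrative invocation sketch for the console script declared above. The tap
# name and config file are placeholders, and the config flag follows the common
# singer.io convention (check the target's own help output for the exact flag):
#
#     pip install target-couchbase
#     tap-exchangeratesapi | target-couchbase --config couchbase_config.json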
|
py | 1a35deebac65e3442d15d6d38f517545a53d7665 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 15 11:52:26 2018
@author: student
"""
import sys
import random
# import sys, random
class BankAccount():
min_acc_balance = 0
def __init__(self):
self.acc_balance = 0
def get_details(self, acc_type='Savings'):
self.name = input('Enter customer name: ')
self.acc_number = random.randint(100, 150)
self.acc_type = acc_type
def deposit_amount(self, amount):
self.acc_balance += amount
def withdraw_amount(self, amount):
        if self.acc_balance - amount >= self.min_acc_balance:
            self.acc_balance -= amount
else:
print('Insufficient Funds to withdraw')
def get_balance(self):
return self.acc_balance
def display_acc_details(self):
print(self.acc_number,"\t", self.name,"\t", self.get_balance())
def Display(accounts):
for account in accounts.values():
account.display_acc_details()
def main():
accounts = {}
while True:
print('1. Open the bank account')
print('2. Withdraw amount')
print('3. Deposit amount')
print('4. Display details')
print('5. Exit')
choice = int(input('Enter choice: '))
if choice == 1:
account = BankAccount()
account.get_details()
accounts[account.acc_number] = account
elif choice == 2:
acc = int(input('Withdraw account: '))
amount = int(input('Enter amount to withdraw: '))
accounts[acc].withdraw_amount(amount)
elif choice == 3:
acc = int(input('Deposit account: '))
amount = int(input('Enter amount to deposit: '))
accounts[acc].deposit_amount(amount)
elif choice == 4:
print('Account\tName\tBalance')
Display(accounts)
else:
sys.exit('Done')
if __name__ == '__main__':
main() |
py | 1a35defe3b5ee75ddef4a11617f9d5e3c3b9a2c8 | # bsl, 2016
import xbmc
import xbmcaddon
import xbmcgui
import json
import sys
__addon__ = xbmcaddon.Addon()
__addonname__ = __addon__.getAddonInfo('name')
__icon__ = __addon__.getAddonInfo('icon')
REMOTE_DBG = False
if REMOTE_DBG:
try:
import pydevd
pydevd.settrace(stdoutToServer=True, stderrToServer=True)
except:
        xbmcgui.Dialog().ok(__addonname__, "debug mode not working")
sys.exit(1)
req = xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Settings.GetSettings","id":1}')
jsonRPCRes = json.loads(req)
settingsList = jsonRPCRes["result"]["settings"]
audioSetting = [item for item in settingsList if item["id"] == "audiooutput.audiodevice"][0]
audioDeviceOptions = audioSetting["options"]
activeAudioDeviceValue = audioSetting["value"]
activeAudioDeviceId = [index for (index, option) in enumerate(audioDeviceOptions) if option["value"] == activeAudioDeviceValue][0]
nextIndex = ( activeAudioDeviceId + 1 ) % len(audioDeviceOptions)
nextValue = audioDeviceOptions[nextIndex]["value"]
nextName = audioDeviceOptions[nextIndex]["label"]
changeReq = xbmc.executeJSONRPC('{"jsonrpc":"2.0","method":"Settings.SetSettingValue","params":{"setting":"audiooutput.audiodevice","value":"%s"},"id":1}' % nextValue)
try:
    changeResJson = json.loads(changeReq)
if changeResJson["result"] != True:
raise Exception
except:
sys.stderr.write("Error switching audio output device")
raise Exception
xbmc.executebuiltin('Notification("%s","Output-Device: %s",2000,"%s")' % (__addonname__, nextName, __icon__ ))
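# Illustrative shape of the JSON-RPC payload this script relies on (abridged;
# the device values below are made-up examples):
#
#     {"jsonrpc": "2.0", "id": 1, "result": {"settings": [
#         {"id": "audiooutput.audiodevice",
#          "value": "PI:HDMI",
#          "options": [{"value": "PI:HDMI", "label": "HDMI"},
#                      {"value": "PI:Analogue", "label": "Analogue"}]},
#         ...
#     ]}}
#
# The script locates the active option, picks the next one (wrapping around),
# and writes it back via Settings.SetSettingValue.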
|
py | 1a35df73166bbb11cb1be7ed750334a60e3a5383 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Sang T. Truong"
__copyright__ = "Copyright 2021, The incomevis project"
__credits__ = ["Sang T. Truong"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "Sang T. Truong"
__email__ = "[email protected]"
__status__ = "Dev"
from .getColor import *
from .getDecile import *
from .getPercentile import *
from .getStateName import *
from .path import *
|
py | 1a35e0320e3985a2f856aa3e1639437645377398 | # shared global variables to be imported from model also
import numpy as np
import os
UNK = "$UNK$"
NUM = "$NUM$"
NONE = "o"
class DataSet(object):
def __init__(self, filepath, vocab_words=None, vocab_tags=None, max_iter=None, lower=True, allow_unk=True):
self.filepath = filepath
self.max_iter = max_iter
# Process data setting
self.vocab_words = vocab_words
self.vocab_tags = vocab_tags
# default setting
self.length = None
self.lower = lower
self.allow_unk = allow_unk
# assign function
if self.vocab_words and self.vocab_tags:
            self.proc_word = process_vocab(self.vocab_words, lower=self.lower, allow_unk=self.allow_unk)  # returns a lookup function
            self.proc_tag = process_vocab(self.vocab_tags, lower=self.lower, allow_unk=self.allow_unk)  # returns a lookup function
def __iter__(self):
num_iter = 0 # represent sentence target
with open(self.filepath, encoding="utf8", errors='ignore') as f:
next(f) # Pass the column name line
sent_id = None
words, tags = [], []
for line in f:
line = line.strip().split()
sent_tmp_id = line[1]
# print("sentence:",sent_tmp_id)
# judge initial sentence id status
if not sent_id: # initital assign of sentence id
sent_id = sent_tmp_id
# judge if same sentence
if sent_tmp_id != sent_id: # new sentence
yield words, tags # change sentence, yield iterator
num_iter += 1
                    if self.max_iter is not None and num_iter > self.max_iter:
break
sent_id = sent_tmp_id
words, tags = [], []
try:
word, tag = line[2], line[4]
if self.lower == True:
word, tag = word.lower(), tag.lower()
except:
print("wrong line,line content:", line)
continue
if self.vocab_words and self.vocab_tags:
word = self.proc_word(word)
tag = self.proc_tag(tag)
words.append(word)
tags.append(tag)
# print(word,tag)
def __len__(self):
"""Iterates once over the corpus to set and store length"""
if self.length is None:
self.length = 0
for _ in self:
self.length += 1
return self.length
def get_trimmed_glove_vectors(filename):
"""
Args:
filename: path to the npz file
Returns:
matrix of embeddings (np array)
"""
try:
with np.load(filename) as data:
return data["embeddings"]
except IOError:
        raise IOError("Unable to load trimmed embeddings from {}".format(filename))
def process_vocab(vocab, lower=True, allow_unk=True):
def proc(key):
if lower:
key = key.lower()
if key.isdigit():
key = '$NUM$'
if key in vocab:
key = vocab[key]
else:
if allow_unk:
#print(key,"not in vocab ")
key = vocab["$UNK$"]
else:
                raise Exception("Unknown key is not allowed; something is wrong with the vocabulary")
return key # return a processed key eg: I to I's id 30
return proc # return a function
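# Illustrative sketch of the closure returned by process_vocab; the tiny
# vocabulary below is made up for the example and this helper is not used by
# the pipeline itself.
def _process_vocab_example():
    vocab = {"$UNK$": 0, "$NUM$": 1, "the": 2}
    proc = process_vocab(vocab, lower=True, allow_unk=True)
    assert proc("The") == 2   # lowercased, then looked up
    assert proc("42") == 1    # digit strings collapse to $NUM$
    assert proc("zzz") == 0   # out-of-vocabulary words fall back to $UNK$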
def build_vocabs(datasets):
"""Build vocabulary from an iterable of datasets objects
Args:
datasets: a list of dataset objects
Returns:
a set of all the words in the dataset
"""
print("Building vocab...")
vocab_words = set()
vocab_tags = set()
for dataset in datasets:
for words, tags in dataset:
vocab_words.update(words)
vocab_tags.update(tags)
print("- done. {} tokens".format(len(vocab_words)))
return vocab_words, vocab_tags
def build_glove_vocab(filename):
"""Load vocab from file
Args:
filename: path to the glove vectors
Returns:
vocab: set() of strings
"""
print("Building vocab...")
vocab = set()
with open(filename) as f:
for line in f:
word = line.strip().split(' ')[0]
vocab.add(word)
print("- done. {} tokens".format(len(vocab)))
return vocab
def write_vocab(vocab, filename):
"""Writes a vocab to a file
Writes one word per line.
Args:
vocab: iterable that yields word
filename: path to vocab file
Returns:
write a word per line
"""
print("Writing vocab...")
with open(filename, "w") as f:
for i, word in enumerate(vocab):
if i != len(vocab) - 1:
f.write("{}\n".format(word))
else:
f.write(word)
print("- done. {} tokens".format(len(vocab)))
def load_vocab(filename):
"""Loads vocab from a file
Args:
filename: (string) the format of the file must be one word per line.
Returns:
d: dict[word] = index
"""
try:
d = dict()
with open(filename) as f:
for idx, word in enumerate(f):
word = word.strip()
d[word] = idx
except IOError:
        raise IOError("Unable to load vocabulary file {}".format(filename))
return d
def export_trimmed_glove_vectors(vocab, glove_filename, trimmed_filename, dim):
"""Saves glove vectors in numpy array
Args:
vocab: dictionary vocab[word] = index
glove_filename: a path to a glove file
trimmed_filename: a path where to store a matrix in npy
dim: (int) dimension of embeddings
"""
embeddings = np.zeros([len(vocab), dim])
with open(glove_filename) as f:
for line in f:
line = line.strip().split(' ')
word = line[0]
embedding = [float(x) for x in line[1:]]
if word in vocab:
word_idx = vocab[word]
embeddings[word_idx] = np.asarray(embedding)
np.savez_compressed(trimmed_filename, embeddings=embeddings)
"""RNN part"""
def _pad_sequences(sequences, pad_tok, max_length):
"""
Args:
sequences: a generator of list or tuple
pad_tok: the char to pad with
Returns:
a list of list where each sublist has same length
"""
sequence_padded, sequence_length = [], []
for seq in sequences:
seq = list(seq)
seq_ = seq[:max_length] + [pad_tok] * max(max_length - len(seq), 0)
sequence_padded += [seq_]
sequence_length += [min(len(seq), max_length)]
return sequence_padded, sequence_length
def pad_sequences(sequences, pad_tok, nlevels=1):
"""
Args:
sequences: a generator of list or tuple
pad_tok: the char to pad with
nlevels: "depth" of padding, for the case where we have characters ids
Returns:
a list of list where each sublist has same length
"""
if nlevels == 1:
max_length = max(map(lambda x: len(x), sequences))
sequence_padded, sequence_length = _pad_sequences(sequences,
pad_tok, max_length)
elif nlevels == 2:
max_length_word = max([max(map(lambda x: len(x), seq))
for seq in sequences])
sequence_padded, sequence_length = [], []
for seq in sequences:
# all words are same length now
sp, sl = _pad_sequences(seq, pad_tok, max_length_word)
sequence_padded += [sp]
sequence_length += [sl]
max_length_sentence = max(map(lambda x: len(x), sequences))
sequence_padded, _ = _pad_sequences(sequence_padded,
[pad_tok] * max_length_word, max_length_sentence)
sequence_length, _ = _pad_sequences(sequence_length, 0,
max_length_sentence)
return sequence_padded, sequence_length
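# Illustrative example of the padding helpers (the token ids are made up and
# this function is not called by the pipeline):
def _pad_sequences_example():
    padded, lengths = pad_sequences([[4, 5, 6], [7, 8]], pad_tok=0)
    assert padded == [[4, 5, 6], [7, 8, 0]]
    assert lengths == [3, 2]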
def minibatches(data, minibatch_size):
"""
Args:
data: generator of (sentence, tags) tuples
minibatch_size: (int)
Yields:
list of tuples
"""
x_batch, y_batch = [], []
for (x, y) in data:
if len(x_batch) == minibatch_size:
yield x_batch, y_batch
x_batch, y_batch = [], []
if type(x[0]) == tuple:
x = zip(*x)
x_batch += [x]
y_batch += [y]
if len(x_batch) != 0:
yield x_batch, y_batch
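# Illustrative example of batching (the (words, tags) pairs are made up and
# this function is not called by the pipeline):
def _minibatches_example():
    data = [([1, 2], [0, 1]), ([3], [1]), ([4, 5], [0, 0])]
    batches = list(minibatches(data, 2))
    assert batches[0] == ([[1, 2], [3]], [[0, 1], [1]])
    assert batches[1] == ([[4, 5]], [[0, 0]])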
def get_chunk_type(tok, idx_to_tag):
"""
Args:
tok: id of token, ex 4
idx_to_tag: dictionary {4: "B-PER", ...}
Returns:
tuple: "B", "PER"
"""
tag_name = idx_to_tag[tok]
tag_class = tag_name.split('-')[0]
tag_type = tag_name.split('-')[-1]
return tag_class, tag_type
def get_chunks(seq, tags):
"""Given a sequence of tags, group entities and their position
Args:
seq: [4, 4, 0, 0, ...] sequence of labels
tags: dict["O"] = 4
Returns:
list of (chunk_type, chunk_start, chunk_end)
Example:
seq = [4, 5, 0, 3]
tags = {"B-PER": 4, "I-PER": 5, "B-LOC": 3}
result = [("PER", 0, 2), ("LOC", 3, 4)]
"""
default = tags[NONE]
idx_to_tag = {idx: tag for tag, idx in tags.items()}
chunks = []
chunk_type, chunk_start = None, None
for i, tok in enumerate(seq):
# End of a chunk 1
if tok == default and chunk_type is not None:
# Add a chunk.
chunk = (chunk_type, chunk_start, i)
chunks.append(chunk)
chunk_type, chunk_start = None, None
# End of a chunk + start of a chunk!
elif tok != default:
tok_chunk_class, tok_chunk_type = get_chunk_type(tok, idx_to_tag)
if chunk_type is None:
chunk_type, chunk_start = tok_chunk_type, i
elif tok_chunk_type != chunk_type or tok_chunk_class == "B":
chunk = (chunk_type, chunk_start, i)
chunks.append(chunk)
chunk_type, chunk_start = tok_chunk_type, i
else:
pass
# end condition
if chunk_type is not None:
chunk = (chunk_type, chunk_start, len(seq))
chunks.append(chunk)
return chunks
|
py | 1a35e0cbf61f1dddb7022efae0e19687d18e7692 | #!/usr/bin/env python3
import json
from typing import List
import urllib3
from blessings import Terminal
from github import Github
from github.Repository import Repository
from utils import get_env_var, timestamped_print
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
print = timestamped_print
REVIEWERS = ['bziser', 'GuyAfik', 'yucohen']
MARKETPLACE_CONTRIBUTION_PR_AUTHOR = 'xsoar-bot'
WELCOME_MSG = 'Thank you for your contribution. Your generosity and caring are unrivaled! Rest assured - our content ' \
'wizard @{selected_reviewer} will very shortly look over your proposed changes.'
WELCOME_MSG_WITH_GFORM = 'Thank you for your contribution. Your generosity and caring are unrivaled! Make sure to ' \
'register your contribution by filling the [Contribution Registration]' \
'(https://forms.gle/XDfxU4E61ZwEESSMA) form, ' \
'so our content wizard @{selected_reviewer} will know he can start review the proposed ' \
'changes.'
def determine_reviewer(potential_reviewers: List[str], repo: Repository) -> str:
"""Checks the number of open 'Contribution' PRs that have either been assigned to a user or a review
was requested from the user for each potential reviewer and returns the user with the smallest amount
Args:
potential_reviewers (List): The github usernames from which a reviewer will be selected
repo (Repository): The relevant repo
Returns:
str: The github username to assign to a PR
"""
label_to_consider = 'contribution'
pulls = repo.get_pulls(state='OPEN')
assigned_prs_per_potential_reviewer = {reviewer: 0 for reviewer in potential_reviewers}
for pull in pulls:
# we only consider 'Contribution' prs when computing who to assign
pr_labels = [label.name.casefold() for label in pull.labels]
if label_to_consider not in pr_labels:
continue
assignees = {assignee.login for assignee in pull.assignees}
requested_reviewers, _ = pull.get_review_requests()
reviewers_info = {requested_reviewer.login for requested_reviewer in requested_reviewers}
combined_list = assignees.union(reviewers_info)
for reviewer in potential_reviewers:
if reviewer in combined_list:
assigned_prs_per_potential_reviewer[reviewer] = assigned_prs_per_potential_reviewer.get(reviewer, 0) + 1
selected_reviewer = sorted(assigned_prs_per_potential_reviewer, key=assigned_prs_per_potential_reviewer.get)[0]
return selected_reviewer
def main():
"""Handles External PRs (PRs from forks)
Performs the following operations:
1. If the external PR's base branch is master we create a new branch and set it as the base branch of the PR.
2. Labels the PR with the "Contribution" label. (Adds the "Hackathon" label where applicable.)
3. Assigns a Reviewer.
4. Creates a welcome comment
Will use the following env vars:
- CONTENTBOT_GH_ADMIN_TOKEN: token to use to update the PR
- EVENT_PAYLOAD: json data from the pull_request event
"""
t = Terminal()
payload_str = get_env_var('EVENT_PAYLOAD')
if not payload_str:
raise ValueError('EVENT_PAYLOAD env variable not set or empty')
payload = json.loads(payload_str)
print(f'{t.cyan}Processing PR started{t.normal}')
org_name = 'demisto'
repo_name = 'content'
gh = Github(get_env_var('CONTENTBOT_GH_ADMIN_TOKEN'), verify=False)
content_repo = gh.get_repo(f'{org_name}/{repo_name}')
pr_number = payload.get('pull_request', {}).get('number')
pr = content_repo.get_pull(pr_number)
# Add 'Contribution' Label to PR
contribution_label = 'Contribution'
pr.add_to_labels(contribution_label)
print(f'{t.cyan}Added "Contribution" label to the PR{t.normal}')
# check base branch is master
if pr.base.ref == 'master':
print(f'{t.cyan}Determining name for new base branch{t.normal}')
branch_prefix = 'contrib/'
new_branch_name = f'{branch_prefix}{pr.head.label.replace(":", "_")}'
        existing_branches = content_repo.get_git_matching_refs(f'heads/{branch_prefix}')
        potential_conflicting_branch_names = [branch.ref[len('refs/heads/'):] for branch in existing_branches]
# make sure new branch name does not conflict with existing branch name
while new_branch_name in potential_conflicting_branch_names:
# append or increment digit
if not new_branch_name[-1].isdigit():
new_branch_name += '-1'
else:
digit = str(int(new_branch_name[-1]) + 1)
new_branch_name = f'{new_branch_name[:-1]}{digit}'
master_branch_commit_sha = content_repo.get_branch('master').commit.sha
# create new branch
print(f'{t.cyan}Creating new branch "{new_branch_name}"{t.normal}')
content_repo.create_git_ref(f'refs/heads/{new_branch_name}', master_branch_commit_sha)
# update base branch of the PR
pr.edit(base=new_branch_name)
print(f'{t.cyan}Updated base branch of PR "{pr_number}" to "{new_branch_name}"{t.normal}')
# assign reviewers / request review from
reviewer_to_assign = determine_reviewer(REVIEWERS, content_repo)
pr.add_to_assignees(reviewer_to_assign)
pr.create_review_request(reviewers=[reviewer_to_assign])
print(f'{t.cyan}Assigned user "{reviewer_to_assign}" to the PR{t.normal}')
print(f'{t.cyan}Requested review from user "{reviewer_to_assign}"{t.normal}')
# create welcome comment (only users who contributed through Github need to have that contribution form filled)
message_to_send = WELCOME_MSG if pr.user.login == MARKETPLACE_CONTRIBUTION_PR_AUTHOR else WELCOME_MSG_WITH_GFORM
body = message_to_send.format(selected_reviewer=reviewer_to_assign)
pr.create_issue_comment(body)
print(f'{t.cyan}Created welcome comment{t.normal}')
if __name__ == "__main__":
main()
|
py | 1a35e17dce2015845427e629b3b759b6407229b6 | """
WSGI config for leaveProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "leaveProject.settings")
application = get_wsgi_application()
|
py | 1a35e2686a324ad600305fe76b4c565318fc18eb | import pytest
from abridger.extraction_model import Relation
from abridger.schema import SqliteSchema
from test.unit.extractor.base import TestExtractorBase
class TestExtractorSubjectRelationReProcessingIncoming(TestExtractorBase):
@pytest.fixture()
def schema1(self):
for stmt in [
'''
CREATE TABLE test1 (
id INTEGER PRIMARY KEY
);
''', '''
CREATE TABLE test2 (
id INTEGER PRIMARY KEY,
test1_id INTEGER REFERENCES test1
);
''', '''
CREATE TABLE test3 (
id INTEGER PRIMARY KEY,
test2_id INTEGER REFERENCES test2
);
''', '''
CREATE TABLE test4 (
id INTEGER PRIMARY KEY,
test1_id INTEGER REFERENCES test1,
test2_id INTEGER REFERENCES test2
);
''',
]:
self.database.execute(stmt)
return SqliteSchema.create_from_conn(self.database.connection)
@pytest.fixture()
def data1(self, schema1):
table1 = schema1.tables[0]
table2 = schema1.tables[1]
table3 = schema1.tables[2]
table4 = schema1.tables[3]
rows = [
(table1, (1,)),
(table2, (1, 1)),
(table3, (1, 1)),
(table4, (1, 1, 1)),
]
self.database.insert_rows(rows)
return rows
def test_re_processing(self, schema1, data1):
# 1 <- 2 <- 3
# ^ ^
# \ /
# 4
# The extractor algorithm goes breadth first. In this example,
# the test2 table is hit twice. However the first time it is hit,
# it has less relationships, so it won't pull in test3.
# The second subject includes test3 and test4. test2 will only get
# processed when test2 has already been seen by subject 1.
# This test ensures that test2 is re-processed due to subject 2
# having more relationships.
rel21 = {'table': 'test2', 'column': 'test1_id'}
rel32 = {'table': 'test3', 'column': 'test2_id'}
rel41 = {'table': 'test4', 'column': 'test1_id'}
extraction_model_data = [
# This subject won't include test3, only test 2
{
'subject': [
{'tables': [{'table': 'test1'}]},
{'relations': [rel21]},
]
},
# This subject will include test3 via test4
{
'subject': [
{'tables': [{'table': 'test1'}]},
{'relations': [rel41, rel32]},
]
}
]
self.check_launch(schema1, extraction_model_data, data1)
class TestExtractorTwoSubjectTwoColumnNulling(TestExtractorBase):
TEST_CASES = []
for i in (True, False):
for j in (True, False):
for k in (True, False):
for l in (True, False):
TEST_CASES.append([i, j, k, l])
@pytest.fixture()
def schema1(self):
for stmt in [
'''
CREATE TABLE test1 (
id INTEGER PRIMARY KEY
);
''', '''
CREATE TABLE test2 (
id INTEGER PRIMARY KEY,
test1_id INTEGER REFERENCES test1,
test3_id INTEGER REFERENCES test3,
test5_id INTEGER REFERENCES test5
);
''', '''
CREATE TABLE test3 (
id INTEGER PRIMARY KEY
);
''', '''
CREATE TABLE test5 (
id INTEGER PRIMARY KEY
);
''', '''
CREATE TABLE test4 (
id INTEGER PRIMARY KEY,
test1_id INTEGER REFERENCES test1,
test2_id INTEGER REFERENCES test2
);
''',
]:
self.database.execute(stmt)
return SqliteSchema.create_from_conn(self.database.connection)
@pytest.fixture()
def data1(self, schema1):
table1 = schema1.tables[0]
table2 = schema1.tables[1]
table3 = schema1.tables[2]
table4 = schema1.tables[3]
table5 = schema1.tables[4]
rows = [
(table1, (1,)),
(table3, (1,)),
(table5, (1,)),
(table2, (1, 1, 1, 1)),
(table4, (1, 1, 1)),
]
self.database.insert_rows(rows)
return rows
@pytest.mark.parametrize('i, j, k, l', TEST_CASES)
def test_nulling(self, schema1, data1, i, j, k, l):
# 5
# ^
# /
# 1 <- 2 -> 3
# ^ ^
# \ /
# 4
# The extractor algorithm goes breadth first. By testing with two
# subjects, things can be rigged so that the test2 table is processed
# twice, with different relationships.
#
# This tests checks that two outgoing relations on the test2 table
# are processed correctly. If a row in test3 or test5 is not needed,
# then the column on test2 should be made null.
#
# The 16 combinations are:
# relationship from 2-> 3 enabled/disabled for subject 1 -- i
# relationship from 2-> 3 enabled/disabled for subject 1 -- j
# relationship from 2-> 5 enabled/disabled for subject 2 -- k
# relationship from 2-> 5 enabled/disabled for subject 2 -- l
table2 = schema1.tables[1]
rel21 = {'table': 'test2', 'column': 'test1_id'}
rel41 = {'table': 'test4', 'column': 'test1_id'}
# Outgoing relations are enabled by default.
rel23d = {'table': 'test2', 'column': 'test3_id', 'disabled': True,
'type': Relation.TYPE_OUTGOING}
rel25d = {'table': 'test2', 'column': 'test5_id', 'disabled': True,
'type': Relation.TYPE_OUTGOING}
# Incoming relations
relations = [[rel21], [rel41]]
# Disable outgoing relations
if not i:
relations[0].append(rel23d)
if not j:
relations[0].append(rel25d)
if not k:
relations[1].append(rel23d)
if not l:
relations[1].append(rel25d)
expect3 = 1 if i or k else None # Expect a non-None in test3_id
expect5 = 1 if j or l else None # Expect a non-None in test5_id
expected_data = data1[0:1] + data1[4:5]
expected_data.append((table2, (1, 1, expect3, expect5)))
if expect3:
expected_data += data1[1:2] # Expect a row in test3
if expect5:
expected_data += data1[2:3] # Expect a row in test5
extraction_model_data = [
{'subject': [
{'tables': [{'table': 'test1'}]},
{'relations': relations[0]}]},
{'subject': [
{'tables': [{'table': 'test1'}]},
{'relations': relations[1]}]}
]
self.check_launch(schema1, extraction_model_data, expected_data)
|
py | 1a35e36af1882cb52e90fee4b191fe5294280900 | import unittest
from mock import patch, call, Mock
import update_data_after_sync
class FakeCollection(object):
def find(self):
return [
{'topic_id': 'UKGOVUK_1', '_id': 'https://www.gov.uk/feed?a=b&c=d', 'created': '2013-08-01T12:53:31Z'},
{'topic_id': 'UKGOVUK_2', '_id': 'https://www.gov.uk/pubs?w=x&y=z', 'created': '2015-02-26T09:57:35Z', 'disabled': True},
]
def insert(self, *args):
return True
def remove(self, topic_id):
return True
def count(self):
return 2
@patch.dict(update_data_after_sync.os.environ, {'GOVUK_WEBSITE_ROOT': 'https://integration.gov.uk'})
class UpdateDataAfterSyncTestCase(unittest.TestCase):
@patch.dict(update_data_after_sync.app.config, {'GOVDELIVERY_HOSTNAME': 'omg-production'})
def test_will_not_run_in_production(self):
with self.assertRaises(SystemExit):
update_data_after_sync.update_all_records()
@patch.object(update_data_after_sync, 'logging')
@patch.object(update_data_after_sync.db, 'topics', new_callable=FakeCollection)
@patch.object(FakeCollection, 'remove', return_value=True)
@patch.object(FakeCollection, 'insert', return_value=True)
@patch.dict(update_data_after_sync.os.environ, {'GOVDELIVERY_HOSTNAME': 'stage-api.govdelivery.com'})
@patch.dict(update_data_after_sync.app.config, {'GOVDELIVERY_ACCOUNT_CODE': 'DUPDUPDUP'})
def test_updating_all_records(self, mock_insert_record, mock_delete_record, mock_db, mock_logging):
update_data_after_sync.update_all_records()
mock_logging.info.assert_has_calls([
call('Updating 2 topics with domain integration.gov.uk and account code DUPDUPDUP'),
call('Done')
])
mock_insert_record.assert_has_calls([
call({
'_id': 'https://integration.gov.uk/feed?a=b&c=d',
'topic_id': 'DUPDUPDUP_1',
'created': '2013-08-01T12:53:31Z',
}),
call({
'_id': 'https://integration.gov.uk/pubs?w=x&y=z',
'topic_id': 'DUPDUPDUP_2',
'created' : '2015-02-26T09:57:35Z',
'disabled': True
}),
])
mock_delete_record.assert_has_calls([
call({'_id': 'https://www.gov.uk/feed?a=b&c=d'}),
call({'_id': 'https://www.gov.uk/pubs?w=x&y=z'}),
])
|
py | 1a35e386ed3fb629a05f108023a83294298074f9 | #!/usr/bin/env python
"""mergesort.py: Program to implement merge sort"""
__author__ = 'Rohit Sinha'
def merge_sort(alist):
if len(alist) <= 1:
return alist
    middle = len(alist) // 2
left = alist[:middle]
right = alist[middle:]
left = merge_sort(left)
right = merge_sort(right)
return list(merge(left, right))
def merge(left, right):
result = []
left_index, right_index = 0, 0
while left_index < len(left) and right_index < len(right):
if left[left_index] <= right[right_index]:
result.append(left[left_index])
left_index += 1
else:
result.append(right[right_index])
right_index += 1
if left:
result.extend(left[left_index:])
if right:
result.extend(right[right_index:])
return result
if __name__ == '__main__':
alist = [84, 69, 76, 86, 94, 91]
alist = merge_sort(alist)
print(alist) |
py | 1a35e3d4c509b27f0c4ccb24c822500940b541f6 | #! /usr/bin/env python
#
# -*- coding: iso-8859-1 -*-
#
#
# FILE: EDS2CSV.py
# BEGIN: Nov 30,2007
# AUTHOR: Giuseppe Massimo Bertani
# EMAIL [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Coding style params:
# <TAB> skips=4
# <TAB> are replaced with blanks
#
#
import sys
import os
import ConfigParser as cp
if (len(sys.argv) != 2):
print "Usage:"
print
print "EDS2CSV.py <input file> "
print
print
sys.exit(0)
EDSname = os.path.abspath(sys.argv[1])
if not os.path.exists(EDSname):
    print
    print "Input file ", EDSname, " not found."
print
print
sys.exit(0)
eds = cp.ConfigParser()
eds.read(EDSname)
ssorted = sorted(eds.sections())
# dump entire EDS file to stdout in CSV format comma separated
print "Object Dictionary,",sys.argv[1]
print
for section in ssorted:
print section
print ",",
osorted = sorted(eds.options(section))
for option in osorted:
print option, ",",
print
print ",",
for option in osorted:
print eds.get(section, option), ",",
print
print
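# Illustrative output sketch for a made-up EDS section (option names are
# lower-cased by ConfigParser, values follow their sorted option names):
#
#   input EDS                       printed CSV
#   [1018]                          1018
#   ParameterName=Identity    -->   , datatype , parametername ,
#   DataType=0x0007                 , 0x0007 , Identity ,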
|
py | 1a35e4248f3b0326d496340ca88ad1b14bf3d985 | import numpy as np
import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.distribution import Normal
def rsample(loc, scale):
shape = loc.shape
normal_ = paddle.nn.initializer.Normal()
eps = paddle.empty(shape, dtype=loc.dtype)
normal_(eps)
return loc + eps * scale
class Retina:
"""A visual retina.
Extracts a foveated glimpse `phi` around location `l`
from an image `x`.
Concretely, encodes the region around `l` at a
high-resolution but uses a progressively lower
resolution for pixels further from `l`, resulting
in a compressed representation of the original
image `x`.
Args:
x: a 4D Tensor of shape (B, H, W, C). The minibatch
of images.
l: a 2D Tensor of shape (B, 2). Contains normalized
coordinates in the range [-1, 1].
g: size of the first square patch.
k: number of patches to extract in the glimpse.
s: scaling factor that controls the size of
successive patches.
Returns:
phi: a 5D tensor of shape (B, k, g, g, C). The
foveated glimpse of the image.
"""
def __init__(self, g, k, s):
self.g = g
self.k = k
self.s = s
def foveate(self, x, l):
"""Extract `k` square patches of size `g`, centered
at location `l`. The initial patch is a square of
size `g`, and each subsequent patch is a square
whose side is `s` times the size of the previous
patch.
The `k` patches are finally resized to (g, g) and
concatenated into a tensor of shape (B, k, g, g, C).
"""
phi = []
size = self.g
# extract k patches of increasing size
for i in range(self.k):
            phi.append(self.extract_patch(x, l, size))  # this op includes padding
size = int(self.s * size)
# resize the patches to squares of size g
for i in range(1, len(phi)):
k = phi[i].shape[-1] // self.g
phi[i] = F.avg_pool2d(phi[i], k) # avg pool
# concatenate into a single tensor and flatten
phi = paddle.concat(phi, 1)
phi = phi.reshape([phi.shape[0], -1])
return phi
def extract_patch(self, x, l, size):
"""Extract a single patch for each image in `x`.
Args:
x: a 4D Tensor of shape (B, H, W, C). The minibatch
of images.
l: a 2D Tensor of shape (B, 2).
size: a scalar defining the size of the extracted patch.
Returns:
patch: a 4D Tensor of shape (B, size, size, C)
"""
B, C, H, W = x.shape
start = self.denormalize(H, l)
start=start.numpy()
end = start + size
# pad with zeros
x = F.pad(x, [0,0,0,0,size // 2, size // 2, size // 2, size // 2]).numpy()
# loop through mini-batch and extract patches
patch = []
for i in range(B):
            # use numpy indexing here: paddle's indexing op is far too slow
            # (1280 calls: torch 0.0219s, paddle 0.727s, numpy 0.00099s)
            subset = x[i, :, start[i, 1] : end[i, 1], start[i, 0] : end[i, 0]]
patch.append(subset)
return paddle.to_tensor(np.stack(patch))
def denormalize(self, T, coords):
"""Convert coordinates in the range [-1, 1] to
coordinates in the range [0, T] where `T` is
the size of the image.
"""
return paddle.to_tensor(0.5 * ((coords + 1.0) * T), dtype='int64')
def exceeds(self, from_x, to_x, from_y, to_y, T):
"""Check whether the extracted patch will exceed
the boundaries of the image of size `T`.
"""
if (from_x < 0) or (from_y < 0) or (to_x > T) or (to_y > T):
return True
return False
class GlimpseNetwork(nn.Layer):
"""The glimpse network.
Combines the "what" and the "where" into a glimpse
feature vector `g_t`.
- "what": glimpse extracted from the retina.
- "where": location tuple where glimpse was extracted.
Concretely, feeds the output of the retina `phi` to
a fc layer and the glimpse location vector `l_t_prev`
to a fc layer. Finally, these outputs are fed each
through a fc layer and their sum is rectified.
In other words:
`g_t = relu( fc( fc(l) ) + fc( fc(phi) ) )`
Args:
h_g: hidden layer size of the fc layer for `phi`.
h_l: hidden layer size of the fc layer for `l`.
g: size of the square patches in the glimpses extracted
by the retina.
k: number of patches to extract per glimpse.
s: scaling factor that controls the size of successive patches.
c: number of channels in each image.
x: a 4D Tensor of shape (B, H, W, C). The minibatch
of images.
l_t_prev: a 2D tensor of shape (B, 2). Contains the glimpse
coordinates [x, y] for the previous timestep `t-1`.
Returns:
g_t: a 2D tensor of shape (B, hidden_size).
The glimpse representation returned by
the glimpse network for the current
timestep `t`.
"""
def __init__(self, h_g, h_l, g, k, s, c):
super().__init__()
self.retina = Retina(g, k, s)
# glimpse layer
D_in = k * g * g * c
self.fc1 = nn.Linear(D_in, h_g)
# location layer
D_in = 2
self.fc2 = nn.Linear(D_in, h_l)
self.fc3 = nn.Linear(h_g, h_g + h_l)
self.fc4 = nn.Linear(h_l, h_g + h_l)
def forward(self, x, l_t_prev):
# generate glimpse phi from image x
phi = self.retina.foveate(x, l_t_prev)
# flatten location vector
        l_t_prev = l_t_prev.reshape([l_t_prev.shape[0], -1])  # workaround inherited from the reference implementation
# feed phi and l to respective fc layers
phi_out = F.relu(self.fc1(phi))
l_out = F.relu(self.fc2(l_t_prev))
what = self.fc3(phi_out)
where = self.fc4(l_out)
# feed to fc layer
g_t = F.relu(what + where)
return g_t
class CoreNetwork(nn.Layer):
"""The core network.
An RNN that maintains an internal state by integrating
information extracted from the history of past observations.
It encodes the agent's knowledge of the environment through
a state vector `h_t` that gets updated at every time step `t`.
Concretely, it takes the glimpse representation `g_t` as input,
and combines it with its internal state `h_t_prev` at the previous
time step, to produce the new internal state `h_t` at the current
time step.
In other words:
`h_t = relu( fc(h_t_prev) + fc(g_t) )`
Args:
input_size: input size of the rnn.
hidden_size: hidden size of the rnn.
g_t: a 2D tensor of shape (B, hidden_size). The glimpse
representation returned by the glimpse network for the
current timestep `t`.
h_t_prev: a 2D tensor of shape (B, hidden_size). The
hidden state vector for the previous timestep `t-1`.
Returns:
h_t: a 2D tensor of shape (B, hidden_size). The hidden
state vector for the current timestep `t`.
"""
def __init__(self, input_size, hidden_size):
super().__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size, hidden_size)
self.h2h = nn.Linear(hidden_size, hidden_size)
def forward(self, g_t, h_t_prev):
h1 = self.i2h(g_t)
h2 = self.h2h(h_t_prev)
h_t = F.relu(h1 + h2)
return h_t
class ActionNetwork(nn.Layer):
"""The action network.
Uses the internal state `h_t` of the core network to
produce the final output classification.
Concretely, feeds the hidden state `h_t` through a fc
layer followed by a softmax to create a vector of
output probabilities over the possible classes.
Hence, the environment action `a_t` is drawn from a
distribution conditioned on an affine transformation
of the hidden state vector `h_t`, or in other words,
the action network is simply a linear softmax classifier.
Args:
input_size: input size of the fc layer.
output_size: output size of the fc layer.
h_t: the hidden state vector of the core network
for the current time step `t`.
Returns:
a_t: output probability vector over the classes.
"""
def __init__(self, input_size, output_size):
super().__init__()
self.fc = nn.Linear(input_size, output_size)
def forward(self, h_t):
a_t = F.log_softmax(self.fc(h_t), axis=1)
return a_t
class LocationNetwork(nn.Layer):
"""The location network.
Uses the internal state `h_t` of the core network to
produce the location coordinates `l_t` for the next
time step.
Concretely, feeds the hidden state `h_t` through a fc
layer followed by a tanh to clip the output beween
[-1, 1]. This produces a 2D vector of means used to
parametrize a two-component Gaussian with a fixed
variance from which the location coordinates `l_t`
for the next time step are sampled.
Hence, the location `l_t` is chosen stochastically
from a distribution conditioned on an affine
transformation of the hidden state vector `h_t`.
Args:
input_size: input size of the fc layer.
output_size: output size of the fc layer.
std: standard deviation of the normal distribution.
h_t: the hidden state vector of the core network for
the current time step `t`.
Returns:
mu: a 2D vector of shape (B, 2).
l_t: a 2D vector of shape (B, 2).
"""
def __init__(self, input_size, output_size, std):
super().__init__()
self.std = std
hid_size = input_size // 2
self.fc = nn.Linear(input_size, hid_size)
self.fc_lt = nn.Linear(hid_size, output_size)
def forward(self, h_t):
# compute mean
feat = F.relu(self.fc(h_t.detach()))
mu = paddle.tanh(self.fc_lt(feat))
# reparametrization trick
l_t = rsample(loc=mu,scale=self.std)
l_t = l_t.detach()
log_pi = Normal(mu, paddle.to_tensor(self.std)).log_prob(l_t)
# we assume both dimensions are independent
# 1. pdf of the joint is the product of the pdfs
# 2. log of the product is the sum of the logs
log_pi = paddle.sum(log_pi, axis=1)
# bound between [-1, 1]
l_t = paddle.clip(l_t, -1, 1)
return log_pi, l_t
class BaselineNetwork(nn.Layer):
"""The baseline network.
This network regresses the baseline in the
reward function to reduce the variance of
the gradient update.
Args:
input_size: input size of the fc layer.
output_size: output size of the fc layer.
h_t: the hidden state vector of the core network
for the current time step `t`.
Returns:
b_t: a 2D vector of shape (B, 1). The baseline
for the current time step `t`.
"""
def __init__(self, input_size, output_size):
super().__init__()
self.fc = nn.Linear(input_size, output_size)
def forward(self, h_t):
b_t = self.fc(h_t.detach())
return b_t
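# Illustrative wiring sketch for one glimpse step. The hyper-parameters and
# shapes below (g=8, k=1, s=2, hidden size 256, 28x28 single-channel images,
# std=0.17) are made-up example values, not the training configuration; the
# function is never called by the model code.
def _glimpse_step_example():
    batch, channels, height = 4, 1, 28
    x = paddle.rand([batch, channels, height, height])   # minibatch of images
    l_t = paddle.uniform([batch, 2], min=-1.0, max=1.0)  # normalized locations
    glimpse = GlimpseNetwork(h_g=128, h_l=128, g=8, k=1, s=2, c=channels)
    core = CoreNetwork(input_size=256, hidden_size=256)
    locator = LocationNetwork(input_size=256, output_size=2, std=0.17)
    baseliner = BaselineNetwork(input_size=256, output_size=1)
    h_t = paddle.zeros([batch, 256])
    g_t = glimpse(x, l_t)          # (batch, h_g + h_l)
    h_t = core(g_t, h_t)           # (batch, hidden_size)
    log_pi, l_next = locator(h_t)  # proposal for the next glimpse location
    b_t = baseliner(h_t)           # baseline for the REINFORCE update
    return h_t, l_next, log_pi, b_t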
|
py | 1a35e5082434dba5bc48161214b8207b6eadebb9 | from Moildev import Moildev
import unittest
import json
class TestMoildev(unittest.TestCase):
image = "../SourceImage/image.jpg"
camera_parameter = 'camera_parameters/camera_parameters.json'
camera_type = "Raspi"
with open(camera_parameter) as f:
data = json.load(f)
if camera_type in data.keys():
camera = data[camera_type]["cameraName"]
sensor_width = data[camera_type]['cameraSensorWidth']
sensor_height = data[camera_type]['cameraSensorHeight']
Icx = data[camera_type]['iCx']
Icy = data[camera_type]['iCy']
ratio = data[camera_type]['ratio']
imageWidth = data[camera_type]['imageWidth']
imageHeight = data[camera_type]['imageHeight']
calibrationRatio = data[camera_type]['calibrationRatio']
parameter0 = data[camera_type]['parameter0']
parameter1 = data[camera_type]['parameter1']
parameter2 = data[camera_type]['parameter2']
parameter3 = data[camera_type]['parameter3']
parameter4 = data[camera_type]['parameter4']
parameter5 = data[camera_type]['parameter5']
moildev = Moildev(camera_parameter, camera_type)
def test_test(self):
self.moildev.test()
self.assertTrue(True)
def test_get_Icx(self):
self.assertEqual(self.moildev.get_Icx(), self.Icx)
def test_get_Icy(self):
self.assertEqual(self.moildev.get_Icy(), self.Icy)
def test_imageWidth(self):
self.assertEqual(self.moildev.get_imageWidth(), self.imageWidth)
def test_getAlphabeta(self):
alpha, beta = self.moildev.get_alpha_beta(500,500, 2)
self.assertTrue(alpha is not None)
|
py | 1a35e53d92700d932c70c66de4a501b7d42090b1 |
#%%
2
|
py | 1a35e6621e6796a089917d985ebf3e0d3ea0c8b7 | from unittest import TestCase
from unittest.mock import ANY, MagicMock, Mock, patch
from parameterized import parameterized
from samcli.commands.sync.command import do_cli, execute_code_sync, execute_watch
from samcli.lib.providers.provider import ResourceIdentifier
from samcli.commands._utils.options import (
DEFAULT_BUILD_DIR,
DEFAULT_CACHE_DIR,
DEFAULT_BUILD_DIR_WITH_AUTO_DEPENDENCY_LAYER,
)
def get_mock_sam_config():
mock_sam_config = MagicMock()
mock_sam_config.exists = MagicMock(return_value=True)
return mock_sam_config
MOCK_SAM_CONFIG = get_mock_sam_config()
class TestDoCli(TestCase):
def setUp(self):
self.template_file = "input-template-file"
self.stack_name = "stack-name"
self.resource_id = []
self.resource = []
self.image_repository = "123456789012.dkr.ecr.us-east-1.amazonaws.com/test1"
self.image_repositories = None
self.mode = "mode"
self.s3_prefix = "s3-prefix"
self.kms_key_id = "kms-key-id"
self.notification_arns = []
self.parameter_overrides = {"a": "b"}
self.capabilities = ("CAPABILITY_IAM",)
self.tags = {"c": "d"}
self.role_arn = "role_arn"
self.metadata = {}
self.region = None
self.profile = None
self.base_dir = None
self.clean = True
self.config_env = "mock-default-env"
self.config_file = "mock-default-filename"
MOCK_SAM_CONFIG.reset_mock()
@parameterized.expand([(False, False, True), (False, False, False)])
@patch("samcli.commands.sync.command.update_experimental_context")
@patch("samcli.commands.sync.command.click")
@patch("samcli.commands.sync.command.execute_code_sync")
@patch("samcli.commands.build.command.click")
@patch("samcli.commands.build.build_context.BuildContext")
@patch("samcli.commands.package.command.click")
@patch("samcli.commands.package.package_context.PackageContext")
@patch("samcli.commands.deploy.command.click")
@patch("samcli.commands.deploy.deploy_context.DeployContext")
@patch("samcli.commands.build.command.os")
@patch("samcli.commands.sync.command.manage_stack")
def test_infra_must_succeed_sync(
self,
code,
watch,
auto_dependency_layer,
manage_stack_mock,
os_mock,
DeployContextMock,
mock_deploy_click,
PackageContextMock,
mock_package_click,
BuildContextMock,
mock_build_click,
execute_code_sync_mock,
click_mock,
update_experimental_context_mock,
):
build_context_mock = Mock()
BuildContextMock.return_value.__enter__.return_value = build_context_mock
package_context_mock = Mock()
PackageContextMock.return_value.__enter__.return_value = package_context_mock
deploy_context_mock = Mock()
DeployContextMock.return_value.__enter__.return_value = deploy_context_mock
do_cli(
self.template_file,
False,
False,
self.resource_id,
self.resource,
auto_dependency_layer,
self.stack_name,
self.region,
self.profile,
self.base_dir,
self.parameter_overrides,
self.mode,
self.image_repository,
self.image_repositories,
self.s3_prefix,
self.kms_key_id,
self.capabilities,
self.role_arn,
self.notification_arns,
self.tags,
self.metadata,
self.config_file,
self.config_env,
)
build_dir = DEFAULT_BUILD_DIR_WITH_AUTO_DEPENDENCY_LAYER if auto_dependency_layer else DEFAULT_BUILD_DIR
BuildContextMock.assert_called_with(
resource_identifier=None,
template_file=self.template_file,
base_dir=self.base_dir,
build_dir=build_dir,
cache_dir=DEFAULT_CACHE_DIR,
clean=True,
use_container=False,
parallel=True,
parameter_overrides=self.parameter_overrides,
mode=self.mode,
cached=True,
create_auto_dependency_layer=auto_dependency_layer,
stack_name=self.stack_name,
)
PackageContextMock.assert_called_with(
template_file=ANY,
s3_bucket=ANY,
image_repository=self.image_repository,
image_repositories=self.image_repositories,
s3_prefix=self.s3_prefix,
kms_key_id=self.kms_key_id,
output_template_file=ANY,
no_progressbar=True,
metadata=self.metadata,
region=self.region,
profile=self.profile,
use_json=False,
force_upload=True,
)
DeployContextMock.assert_called_with(
template_file=ANY,
stack_name=self.stack_name,
s3_bucket=ANY,
image_repository=self.image_repository,
image_repositories=self.image_repositories,
no_progressbar=True,
s3_prefix=self.s3_prefix,
kms_key_id=self.kms_key_id,
parameter_overrides=self.parameter_overrides,
capabilities=self.capabilities,
role_arn=self.role_arn,
notification_arns=self.notification_arns,
tags=self.tags,
region=self.region,
profile=self.profile,
no_execute_changeset=True,
fail_on_empty_changeset=True,
confirm_changeset=False,
use_changeset=False,
force_upload=True,
signing_profiles=None,
disable_rollback=False,
)
package_context_mock.run.assert_called_once_with()
deploy_context_mock.run.assert_called_once_with()
execute_code_sync_mock.assert_not_called()
@parameterized.expand([(False, True, False)])
@patch("samcli.commands.sync.command.update_experimental_context")
@patch("samcli.commands.sync.command.click")
@patch("samcli.commands.sync.command.execute_watch")
@patch("samcli.commands.build.command.click")
@patch("samcli.commands.build.build_context.BuildContext")
@patch("samcli.commands.package.command.click")
@patch("samcli.commands.package.package_context.PackageContext")
@patch("samcli.commands.deploy.command.click")
@patch("samcli.commands.deploy.deploy_context.DeployContext")
@patch("samcli.commands.build.command.os")
@patch("samcli.commands.sync.command.manage_stack")
def test_watch_must_succeed_sync(
self,
code,
watch,
auto_dependency_layer,
manage_stack_mock,
os_mock,
DeployContextMock,
mock_deploy_click,
PackageContextMock,
mock_package_click,
BuildContextMock,
mock_build_click,
execute_watch_mock,
click_mock,
update_experimental_context_mock,
):
build_context_mock = Mock()
BuildContextMock.return_value.__enter__.return_value = build_context_mock
package_context_mock = Mock()
PackageContextMock.return_value.__enter__.return_value = package_context_mock
deploy_context_mock = Mock()
DeployContextMock.return_value.__enter__.return_value = deploy_context_mock
do_cli(
self.template_file,
False,
True,
self.resource_id,
self.resource,
auto_dependency_layer,
self.stack_name,
self.region,
self.profile,
self.base_dir,
self.parameter_overrides,
self.mode,
self.image_repository,
self.image_repositories,
self.s3_prefix,
self.kms_key_id,
self.capabilities,
self.role_arn,
self.notification_arns,
self.tags,
self.metadata,
self.config_file,
self.config_env,
)
BuildContextMock.assert_called_with(
resource_identifier=None,
template_file=self.template_file,
base_dir=self.base_dir,
build_dir=DEFAULT_BUILD_DIR,
cache_dir=DEFAULT_CACHE_DIR,
clean=True,
use_container=False,
parallel=True,
parameter_overrides=self.parameter_overrides,
mode=self.mode,
cached=True,
create_auto_dependency_layer=auto_dependency_layer,
stack_name=self.stack_name,
)
PackageContextMock.assert_called_with(
template_file=ANY,
s3_bucket=ANY,
image_repository=self.image_repository,
image_repositories=self.image_repositories,
s3_prefix=self.s3_prefix,
kms_key_id=self.kms_key_id,
output_template_file=ANY,
no_progressbar=True,
metadata=self.metadata,
region=self.region,
profile=self.profile,
use_json=False,
force_upload=True,
)
DeployContextMock.assert_called_with(
template_file=ANY,
stack_name=self.stack_name,
s3_bucket=ANY,
image_repository=self.image_repository,
image_repositories=self.image_repositories,
no_progressbar=True,
s3_prefix=self.s3_prefix,
kms_key_id=self.kms_key_id,
parameter_overrides=self.parameter_overrides,
capabilities=self.capabilities,
role_arn=self.role_arn,
notification_arns=self.notification_arns,
tags=self.tags,
region=self.region,
profile=self.profile,
no_execute_changeset=True,
fail_on_empty_changeset=True,
confirm_changeset=False,
use_changeset=False,
force_upload=True,
signing_profiles=None,
disable_rollback=False,
)
execute_watch_mock.assert_called_once_with(
self.template_file, build_context_mock, package_context_mock, deploy_context_mock, auto_dependency_layer
)

    @parameterized.expand([(True, False, True)])
@patch("samcli.commands.sync.command.update_experimental_context")
@patch("samcli.commands.sync.command.click")
@patch("samcli.commands.sync.command.execute_code_sync")
@patch("samcli.commands.build.command.click")
@patch("samcli.commands.build.build_context.BuildContext")
@patch("samcli.commands.package.command.click")
@patch("samcli.commands.package.package_context.PackageContext")
@patch("samcli.commands.deploy.command.click")
@patch("samcli.commands.deploy.deploy_context.DeployContext")
@patch("samcli.commands.build.command.os")
@patch("samcli.commands.sync.command.manage_stack")
def test_code_must_succeed_sync(
self,
code,
watch,
auto_dependency_layer,
manage_stack_mock,
os_mock,
DeployContextMock,
mock_deploy_click,
PackageContextMock,
mock_package_click,
BuildContextMock,
mock_build_click,
execute_code_sync_mock,
click_mock,
update_experimental_context_mock,
):
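        """With the code flag enabled, do_cli should hand the build and deploy contexts plus the requested resources to execute_code_sync."""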
build_context_mock = Mock()
BuildContextMock.return_value.__enter__.return_value = build_context_mock
package_context_mock = Mock()
PackageContextMock.return_value.__enter__.return_value = package_context_mock
deploy_context_mock = Mock()
DeployContextMock.return_value.__enter__.return_value = deploy_context_mock
do_cli(
self.template_file,
True,
False,
self.resource_id,
self.resource,
auto_dependency_layer,
self.stack_name,
self.region,
self.profile,
self.base_dir,
self.parameter_overrides,
self.mode,
self.image_repository,
self.image_repositories,
self.s3_prefix,
self.kms_key_id,
self.capabilities,
self.role_arn,
self.notification_arns,
self.tags,
self.metadata,
self.config_file,
self.config_env,
)
execute_code_sync_mock.assert_called_once_with(
self.template_file,
build_context_mock,
deploy_context_mock,
self.resource_id,
self.resource,
auto_dependency_layer,
)


class TestSyncCode(TestCase):
def setUp(self) -> None:
self.template_file = "template.yaml"
self.build_context = MagicMock()
self.deploy_context = MagicMock()

    @patch("samcli.commands.sync.command.click")
@patch("samcli.commands.sync.command.SamLocalStackProvider.get_stacks")
@patch("samcli.commands.sync.command.SyncFlowFactory")
@patch("samcli.commands.sync.command.SyncFlowExecutor")
@patch("samcli.commands.sync.command.get_unique_resource_ids")
def test_execute_code_sync_single_resource(
self,
get_unique_resource_ids_mock,
sync_flow_executor_mock,
sync_flow_factory_mock,
get_stacks_mock,
click_mock,
):
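        """A single resource identifier should produce exactly one sync flow, added once to the executor."""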
resource_identifier_strings = ["Function1"]
resource_types = []
sync_flows = [MagicMock()]
sync_flow_factory_mock.return_value.create_sync_flow.side_effect = sync_flows
get_unique_resource_ids_mock.return_value = {
ResourceIdentifier("Function1"),
}
execute_code_sync(
self.template_file,
self.build_context,
self.deploy_context,
resource_identifier_strings,
resource_types,
True,
)
sync_flow_factory_mock.return_value.create_sync_flow.assert_called_once_with(ResourceIdentifier("Function1"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_called_once_with(sync_flows[0])
get_unique_resource_ids_mock.assert_called_once_with(
get_stacks_mock.return_value[0], resource_identifier_strings, []
)

    @patch("samcli.commands.sync.command.click")
@patch("samcli.commands.sync.command.SamLocalStackProvider.get_stacks")
@patch("samcli.commands.sync.command.SyncFlowFactory")
@patch("samcli.commands.sync.command.SyncFlowExecutor")
@patch("samcli.commands.sync.command.get_unique_resource_ids")
def test_execute_code_sync_multiple_resource(
self,
get_unique_resource_ids_mock,
sync_flow_executor_mock,
sync_flow_factory_mock,
get_stacks_mock,
click_mock,
):
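        """Multiple resource identifiers should each get their own sync flow added to the executor."""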
resource_identifier_strings = ["Function1", "Function2"]
resource_types = []
sync_flows = [MagicMock(), MagicMock()]
sync_flow_factory_mock.return_value.create_sync_flow.side_effect = sync_flows
get_unique_resource_ids_mock.return_value = {
ResourceIdentifier("Function1"),
ResourceIdentifier("Function2"),
}
execute_code_sync(
self.template_file,
self.build_context,
self.deploy_context,
resource_identifier_strings,
resource_types,
True,
)
sync_flow_factory_mock.return_value.create_sync_flow.assert_any_call(ResourceIdentifier("Function1"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_any_call(sync_flows[0])
sync_flow_factory_mock.return_value.create_sync_flow.assert_any_call(ResourceIdentifier("Function2"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_any_call(sync_flows[1])
self.assertEqual(sync_flow_factory_mock.return_value.create_sync_flow.call_count, 2)
self.assertEqual(sync_flow_executor_mock.return_value.add_sync_flow.call_count, 2)
get_unique_resource_ids_mock.assert_called_once_with(
get_stacks_mock.return_value[0], resource_identifier_strings, []
)

    @patch("samcli.commands.sync.command.click")
@patch("samcli.commands.sync.command.SamLocalStackProvider.get_stacks")
@patch("samcli.commands.sync.command.SyncFlowFactory")
@patch("samcli.commands.sync.command.SyncFlowExecutor")
@patch("samcli.commands.sync.command.get_unique_resource_ids")
def test_execute_code_sync_single_type_resource(
self,
get_unique_resource_ids_mock,
sync_flow_executor_mock,
sync_flow_factory_mock,
get_stacks_mock,
click_mock,
):
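        """Resource identifiers plus a single resource type should yield a sync flow for every unique resource id returned."""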
resource_identifier_strings = ["Function1", "Function2"]
resource_types = ["Type1"]
sync_flows = [MagicMock(), MagicMock(), MagicMock()]
sync_flow_factory_mock.return_value.create_sync_flow.side_effect = sync_flows
get_unique_resource_ids_mock.return_value = {
ResourceIdentifier("Function1"),
ResourceIdentifier("Function2"),
ResourceIdentifier("Function3"),
}
execute_code_sync(
self.template_file,
self.build_context,
self.deploy_context,
resource_identifier_strings,
resource_types,
True,
)
sync_flow_factory_mock.return_value.create_sync_flow.assert_any_call(ResourceIdentifier("Function1"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_any_call(sync_flows[0])
sync_flow_factory_mock.return_value.create_sync_flow.assert_any_call(ResourceIdentifier("Function2"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_any_call(sync_flows[1])
sync_flow_factory_mock.return_value.create_sync_flow.assert_any_call(ResourceIdentifier("Function3"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_any_call(sync_flows[2])
self.assertEqual(sync_flow_factory_mock.return_value.create_sync_flow.call_count, 3)
self.assertEqual(sync_flow_executor_mock.return_value.add_sync_flow.call_count, 3)
get_unique_resource_ids_mock.assert_called_once_with(
get_stacks_mock.return_value[0], resource_identifier_strings, ["Type1"]
)

    @patch("samcli.commands.sync.command.click")
@patch("samcli.commands.sync.command.SamLocalStackProvider.get_stacks")
@patch("samcli.commands.sync.command.SyncFlowFactory")
@patch("samcli.commands.sync.command.SyncFlowExecutor")
@patch("samcli.commands.sync.command.get_unique_resource_ids")
def test_execute_code_sync_multiple_type_resource(
self,
get_unique_resource_ids_mock,
sync_flow_executor_mock,
sync_flow_factory_mock,
get_stacks_mock,
click_mock,
):
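        """Resource identifiers plus multiple resource types should yield a sync flow for every unique resource id returned."""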
resource_identifier_strings = ["Function1", "Function2"]
resource_types = ["Type1", "Type2"]
sync_flows = [MagicMock(), MagicMock(), MagicMock(), MagicMock()]
sync_flow_factory_mock.return_value.create_sync_flow.side_effect = sync_flows
get_unique_resource_ids_mock.return_value = {
ResourceIdentifier("Function1"),
ResourceIdentifier("Function2"),
ResourceIdentifier("Function3"),
ResourceIdentifier("Function4"),
}
execute_code_sync(
self.template_file,
self.build_context,
self.deploy_context,
resource_identifier_strings,
resource_types,
True,
)
sync_flow_factory_mock.return_value.create_sync_flow.assert_any_call(ResourceIdentifier("Function1"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_any_call(sync_flows[0])
sync_flow_factory_mock.return_value.create_sync_flow.assert_any_call(ResourceIdentifier("Function2"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_any_call(sync_flows[1])
sync_flow_factory_mock.return_value.create_sync_flow.assert_any_call(ResourceIdentifier("Function3"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_any_call(sync_flows[2])
sync_flow_factory_mock.return_value.create_sync_flow.assert_any_call(ResourceIdentifier("Function4"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_any_call(sync_flows[3])
self.assertEqual(sync_flow_factory_mock.return_value.create_sync_flow.call_count, 4)
self.assertEqual(sync_flow_executor_mock.return_value.add_sync_flow.call_count, 4)
get_unique_resource_ids_mock.assert_any_call(
get_stacks_mock.return_value[0], resource_identifier_strings, ["Type1", "Type2"]
)

    @patch("samcli.commands.sync.command.click")
@patch("samcli.commands.sync.command.SamLocalStackProvider.get_stacks")
@patch("samcli.commands.sync.command.SyncFlowFactory")
@patch("samcli.commands.sync.command.SyncFlowExecutor")
@patch("samcli.commands.sync.command.get_all_resource_ids")
def test_execute_code_sync_default_all_resources(
self,
get_all_resource_ids_mock,
sync_flow_executor_mock,
sync_flow_factory_mock,
get_stacks_mock,
click_mock,
):
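        """With no identifiers or types given, every resource id in the stack should get a sync flow."""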
sync_flows = [MagicMock(), MagicMock(), MagicMock(), MagicMock()]
sync_flow_factory_mock.return_value.create_sync_flow.side_effect = sync_flows
get_all_resource_ids_mock.return_value = [
ResourceIdentifier("Function1"),
ResourceIdentifier("Function2"),
ResourceIdentifier("Function3"),
ResourceIdentifier("Function4"),
]
execute_code_sync(self.template_file, self.build_context, self.deploy_context, "", [], True)
sync_flow_factory_mock.return_value.create_sync_flow.assert_any_call(ResourceIdentifier("Function1"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_any_call(sync_flows[0])
sync_flow_factory_mock.return_value.create_sync_flow.assert_any_call(ResourceIdentifier("Function2"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_any_call(sync_flows[1])
sync_flow_factory_mock.return_value.create_sync_flow.assert_any_call(ResourceIdentifier("Function3"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_any_call(sync_flows[2])
sync_flow_factory_mock.return_value.create_sync_flow.assert_any_call(ResourceIdentifier("Function4"))
sync_flow_executor_mock.return_value.add_sync_flow.assert_any_call(sync_flows[3])
self.assertEqual(sync_flow_factory_mock.return_value.create_sync_flow.call_count, 4)
self.assertEqual(sync_flow_executor_mock.return_value.add_sync_flow.call_count, 4)
get_all_resource_ids_mock.assert_called_once_with(get_stacks_mock.return_value[0])


class TestWatch(TestCase):
def setUp(self) -> None:
self.template_file = "template.yaml"
self.build_context = MagicMock()
self.package_context = MagicMock()
self.deploy_context = MagicMock()

    @parameterized.expand([(True,), (False,)])
@patch("samcli.commands.sync.command.click")
@patch("samcli.commands.sync.command.WatchManager")
def test_execute_watch(
self,
auto_dependency_layer,
watch_manager_mock,
click_mock,
):
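        """execute_watch should construct a WatchManager with the template, contexts, and ADL flag, then start it."""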
execute_watch(
self.template_file, self.build_context, self.package_context, self.deploy_context, auto_dependency_layer
)
watch_manager_mock.assert_called_once_with(
self.template_file, self.build_context, self.package_context, self.deploy_context, auto_dependency_layer
)
watch_manager_mock.return_value.start.assert_called_once_with()