ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3 to 1.04M)
---|---|---
py | b40484e52ef0eef1b47062c01c759518c8c541c0 | #!/usr/bin/env python3
# Copyright (c) 2014-2017 The CloudComputingChain Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mining RPCs
- getmininginfo
- getblocktemplate proposal mode
- submitblock"""
import copy
from binascii import b2a_hex
from decimal import Decimal
from test_framework.blocktools import create_coinbase
from test_framework.mininode import CBlock
from test_framework.test_framework import CloudComputingChainTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
def b2x(b):
return b2a_hex(b).decode('ascii')
def assert_template(node, block, expect, rehash=True):
if rehash:
block.hashMerkleRoot = block.calc_merkle_root()
rsp = node.getblocktemplate({'data': b2x(block.serialize()), 'mode': 'proposal'})
assert_equal(rsp, expect)
class MiningTest(CloudComputingChainTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = False
def run_test(self):
node = self.nodes[0]
self.log.info('getmininginfo')
mining_info = node.getmininginfo()
assert_equal(mining_info['blocks'], 200)
assert_equal(mining_info['chain'], 'regtest')
assert_equal(mining_info['currentblocktx'], 0)
assert_equal(mining_info['currentblockweight'], 0)
assert_equal(mining_info['difficulty'], Decimal('4.656542373906925E-10'))
assert_equal(mining_info['networkhashps'], Decimal('0.003333333333333334'))
assert_equal(mining_info['pooledtx'], 0)
# Mine a block to leave initial block download
node.generate(1)
tmpl = node.getblocktemplate()
self.log.info("getblocktemplate: Test capability advertised")
assert 'proposal' in tmpl['capabilities']
assert 'coinbasetxn' not in tmpl
coinbase_tx = create_coinbase(height=int(tmpl["height"]) + 1)
# sequence numbers must not be max for nLockTime to have effect
coinbase_tx.vin[0].nSequence = 2 ** 32 - 2
coinbase_tx.rehash()
block = CBlock()
block.nVersion = tmpl["version"]
block.hashPrevBlock = int(tmpl["previousblockhash"], 16)
block.nTime = tmpl["curtime"]
block.nBits = int(tmpl["bits"], 16)
block.nNonce = 0
block.vtx = [coinbase_tx]
self.log.info("getblocktemplate: Test valid block")
assert_template(node, block, None)
self.log.info("submitblock: Test block decode failure")
assert_raises_rpc_error(-22, "Block decode failed", node.submitblock, b2x(block.serialize()[:-15]))
self.log.info("getblocktemplate: Test bad input hash for coinbase transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].vin[0].prevout.hash += 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-cb-missing')
self.log.info("submitblock: Test invalid coinbase transaction")
assert_raises_rpc_error(-22, "Block does not start with a coinbase", node.submitblock, b2x(bad_block.serialize()))
self.log.info("getblocktemplate: Test truncated final transaction")
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(block.serialize()[:-1]), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test duplicate transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx.append(bad_block.vtx[0])
assert_template(node, bad_block, 'bad-txns-duplicate')
self.log.info("getblocktemplate: Test invalid transaction")
bad_block = copy.deepcopy(block)
bad_tx = copy.deepcopy(bad_block.vtx[0])
bad_tx.vin[0].prevout.hash = 255
bad_tx.rehash()
bad_block.vtx.append(bad_tx)
assert_template(node, bad_block, 'bad-txns-inputs-missingorspent')
self.log.info("getblocktemplate: Test nonfinal transaction")
bad_block = copy.deepcopy(block)
bad_block.vtx[0].nLockTime = 2 ** 32 - 1
bad_block.vtx[0].rehash()
assert_template(node, bad_block, 'bad-txns-nonfinal')
self.log.info("getblocktemplate: Test bad tx count")
# The tx count is immediately after the block header
TX_COUNT_OFFSET = 80
bad_block_sn = bytearray(block.serialize())
assert_equal(bad_block_sn[TX_COUNT_OFFSET], 1)
bad_block_sn[TX_COUNT_OFFSET] += 1
assert_raises_rpc_error(-22, "Block decode failed", node.getblocktemplate, {'data': b2x(bad_block_sn), 'mode': 'proposal'})
self.log.info("getblocktemplate: Test bad bits")
bad_block = copy.deepcopy(block)
bad_block.nBits = 469762303 # impossible in the real world
assert_template(node, bad_block, 'bad-diffbits')
self.log.info("getblocktemplate: Test bad merkle root")
bad_block = copy.deepcopy(block)
bad_block.hashMerkleRoot += 1
assert_template(node, bad_block, 'bad-txnmrklroot', False)
self.log.info("getblocktemplate: Test bad timestamps")
bad_block = copy.deepcopy(block)
bad_block.nTime = 2 ** 31 - 1
assert_template(node, bad_block, 'time-too-new')
bad_block.nTime = 0
assert_template(node, bad_block, 'time-too-old')
self.log.info("getblocktemplate: Test not best block")
bad_block = copy.deepcopy(block)
bad_block.hashPrevBlock = 123
assert_template(node, bad_block, 'inconclusive-not-best-prevblk')
if __name__ == '__main__':
MiningTest().main()
|
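A minimal sketch, in plain Python with no test framework, of the serialization layout that `TX_COUNT_OFFSET` relies on above: a serialized block is an 80-byte header followed by a varint transaction count, so byte 80 is the count that the test corrupts.

```python
# Illustrative only: fake bytes standing in for a serialized block.
fake_header = bytes(80)                            # 80-byte block header (all zeros here)
fake_block = bytearray(fake_header + bytes([1]))   # varint 0x01 = one transaction

assert fake_block[80] == 1    # the tx count sits immediately after the header
fake_block[80] += 1           # corrupting it mirrors the "bad tx count" case above
assert fake_block[80] == 2
```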
py | b4048595a52fa4f91b405d5437548ed12b913a93 | # math2d_line.py
import math
from math2d_vector import Vector
class Line(object):
SIDE_NEITHER = 0
SIDE_FRONT = 1
SIDE_BACK = 2
    # Unlike a ray, the normal points perpendicular to the line, not parallel to it.
    # Unlike a line segment, these have infinite length, like a ray.  Rays are for hit-testing,
    # while these are the 2-dimensional analog of planes in 3D space, dividing the plane into
    # front and back half-spaces.
def __init__(self, center=None, normal=None):
self.center = center if center is not None else Vector(0.0, 0.0)
self.normal = normal if normal is not None else Vector(1.0, 0.0)
def MakeForPoints(self, point_a, point_b):
self.normal = (point_b - point_a).Normalized().RotatedCCW90()
self.center = point_a.Copy()
def SignedDistance(self, point):
vector = point - self.center
return self.normal.Dot(vector)
def Distance(self, point):
return math.fabs(self.SignedDistance(point))
def CalcSide(self, point, epsilon=1e-7):
distance = self.SignedDistance(point)
if math.fabs(distance) <= epsilon:
return Line.SIDE_NEITHER
if distance < 0.0:
return Line.SIDE_BACK
        return Line.SIDE_FRONT
|
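A minimal usage sketch for the Line class, assuming `math2d_vector.Vector` provides the `Copy`, `Dot`, `Normalized`, and `RotatedCCW90` methods used above, and that `RotatedCCW90` maps (x, y) to (-y, x):

```python
from math2d_vector import Vector
from math2d_line import Line

# A line through (0, 0) and (1, 0); its normal then points along +y.
line = Line()
line.MakeForPoints(Vector(0.0, 0.0), Vector(1.0, 0.0))

print(line.SignedDistance(Vector(0.5, 2.0)))   # 2.0 -> front side
print(line.CalcSide(Vector(0.5, -2.0)))        # Line.SIDE_BACK
print(line.CalcSide(Vector(3.0, 0.0)))         # Line.SIDE_NEITHER (on the line)
```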
py | b4048935150c2bfe130d84166105cf9f3e96e7fb | # Copyright 2012 Nebula, Inc.
# Copyright (c) 2012 X.commerce, a business unit of eBay Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django import shortcuts
from django.utils.http import urlencode
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import messages
from horizon import tables
from horizon.utils.functions import check_account_is_frozen
from openstack_dashboard import api
from openstack_dashboard.usage import quotas
from openstack_dashboard.utils import filters
LOG = logging.getLogger(__name__)
POLICY_CHECK = getattr(settings, "POLICY_CHECK_FUNCTION", lambda p, r: True)
class AllocateIP(tables.LinkAction):
name = "allocate"
verbose_name = _("Allocate IP To Project")
classes = ("ajax-modal",)
icon = "link"
url = "horizon:network:floating_ips:allocate"
def single(self, data_table, request, *args):
return shortcuts.redirect('horizon:network:floating_ips:index')
def allowed(self, request, fip=None):
usages = quotas.tenant_quota_usages(request)
if usages['floating_ips']['available'] <= 0:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = string_concat(self.verbose_name, ' ',
_("(Quota exceeded)"))
else:
self.verbose_name = _("Allocate IP To Project")
classes = [c for c in self.classes if c != "disabled"]
self.classes = classes
if api.base.is_service_enabled(request, "network"):
policy = (("network", "create_floatingip"),)
else:
policy = (("compute", "compute_extension:floating_ips"),
("compute", "network:allocate_floating_ip"),)
account_is_frozen = check_account_is_frozen(request)
if account_is_frozen:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
else:
if not (usages['floating_ips']['available'] <= 0):
self.classes = [c for c in self.classes if c != "disabled"]
return POLICY_CHECK(policy, request)
class ReleaseIPs(tables.BatchAction):
name = "release"
classes = ('btn-danger',)
icon = "unlink"
help_text = _("Once a floating IP is released, there is"
" no guarantee the same IP can be allocated again.")
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Release Floating IP",
u"Release Floating IPs",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Released Floating IP",
u"Released Floating IPs",
count
)
def allowed(self, request, fip=None):
if api.base.is_service_enabled(request, "network"):
policy = (("network", "delete_floatingip"),)
else:
policy = (("compute", "compute_extension:floating_ips"),
("compute", "network:release_floating_ip"),)
return POLICY_CHECK(policy, request)
def action(self, request, obj_id):
api.network.tenant_floating_ip_release(request, obj_id)
# operation log
        config = _('Floating IP ID: %s') % obj_id
api.logger.Logger(request).create(resource_type='floatip', action_name='Release Floating Ip',
resource_name='Floating IP', config=config,
status='Success')
class AssociateIP(tables.LinkAction):
name = "associate"
verbose_name = _("Associate")
url = "horizon:network:floating_ips:associate"
classes = ("ajax-modal",)
icon = "link"
def allowed(self, request, fip):
if api.base.is_service_enabled(request, "network"):
policy = (("network", "update_floatingip"),)
else:
policy = (("compute", "compute_extension:floating_ips"),
("compute", "network:associate_floating_ip"),)
account_is_frozen = check_account_is_frozen(request)
if account_is_frozen:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
else:
self.classes = [c for c in self.classes if c != "disabled"]
return not fip.port_id and POLICY_CHECK(policy, request)
def get_link_url(self, datum):
base_url = reverse(self.url)
params = urlencode({"ip_id": self.table.get_object_id(datum)})
return "?".join([base_url, params])
class DisassociateIP(tables.Action):
name = "disassociate"
verbose_name = _("Disassociate")
classes = ("btn-disassociate", "btn-danger")
icon = "unlink"
def allowed(self, request, fip):
if api.base.is_service_enabled(request, "network"):
policy = (("network", "update_floatingip"),)
else:
policy = (("compute", "compute_extension:floating_ips"),
("compute", "network:disassociate_floating_ip"),)
return fip.port_id and POLICY_CHECK(policy, request)
def single(self, table, request, obj_id):
try:
fip = table.get_object_by_id(filters.get_int_or_uuid(obj_id))
api.network.floating_ip_disassociate(request, fip.id)
LOG.info('Disassociating Floating IP "%s".' % obj_id)
messages.success(request,
_('Successfully disassociated Floating IP: %s')
% fip.ip)
# operation log
            config = _('Floating IP ID: %s') % obj_id
api.logger.Logger(request).create(resource_type='floatip', action_name='Disassociate Floating IP',
resource_name='Floating IP', config=config,
status='Success')
except Exception:
exceptions.handle(request,
_('Unable to disassociate floating IP.'))
# operation log
api.logger.Logger(request).create(resource_type='floatip', action_name='Disassociate Floating IP',
resource_name='Floating IP', config= _('Unable to disassociate floating IP.'),
status='Error')
return shortcuts.redirect('horizon:network:floating_ips:index')
class UpdateFloatingIP(tables.LinkAction):
name = "updatefloatingip"
verbose_name = _("Update Floating IP")
url = "horizon:network:floating_ips:update"
classes = ("ajax-modal",)
icon = "link"
def get_link_url(self, datum):
base_url = reverse(self.url)
params = urlencode({"floating_ip_id": self.table.get_object_id(datum)})
return "?".join([base_url, params])
def allowed(self, request, fip):
account_is_frozen = check_account_is_frozen(request)
if account_is_frozen:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
else:
self.classes = [c for c in self.classes if c != "disabled"]
return super(UpdateFloatingIP, self).allowed(request, fip)
def get_instance_info(fip):
if fip.instance_type == 'compute':
return (_("%(instance_name)s %(fixed_ip)s")
% {'instance_name': getattr(fip, "instance_name", ''),
'fixed_ip': fip.fixed_ip})
elif fip.instance_type == 'loadbalancer':
return _("Load Balancer VIP %s") % fip.fixed_ip
elif fip.instance_type:
return fip.fixed_ip
else:
return None
def get_instance_link(datum):
if datum.instance_type == 'compute':
return reverse("horizon:instances:instances:detail",
args=(datum.instance_id,))
else:
return None
STATUS_DISPLAY_CHOICES = (
("active", pgettext_lazy("Current status of a Floating IP", u"Active")),
("down", pgettext_lazy("Current status of a Floating IP", u"Down")),
("error", pgettext_lazy("Current status of a Floating IP", u"Error")),
)
class FloatingIPsTable(tables.DataTable):
STATUS_CHOICES = (
("active", True),
("down", True),
("error", False)
)
ip = tables.Column("ip",
verbose_name=_("IP Address"),
attrs={'data-type': "ip"})
fixed_ip = tables.Column(get_instance_info,
link=get_instance_link,
verbose_name=_("Mapped Fixed IP Address"))
limit_speed = tables.Column("limit_speed",
verbose_name=_("Limit Speed(Mbps)"))
pool = tables.Column("pool_name",
verbose_name=_("Pool"))
status = tables.Column("status",
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
def __init__(self, request, data=None, needs_form_wrapper=None, **kwargs):
super(FloatingIPsTable, self).__init__(
request, data=data, needs_form_wrapper=needs_form_wrapper,
**kwargs)
if not api.base.is_service_enabled(request, 'network'):
del self.columns['status']
def sanitize_id(self, obj_id):
return filters.get_int_or_uuid(obj_id)
def get_object_display(self, datum):
return datum.ip
class Meta(object):
name = "floating_ips"
verbose_name = _("Floating IPs")
table_actions = (AllocateIP, ReleaseIPs)
row_actions = (AssociateIP, DisassociateIP, UpdateFloatingIP, ReleaseIPs)
|
py | b404894ab78a50217cdf65775ed220c34bbf423a | #!/usr/bin/env python3
import argparse
from src.data_processor import lstm_data_processor
parser = argparse.ArgumentParser()
parser.add_argument('-x', '--xtal', type=int, default=54000, help='Input XTAL Id; default = 54000')
parser.add_argument('-y', '--year', type=int, default=2018,
                    help='Year used for making predictions; default = 2018')
parser.add_argument('--period', type=int, default=35,
help='Period of the LSTMs used for making predictions; default=35')
parser.add_argument('-i', '--input_folder', type=str, default='./data/interim',
help='Path to the input folder; default=\'data/interim\'')
parser.add_argument('-o', '--output_folder', type=str, default='./data/processed',
help='Path to output for storing the plots; default=data/processed')
parser.add_argument('-s', '--data_scaler', type=str, default='./src/preprocessing/scaler_2017_all_xtals.pickle',
help='Pickle file containing scaler object.')
args = parser.parse_args()
XTAL = args.xtal
YEAR = args.year
PERIOD = args.period
SCALER = args.data_scaler
prsr = lstm_data_processor(XTAL, YEAR, PERIOD)
prsr.prepare_dataset_from_csv(args.input_folder, SCALER)
prsr.save_to_pickle(args.output_folder)
|
py | b404898568bf8e5e98b2cad8321066a88ddf1f00 | __version__ = 0.3
__title__ = "pymerra2"
__description__ = "A tool for downloading and subsetting NASA MERRA-2 Data"
__url__ = "https://github.com/Ouranosinc/pymerra2"
__author__ = "Trevor James Smith"
__author_email__ = "[email protected]"
__license__ = "Apache 2.0"
__copyright__ = "Copyright 2018 Ouranos Inc."
from pymerra2 import download, variables
|
py | b40489baca8f4859e0a3625792bf88d05c425255 | ###############################################################################
#
# XMLwriter - A base class for XlsxWriter classes.
#
# Used in conjunction with XlsxWriter.
#
# Copyright 2013, John McNamara, [email protected]
#
# Standard packages.
import re
import sys
class XMLwriter(object):
"""
Simple XML writer class.
"""
def __init__(self):
self.fh = None
self.escapes = re.compile('["&<>]')
self.internal_fh = False
def _set_filehandle(self, filehandle):
# Set the writer filehandle directly. Mainly for testing.
self.fh = filehandle
self.internal_fh = False
def _set_xml_writer(self, filename):
# Set the XML writer filehandle for the object. This can either be
# done using _set_filehandle(), usually for testing, or later via
# this method, when assembling the xlsx file.
self.internal_fh = True
if sys.version_info >= (3, 0):
self.fh = open(filename, 'w', encoding='utf-8')
else:
self.fh = open(filename, 'w')
def _xml_close(self):
# Close the XML filehandle if we created it.
if self.internal_fh:
self.fh.close()
def _xml_declaration(self):
# Write the XML declaration.
self.fh.write(
"""<?xml version="1.0" encoding="UTF-8" standalone="yes"?>\n""")
def _xml_start_tag(self, tag, attributes=[]):
# Write an XML start tag with optional attributes.
for key, value in attributes:
value = self._escape_attributes(value)
tag = tag + ' %s="%s"' % (key, value)
self.fh.write("<%s>" % tag)
def _xml_start_tag_unencoded(self, tag, attributes=[]):
# Write an XML start tag with optional, unencoded, attributes.
# This is a minor speed optimisation for elements that don't
# need encoding.
for key, value in attributes:
tag = tag + ' %s="%s"' % (key, value)
self.fh.write("<%s>" % tag)
def _xml_end_tag(self, tag):
# Write an XML end tag.
self.fh.write("</%s>" % tag)
def _xml_empty_tag(self, tag, attributes=[]):
# Write an empty XML tag with optional attributes.
for key, value in attributes:
value = self._escape_attributes(value)
tag = tag + ' %s="%s"' % (key, value)
self.fh.write("<%s/>" % tag)
def _xml_empty_tag_unencoded(self, tag, attributes=[]):
# Write an XML start tag with optional, unencoded, attributes.
# This is a minor speed optimisation for elements that don't
# need encoding.
for key, value in attributes:
tag = tag + ' %s="%s"' % (key, value)
self.fh.write("<%s/>" % tag)
def _xml_data_element(self, tag, data, attributes=[]):
# Write an XML element containing data with optional attributes.
end_tag = tag
for key, value in attributes:
value = self._escape_attributes(value)
tag = tag + ' %s="%s"' % (key, value)
data = self._escape_data(data)
self.fh.write("<%s>%s</%s>" % (tag, data, end_tag))
def _xml_string_element(self, index, attributes=[]):
# Optimised tag writer for <c> cell string elements in the inner loop.
attr = ''
for key, value in attributes:
value = self._escape_attributes(value)
attr = attr + ' %s="%s"' % (key, value)
self.fh.write("""<c%s t="s"><v>%d</v></c>""" % (attr, index))
def _xml_si_element(self, string, attributes=[]):
# Optimised tag writer for shared strings <si> elements.
attr = ''
for key, value in attributes:
value = self._escape_attributes(value)
attr = attr + ' %s="%s"' % (key, value)
string = self._escape_data(string)
self.fh.write("""<si><t%s>%s</t></si>""" % (attr, string))
def _xml_rich_si_element(self, string):
# Optimised tag writer for shared strings <si> rich string elements.
self.fh.write("""<si>%s</si>""" % string)
def _xml_number_element(self, number, attributes=[]):
# Optimised tag writer for <c> cell number elements in the inner loop.
attr = ''
for key, value in attributes:
value = self._escape_attributes(value)
attr = attr + ' %s="%s"' % (key, value)
self.fh.write("""<c%s><v>%.15g</v></c>""" % (attr, number))
def _xml_formula_element(self, formula, result, attributes=[]):
# Optimised tag writer for <c> cell formula elements in the inner loop.
attr = ''
for key, value in attributes:
value = self._escape_attributes(value)
attr = attr + ' %s="%s"' % (key, value)
self.fh.write("""<c%s><f>%s</f><v>%s</v></c>"""
% (attr, self._escape_data(formula),
self._escape_data(str(result))))
def _xml_inline_string(self, string, preserve, attributes=[]):
# Optimised tag writer for inlineStr cell elements in the inner loop.
attr = ''
t_attr = ''
# Set the <t> attribute to preserve whitespace.
if preserve:
t_attr = ' xml:space="preserve"'
for key, value in attributes:
value = self._escape_attributes(value)
attr = attr + ' %s="%s"' % (key, value)
string = self._escape_data(string)
self.fh.write("""<c%s t="inlineStr"><is><t%s>%s</t></is></c>""" %
(attr, t_attr, string))
def _xml_rich_inline_string(self, string, attributes=[]):
# Optimised tag writer for rich inlineStr in the inner loop.
attr = ''
for key, value in attributes:
value = self._escape_attributes(value)
attr = attr + ' %s="%s"' % (key, value)
self.fh.write("""<c%s t="inlineStr"><is>%s</is></c>""" %
(attr, string))
def _escape_attributes(self, attribute):
# Escape XML characters in attributes.
try:
if not self.escapes.search(attribute):
return attribute
except TypeError:
return attribute
        attribute = attribute.replace('&', '&amp;')
        attribute = attribute.replace('"', '&quot;')
        attribute = attribute.replace('<', '&lt;')
        attribute = attribute.replace('>', '&gt;')
return attribute
def _escape_data(self, data):
# Escape XML characters in data sections of tags. Note, this
# is different from _escape_attributes() in that double quotes
# are not escaped by Excel.
try:
if not self.escapes.search(data):
return data
except TypeError:
return data
        data = data.replace('&', '&amp;')
        data = data.replace('<', '&lt;')
        data = data.replace('>', '&gt;')
return data
|
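A short sketch of the escaping behaviour, assuming the class above is importable (the module name `xmlwriter` is hypothetical): `_escape_attributes()` escapes double quotes, while `_escape_data()` deliberately leaves them alone to match Excel.

```python
import io
from xmlwriter import XMLwriter  # hypothetical module name for the file above

writer = XMLwriter()
writer._set_filehandle(io.StringIO())

writer._xml_data_element('t', 'a < b & "c"', [('xml:space', 'preserve')])
print(writer.fh.getvalue())
# <t xml:space="preserve">a &lt; b &amp; "c"</t>   (quotes untouched in data)

print(writer._escape_attributes('a "quoted" value'))
# a &quot;quoted&quot; value                        (quotes escaped in attributes)
```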
py | b4048aa59e1f99a8453e8a21b4f96bf2102d6038 | import os
import pickle
import numpy as np
import random as rnd
import matplotlib.pyplot as plt
import torch
from PIL import Image, ImageColor
from collections import namedtuple
import warnings
from trdg.handwritten_model.handwritten import HandwritingSynthesisNetwork
from trdg.handwritten_model import params as pr
# import params as pr
os.environ['KMP_DUPLICATE_LIB_OK']='TRUE'
warnings.filterwarnings("ignore")
#
# vocab = np.load(pr.vocab_path)
# print("Vocab: ", vocab)
# char2idx = {x: i for i, x in enumerate(vocab)}
# device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
#
# model = HandwritingSynthesisNetwork(
# len(vocab),
# pr.dec_hidden_size, pr.dec_n_layers,
# pr.n_mixtures_attention, pr.n_mixtures_output,
# device
# )
# model.load_state_dict(torch.load(pr.load_path, map_location=torch.device('cpu')))
# model = model.to(device)
def _sample(e, mu1, mu2, std1, std2, rho):
cov = np.array([[std1 * std1, std1 * std2 * rho], [std1 * std2 * rho, std2 * std2]])
mean = np.array([mu1, mu2])
x, y = np.random.multivariate_normal(mean, cov)
end = np.random.binomial(1, e)
return np.array([x, y, end])
def _split_strokes(points):
points = np.array(points)
strokes = []
b = 0
for e in range(len(points)):
if points[e, 2] == 1.0:
strokes += [points[b : e + 1, :2].copy()]
b = e + 1
return strokes
def _cumsum(points):
sums = np.cumsum(points[:, :2], axis=0)
return np.concatenate([sums, points[:, 2:]], axis=1)
def preprocessing_str(sent, vocab):
chars = list(sent)
chars = [c for c in chars if c in vocab]
return "".join(chars)
def sent2idx(sent, char2idx):
return np.asarray([char2idx[c] for c in sent])
def _sample_text_v2(model, text, vocab, char2idx):
device = model.device
string_processed = preprocessing_str(text, vocab)
# print("Processed: ", string_processed)
chars = torch.from_numpy(
sent2idx(string_processed, char2idx)
).long()[None].to(device)
chars_mask = torch.ones_like(chars).float().to(device)
with torch.no_grad():
coords = model.sample(chars, chars_mask, maxlen=pr.MAX_STROKE_LENGTH)[0].cpu().numpy()
coords = coords[:, [1, 2, 0]] # [x, y, e]
return coords
def _crop_white_borders(image):
image_data = np.asarray(image)
grey_image_data = np.asarray(image.convert("L"))
# plt.imshow(grey_image_data)
# plt.show()
non_empty_columns = np.where(grey_image_data.min(axis=0) < 255)[0]
non_empty_rows = np.where(grey_image_data.min(axis=1) < 255)[0]
# from IPython import embed; embed()
cropBox = (
min(non_empty_rows),
max(non_empty_rows),
min(non_empty_columns),
max(non_empty_columns),
)
image_data_new = image_data[
cropBox[0] : cropBox[1] + 1, cropBox[2] : cropBox[3] + 1, :
]
# plt.imshow(image_data_new)
# plt.show()
return Image.fromarray(image_data_new)
def _join_images(images, padding_size=35):
widths, heights = zip(*(i.size for i in images))
total_width = sum(widths) + padding_size * (len(images)-1)
max_height = max(heights)
compound_image = Image.new("RGBA", (total_width, max_height))
x_offset = 0
for im in images:
compound_image.paste(im, (x_offset, 0))
x_offset += im.size[0] + padding_size
return compound_image
def align_strokes(coords):
"""
corrects for global slant/offset in handwriting strokes
"""
coords = np.copy(coords)
X, Y = coords[:, 0].reshape(-1, 1), coords[:, 1].reshape(-1, 1)
X = np.concatenate([np.ones([X.shape[0], 1]), X], axis=1)
offset, slope = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(Y).squeeze()
theta = np.arctan(slope)
rotation_matrix = np.array(
[[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]]
)
coords[:, :2] = np.dot(coords[:, :2], rotation_matrix) - offset
return coords
def generate(model, vocab, char2idx, text, text_color=None, align=True):
images = []
# colors = [ImageColor.getrgb(c) for c in text_color.split(",")]
# c1, c2 = colors[0], colors[-1]
#
# color = "#{:02x}{:02x}{:02x}".format(
# rnd.randint(min(c1[0], c2[0]), max(c1[0], c2[0])),
# rnd.randint(min(c1[1], c2[1]), max(c1[1], c2[1])),
# rnd.randint(min(c1[2], c2[2]), max(c1[2], c2[2])),
# )
for word in text.split(" "):
# print("Word: ", word)
word += " "
coords = _sample_text_v2(
model, word, vocab, char2idx
)
# print(coords)
# break
fig, ax = plt.subplots(1, 1)
fig.patch.set_visible(False)
ax.axis("off")
strokes = np.concatenate(
[coords[:, 2:3], np.cumsum(coords[:, :2], axis=0)],
axis=1
)
if align:
strokes[:, 1:] = align_strokes(strokes[:, 1:])
stroke = []
color_pen = rnd.choice(['blue', 'black', 'red'])
linewidth = rnd.choice([2,3, 4,5, 8, 12])
# linewidth =
for eos, x, y in strokes:
stroke.append((x, y))
if eos == 1:
xs, ys = zip(*stroke)
ys = np.array(ys)
plt.plot(xs, ys, 'k', c=color_pen, linewidth=linewidth)
stroke = []
if stroke:
xs, ys = zip(*stroke)
ys = np.array(ys)
plt.plot(xs, ys, 'k', c=color_pen, linewidth=linewidth)
# plt.show()
# for stroke in _split_strokes(_cumsum(np.array(coords))):
# plt.plot(stroke[:, 0], -stroke[:, 1], color=color)
fig.patch.set_alpha(0)
fig.patch.set_facecolor("none")
# plt.show()
canvas = plt.get_current_fig_manager().canvas
canvas.draw()
s, (width, height) = canvas.print_to_buffer()
image = Image.frombytes("RGBA", (width, height), s)
mask = Image.new("RGB", (width, height), (0, 0, 0))
# plt.imshow(image)
images.append(_crop_white_borders(image))
# plt.show()
plt.close()
return _join_images(images), mask
if __name__ == "__main__":
    # Assumes the model / vocab / char2idx setup commented out above has been run.
    img, _ = generate(model, vocab, char2idx, "nghiêng", text_color="#000000")
print('mask', _)
plt.imshow(np.asarray(img))
plt.show()
|
py | b4048b34d2bd538ed5be8688fba4043db72233a1 | import os
import csv
import rows_to_columns as rtc
import statistics
import collections
def extract(
directory = './data/BFs_logan/spectral_new',
prefix = 'spectral_raw - '):
data_dict = {}
mode = 'w'
for filename in os.listdir(directory):
filepath=directory+'/'+filename
fn_listchar = list(filename[:-4])
pf_listchar = list(prefix)
for char in pf_listchar:
fn_listchar.remove(char)
bird_ID = ''
for char in fn_listchar:
bird_ID += char
data_dict[bird_ID]={}
with open(filepath) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
data = list(csv_reader)
inventory = data[0][-1]
data_reformatted=[]
for row in data:
syllable = inventory[int(row[0])-1]
bird_ID_syllable = bird_ID+'_'+syllable
if inventory in row:
row.remove(inventory)
if '' in row:
row.remove('')
row_data = [float(value) for value in row[1:6]]
row_data.append(bird_ID_syllable)
data_reformatted.append(row_data)
data_dict[bird_ID]['inventory'] = inventory
data_dict[bird_ID]['data'] = data_reformatted
return data_dict
def syllable_medians(
directory = './data/BFs_logan/spectral_new',
prefix = 'spectral_raw - ',
feats = ['MeanFreq',
'SpecDense',
'Duration',
'LoudEnt',
'SpecTempEnt',
'Label']):
out_dict = {}
extracted_dict = extract(directory, prefix)
for bird_ID, bird_data in extracted_dict.items():
bird_medians_dict = {}
inventory = bird_data['inventory']
inventory_with_IDs = []
for syllable in inventory:
inventory_with_IDs.append(bird_ID + '_' + syllable)
data = bird_data['data']
for syllable in inventory_with_IDs:
relevant_rows = []
for row in data:
if row[-1] == syllable:
relevant_rows.append(row)
columns = rtc.rtc(relevant_rows).items()
syl_dict = {}
for i,column in columns:
if i in range(0,len(feats)-1):
new_column = [float(item) for item in column]
syl_dict[feats[i]] = statistics.median(new_column)
bird_medians_dict[syllable] = syl_dict
ordered_bird_dict = dict(collections.OrderedDict(sorted(bird_medians_dict.items())))
out_dict[bird_ID] = ordered_bird_dict
return out_dict
'''
def reformat_medians():
previous_result=syllable_medians()
with open("./output/spectral_MK.csv", 'w') as output_file:
writer = csv.writer(output_file)
for birdID,bird_dict in previous_result.items():
row = []
row.append(birdID)
for syllable, syl_dict in bird_dict.items():
row.append(syllable)
for feature in syl_dict.values():
row.append(feature)
row.append('')
writer.writerow(row)
'''
def reformat_PCA():
previous_result=syllable_medians()
with open("./output/spectral_MK_PCA.csv", 'w') as output_file:
writer = csv.writer(output_file)
for birdID,bird_dict in previous_result.items():
for syllable, syl_dict in bird_dict.items():
row=[]
for feature in list(syl_dict.values()):
row.append(feature)
row.append(syllable)
writer.writerow(row)
def PCA_tokens(directory = './data/BFs_logan/spectral_new'):
birds_dict = extract(directory=directory)
with open('./output/PCA_tokens.csv', 'w') as output_file:
writer = csv.writer(output_file)
for value in birds_dict.values():
for row in value['data']:
writer.writerow(row)
|
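A minimal usage sketch, assuming the file above is saved as `spectral_medians.py` (hypothetical name), that the `rows_to_columns` helper it imports is on the path, and that the default `./data/BFs_logan/spectral_new` directory holds CSVs named with the `'spectral_raw - '` prefix:

```python
from spectral_medians import extract, syllable_medians  # hypothetical module name

# Per-bird syllable inventory and raw feature rows.
for bird_id, bird in extract().items():
    print(bird_id, bird['inventory'], len(bird['data']), 'rows')

# Median MeanFreq / SpecDense / Duration / LoudEnt / SpecTempEnt per syllable.
for bird_id, syllables in syllable_medians().items():
    for syllable, feats in syllables.items():
        print(syllable, round(feats['Duration'], 3))
```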
py | b4048c3a8f2e82c421401d56c5c7b84a0e03879d | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
__all__ = [
'AssetItemResponse',
'BlobLocationResponse',
'ColumnSpecificationResponse',
'CommitmentPlanResponse',
'DiagnosticsConfigurationResponse',
'ExampleRequestResponse',
'GraphEdgeResponse',
'GraphNodeResponse',
'GraphPackageResponse',
'GraphParameterLinkResponse',
'GraphParameterResponse',
'InputPortResponse',
'MachineLearningWorkspaceResponse',
'ModeValueInfoResponse',
'ModuleAssetParameterResponse',
'OutputPortResponse',
'RealtimeConfigurationResponse',
'ServiceInputOutputSpecificationResponse',
'StorageAccountResponse',
'TableSpecificationResponse',
'WebServiceKeysResponse',
'WebServiceParameterResponse',
'WebServicePropertiesForGraphResponse',
]
@pulumi.output_type
class AssetItemResponse(dict):
"""
Information about an asset associated with the web service.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "locationInfo":
suggest = "location_info"
elif key == "inputPorts":
suggest = "input_ports"
elif key == "outputPorts":
suggest = "output_ports"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in AssetItemResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
AssetItemResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
AssetItemResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
location_info: 'outputs.BlobLocationResponse',
name: str,
type: str,
id: Optional[str] = None,
input_ports: Optional[Mapping[str, 'outputs.InputPortResponse']] = None,
metadata: Optional[Mapping[str, str]] = None,
output_ports: Optional[Mapping[str, 'outputs.OutputPortResponse']] = None,
parameters: Optional[Sequence['outputs.ModuleAssetParameterResponse']] = None):
"""
Information about an asset associated with the web service.
:param 'BlobLocationResponse' location_info: Access information for the asset.
:param str name: Asset's friendly name.
:param str type: Asset's type.
:param str id: Asset's Id.
:param Mapping[str, 'InputPortResponse'] input_ports: Information about the asset's input ports.
:param Mapping[str, str] metadata: If the asset is a custom module, this holds the module's metadata.
:param Mapping[str, 'OutputPortResponse'] output_ports: Information about the asset's output ports.
:param Sequence['ModuleAssetParameterResponse'] parameters: If the asset is a custom module, this holds the module's parameters.
"""
pulumi.set(__self__, "location_info", location_info)
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "type", type)
if id is not None:
pulumi.set(__self__, "id", id)
if input_ports is not None:
pulumi.set(__self__, "input_ports", input_ports)
if metadata is not None:
pulumi.set(__self__, "metadata", metadata)
if output_ports is not None:
pulumi.set(__self__, "output_ports", output_ports)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter(name="locationInfo")
def location_info(self) -> 'outputs.BlobLocationResponse':
"""
Access information for the asset.
"""
return pulumi.get(self, "location_info")
@property
@pulumi.getter
def name(self) -> str:
"""
Asset's friendly name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def type(self) -> str:
"""
Asset's type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def id(self) -> Optional[str]:
"""
Asset's Id.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter(name="inputPorts")
def input_ports(self) -> Optional[Mapping[str, 'outputs.InputPortResponse']]:
"""
Information about the asset's input ports.
"""
return pulumi.get(self, "input_ports")
@property
@pulumi.getter
def metadata(self) -> Optional[Mapping[str, str]]:
"""
If the asset is a custom module, this holds the module's metadata.
"""
return pulumi.get(self, "metadata")
@property
@pulumi.getter(name="outputPorts")
def output_ports(self) -> Optional[Mapping[str, 'outputs.OutputPortResponse']]:
"""
Information about the asset's output ports.
"""
return pulumi.get(self, "output_ports")
@property
@pulumi.getter
def parameters(self) -> Optional[Sequence['outputs.ModuleAssetParameterResponse']]:
"""
If the asset is a custom module, this holds the module's parameters.
"""
return pulumi.get(self, "parameters")
@pulumi.output_type
class BlobLocationResponse(dict):
"""
Describes the access location for a blob.
"""
def __init__(__self__, *,
uri: str,
credentials: Optional[str] = None):
"""
Describes the access location for a blob.
        :param str uri: The URI from which the blob is accessible. For example, aml://abc for system assets or https://xyz for user assets or payload.
:param str credentials: Access credentials for the blob, if applicable (e.g. blob specified by storage account connection string + blob URI)
"""
pulumi.set(__self__, "uri", uri)
if credentials is not None:
pulumi.set(__self__, "credentials", credentials)
@property
@pulumi.getter
def uri(self) -> str:
"""
        The URI from which the blob is accessible. For example, aml://abc for system assets or https://xyz for user assets or payload.
"""
return pulumi.get(self, "uri")
@property
@pulumi.getter
def credentials(self) -> Optional[str]:
"""
Access credentials for the blob, if applicable (e.g. blob specified by storage account connection string + blob URI)
"""
return pulumi.get(self, "credentials")
@pulumi.output_type
class ColumnSpecificationResponse(dict):
"""
Swagger 2.0 schema for a column within the data table representing a web service input or output. See Swagger specification: http://swagger.io/specification/
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "xMsIsnullable":
suggest = "x_ms_isnullable"
elif key == "xMsIsordered":
suggest = "x_ms_isordered"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ColumnSpecificationResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ColumnSpecificationResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ColumnSpecificationResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
type: str,
enum: Optional[Sequence[Any]] = None,
format: Optional[str] = None,
x_ms_isnullable: Optional[bool] = None,
x_ms_isordered: Optional[bool] = None):
"""
Swagger 2.0 schema for a column within the data table representing a web service input or output. See Swagger specification: http://swagger.io/specification/
:param str type: Data type of the column.
:param Sequence[Any] enum: If the data type is categorical, this provides the list of accepted categories.
:param str format: Additional format information for the data type.
:param bool x_ms_isnullable: Flag indicating if the type supports null values or not.
:param bool x_ms_isordered: Flag indicating whether the categories are treated as an ordered set or not, if this is a categorical column.
"""
pulumi.set(__self__, "type", type)
if enum is not None:
pulumi.set(__self__, "enum", enum)
if format is not None:
pulumi.set(__self__, "format", format)
if x_ms_isnullable is not None:
pulumi.set(__self__, "x_ms_isnullable", x_ms_isnullable)
if x_ms_isordered is not None:
pulumi.set(__self__, "x_ms_isordered", x_ms_isordered)
@property
@pulumi.getter
def type(self) -> str:
"""
Data type of the column.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def enum(self) -> Optional[Sequence[Any]]:
"""
If the data type is categorical, this provides the list of accepted categories.
"""
return pulumi.get(self, "enum")
@property
@pulumi.getter
def format(self) -> Optional[str]:
"""
Additional format information for the data type.
"""
return pulumi.get(self, "format")
@property
@pulumi.getter(name="xMsIsnullable")
def x_ms_isnullable(self) -> Optional[bool]:
"""
Flag indicating if the type supports null values or not.
"""
return pulumi.get(self, "x_ms_isnullable")
@property
@pulumi.getter(name="xMsIsordered")
def x_ms_isordered(self) -> Optional[bool]:
"""
Flag indicating whether the categories are treated as an ordered set or not, if this is a categorical column.
"""
return pulumi.get(self, "x_ms_isordered")
@pulumi.output_type
class CommitmentPlanResponse(dict):
"""
Information about the machine learning commitment plan associated with the web service.
"""
def __init__(__self__, *,
id: str):
"""
Information about the machine learning commitment plan associated with the web service.
:param str id: Specifies the Azure Resource Manager ID of the commitment plan associated with the web service.
"""
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> str:
"""
Specifies the Azure Resource Manager ID of the commitment plan associated with the web service.
"""
return pulumi.get(self, "id")
@pulumi.output_type
class DiagnosticsConfigurationResponse(dict):
"""
Diagnostics settings for an Azure ML web service.
"""
def __init__(__self__, *,
level: str,
expiry: Optional[str] = None):
"""
Diagnostics settings for an Azure ML web service.
:param str level: Specifies the verbosity of the diagnostic output. Valid values are: None - disables tracing; Error - collects only error (stderr) traces; All - collects all traces (stdout and stderr).
:param str expiry: Specifies the date and time when the logging will cease. If null, diagnostic collection is not time limited.
"""
pulumi.set(__self__, "level", level)
if expiry is not None:
pulumi.set(__self__, "expiry", expiry)
@property
@pulumi.getter
def level(self) -> str:
"""
Specifies the verbosity of the diagnostic output. Valid values are: None - disables tracing; Error - collects only error (stderr) traces; All - collects all traces (stdout and stderr).
"""
return pulumi.get(self, "level")
@property
@pulumi.getter
def expiry(self) -> Optional[str]:
"""
Specifies the date and time when the logging will cease. If null, diagnostic collection is not time limited.
"""
return pulumi.get(self, "expiry")
@pulumi.output_type
class ExampleRequestResponse(dict):
"""
Sample input data for the service's input(s).
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "globalParameters":
suggest = "global_parameters"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ExampleRequestResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ExampleRequestResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ExampleRequestResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
global_parameters: Optional[Mapping[str, Any]] = None,
inputs: Optional[Mapping[str, Sequence[Sequence[Any]]]] = None):
"""
Sample input data for the service's input(s).
:param Mapping[str, Any] global_parameters: Sample input data for the web service's global parameters
:param Mapping[str, Sequence[Sequence[Any]]] inputs: Sample input data for the web service's input(s) given as an input name to sample input values matrix map.
"""
if global_parameters is not None:
pulumi.set(__self__, "global_parameters", global_parameters)
if inputs is not None:
pulumi.set(__self__, "inputs", inputs)
@property
@pulumi.getter(name="globalParameters")
def global_parameters(self) -> Optional[Mapping[str, Any]]:
"""
Sample input data for the web service's global parameters
"""
return pulumi.get(self, "global_parameters")
@property
@pulumi.getter
def inputs(self) -> Optional[Mapping[str, Sequence[Sequence[Any]]]]:
"""
Sample input data for the web service's input(s) given as an input name to sample input values matrix map.
"""
return pulumi.get(self, "inputs")
@pulumi.output_type
class GraphEdgeResponse(dict):
"""
Defines an edge within the web service's graph.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "sourceNodeId":
suggest = "source_node_id"
elif key == "sourcePortId":
suggest = "source_port_id"
elif key == "targetNodeId":
suggest = "target_node_id"
elif key == "targetPortId":
suggest = "target_port_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in GraphEdgeResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
GraphEdgeResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
GraphEdgeResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
source_node_id: Optional[str] = None,
source_port_id: Optional[str] = None,
target_node_id: Optional[str] = None,
target_port_id: Optional[str] = None):
"""
Defines an edge within the web service's graph.
:param str source_node_id: The source graph node's identifier.
:param str source_port_id: The identifier of the source node's port that the edge connects from.
:param str target_node_id: The destination graph node's identifier.
:param str target_port_id: The identifier of the destination node's port that the edge connects into.
"""
if source_node_id is not None:
pulumi.set(__self__, "source_node_id", source_node_id)
if source_port_id is not None:
pulumi.set(__self__, "source_port_id", source_port_id)
if target_node_id is not None:
pulumi.set(__self__, "target_node_id", target_node_id)
if target_port_id is not None:
pulumi.set(__self__, "target_port_id", target_port_id)
@property
@pulumi.getter(name="sourceNodeId")
def source_node_id(self) -> Optional[str]:
"""
The source graph node's identifier.
"""
return pulumi.get(self, "source_node_id")
@property
@pulumi.getter(name="sourcePortId")
def source_port_id(self) -> Optional[str]:
"""
The identifier of the source node's port that the edge connects from.
"""
return pulumi.get(self, "source_port_id")
@property
@pulumi.getter(name="targetNodeId")
def target_node_id(self) -> Optional[str]:
"""
The destination graph node's identifier.
"""
return pulumi.get(self, "target_node_id")
@property
@pulumi.getter(name="targetPortId")
def target_port_id(self) -> Optional[str]:
"""
The identifier of the destination node's port that the edge connects into.
"""
return pulumi.get(self, "target_port_id")
@pulumi.output_type
class GraphNodeResponse(dict):
"""
Specifies a node in the web service graph. The node can either be an input, output or asset node, so only one of the corresponding id properties is populated at any given time.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "assetId":
suggest = "asset_id"
elif key == "inputId":
suggest = "input_id"
elif key == "outputId":
suggest = "output_id"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in GraphNodeResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
GraphNodeResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
GraphNodeResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
asset_id: Optional[str] = None,
input_id: Optional[str] = None,
output_id: Optional[str] = None,
parameters: Optional[Mapping[str, 'outputs.WebServiceParameterResponse']] = None):
"""
Specifies a node in the web service graph. The node can either be an input, output or asset node, so only one of the corresponding id properties is populated at any given time.
:param str asset_id: The id of the asset represented by this node.
:param str input_id: The id of the input element represented by this node.
:param str output_id: The id of the output element represented by this node.
:param Mapping[str, 'WebServiceParameterResponse'] parameters: If applicable, parameters of the node. Global graph parameters map into these, with values set at runtime.
"""
if asset_id is not None:
pulumi.set(__self__, "asset_id", asset_id)
if input_id is not None:
pulumi.set(__self__, "input_id", input_id)
if output_id is not None:
pulumi.set(__self__, "output_id", output_id)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter(name="assetId")
def asset_id(self) -> Optional[str]:
"""
The id of the asset represented by this node.
"""
return pulumi.get(self, "asset_id")
@property
@pulumi.getter(name="inputId")
def input_id(self) -> Optional[str]:
"""
The id of the input element represented by this node.
"""
return pulumi.get(self, "input_id")
@property
@pulumi.getter(name="outputId")
def output_id(self) -> Optional[str]:
"""
The id of the output element represented by this node.
"""
return pulumi.get(self, "output_id")
@property
@pulumi.getter
def parameters(self) -> Optional[Mapping[str, 'outputs.WebServiceParameterResponse']]:
"""
If applicable, parameters of the node. Global graph parameters map into these, with values set at runtime.
"""
return pulumi.get(self, "parameters")
@pulumi.output_type
class GraphPackageResponse(dict):
"""
Defines the graph of modules making up the machine learning solution.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "graphParameters":
suggest = "graph_parameters"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in GraphPackageResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
GraphPackageResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
GraphPackageResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
edges: Optional[Sequence['outputs.GraphEdgeResponse']] = None,
graph_parameters: Optional[Mapping[str, 'outputs.GraphParameterResponse']] = None,
nodes: Optional[Mapping[str, 'outputs.GraphNodeResponse']] = None):
"""
Defines the graph of modules making up the machine learning solution.
:param Sequence['GraphEdgeResponse'] edges: The list of edges making up the graph.
:param Mapping[str, 'GraphParameterResponse'] graph_parameters: The collection of global parameters for the graph, given as a global parameter name to GraphParameter map. Each parameter here has a 1:1 match with the global parameters values map declared at the WebServiceProperties level.
:param Mapping[str, 'GraphNodeResponse'] nodes: The set of nodes making up the graph, provided as a nodeId to GraphNode map
"""
if edges is not None:
pulumi.set(__self__, "edges", edges)
if graph_parameters is not None:
pulumi.set(__self__, "graph_parameters", graph_parameters)
if nodes is not None:
pulumi.set(__self__, "nodes", nodes)
@property
@pulumi.getter
def edges(self) -> Optional[Sequence['outputs.GraphEdgeResponse']]:
"""
The list of edges making up the graph.
"""
return pulumi.get(self, "edges")
@property
@pulumi.getter(name="graphParameters")
def graph_parameters(self) -> Optional[Mapping[str, 'outputs.GraphParameterResponse']]:
"""
The collection of global parameters for the graph, given as a global parameter name to GraphParameter map. Each parameter here has a 1:1 match with the global parameters values map declared at the WebServiceProperties level.
"""
return pulumi.get(self, "graph_parameters")
@property
@pulumi.getter
def nodes(self) -> Optional[Mapping[str, 'outputs.GraphNodeResponse']]:
"""
The set of nodes making up the graph, provided as a nodeId to GraphNode map
"""
return pulumi.get(self, "nodes")
@pulumi.output_type
class GraphParameterLinkResponse(dict):
"""
Association link for a graph global parameter to a node in the graph.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "nodeId":
suggest = "node_id"
elif key == "parameterKey":
suggest = "parameter_key"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in GraphParameterLinkResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
GraphParameterLinkResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
GraphParameterLinkResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
node_id: str,
parameter_key: str):
"""
Association link for a graph global parameter to a node in the graph.
:param str node_id: The graph node's identifier
:param str parameter_key: The identifier of the node parameter that the global parameter maps to.
"""
pulumi.set(__self__, "node_id", node_id)
pulumi.set(__self__, "parameter_key", parameter_key)
@property
@pulumi.getter(name="nodeId")
def node_id(self) -> str:
"""
The graph node's identifier
"""
return pulumi.get(self, "node_id")
@property
@pulumi.getter(name="parameterKey")
def parameter_key(self) -> str:
"""
The identifier of the node parameter that the global parameter maps to.
"""
return pulumi.get(self, "parameter_key")
@pulumi.output_type
class GraphParameterResponse(dict):
"""
Defines a global parameter in the graph.
"""
def __init__(__self__, *,
links: Sequence['outputs.GraphParameterLinkResponse'],
type: str,
description: Optional[str] = None):
"""
Defines a global parameter in the graph.
:param Sequence['GraphParameterLinkResponse'] links: Association links for this parameter to nodes in the graph.
:param str type: Graph parameter's type.
:param str description: Description of this graph parameter.
"""
pulumi.set(__self__, "links", links)
pulumi.set(__self__, "type", type)
if description is not None:
pulumi.set(__self__, "description", description)
@property
@pulumi.getter
def links(self) -> Sequence['outputs.GraphParameterLinkResponse']:
"""
Association links for this parameter to nodes in the graph.
"""
return pulumi.get(self, "links")
@property
@pulumi.getter
def type(self) -> str:
"""
Graph parameter's type.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Description of this graph parameter.
"""
return pulumi.get(self, "description")
@pulumi.output_type
class InputPortResponse(dict):
"""
Asset input port
"""
def __init__(__self__, *,
type: Optional[str] = None):
"""
Asset input port
:param str type: Port data type.
"""
if type is None:
type = 'Dataset'
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
Port data type.
"""
return pulumi.get(self, "type")
@pulumi.output_type
class MachineLearningWorkspaceResponse(dict):
"""
Information about the machine learning workspace containing the experiment that is source for the web service.
"""
def __init__(__self__, *,
id: str):
"""
Information about the machine learning workspace containing the experiment that is source for the web service.
:param str id: Specifies the workspace ID of the machine learning workspace associated with the web service
"""
pulumi.set(__self__, "id", id)
@property
@pulumi.getter
def id(self) -> str:
"""
Specifies the workspace ID of the machine learning workspace associated with the web service
"""
return pulumi.get(self, "id")
@pulumi.output_type
class ModeValueInfoResponse(dict):
"""
Nested parameter definition.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "interfaceString":
suggest = "interface_string"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ModeValueInfoResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ModeValueInfoResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ModeValueInfoResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
interface_string: Optional[str] = None,
parameters: Optional[Sequence['outputs.ModuleAssetParameterResponse']] = None):
"""
Nested parameter definition.
:param str interface_string: The interface string name for the nested parameter.
:param Sequence['ModuleAssetParameterResponse'] parameters: The definition of the parameter.
"""
if interface_string is not None:
pulumi.set(__self__, "interface_string", interface_string)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
@property
@pulumi.getter(name="interfaceString")
def interface_string(self) -> Optional[str]:
"""
The interface string name for the nested parameter.
"""
return pulumi.get(self, "interface_string")
@property
@pulumi.getter
def parameters(self) -> Optional[Sequence['outputs.ModuleAssetParameterResponse']]:
"""
The definition of the parameter.
"""
return pulumi.get(self, "parameters")
@pulumi.output_type
class ModuleAssetParameterResponse(dict):
"""
Parameter definition for a module asset.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "modeValuesInfo":
suggest = "mode_values_info"
elif key == "parameterType":
suggest = "parameter_type"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ModuleAssetParameterResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ModuleAssetParameterResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ModuleAssetParameterResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
mode_values_info: Optional[Mapping[str, 'outputs.ModeValueInfoResponse']] = None,
name: Optional[str] = None,
parameter_type: Optional[str] = None):
"""
Parameter definition for a module asset.
:param Mapping[str, 'ModeValueInfoResponse'] mode_values_info: Definitions for nested interface parameters if this is a complex module parameter.
:param str name: Parameter name.
:param str parameter_type: Parameter type.
"""
if mode_values_info is not None:
pulumi.set(__self__, "mode_values_info", mode_values_info)
if name is not None:
pulumi.set(__self__, "name", name)
if parameter_type is not None:
pulumi.set(__self__, "parameter_type", parameter_type)
@property
@pulumi.getter(name="modeValuesInfo")
def mode_values_info(self) -> Optional[Mapping[str, 'outputs.ModeValueInfoResponse']]:
"""
Definitions for nested interface parameters if this is a complex module parameter.
"""
return pulumi.get(self, "mode_values_info")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Parameter name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="parameterType")
def parameter_type(self) -> Optional[str]:
"""
Parameter type.
"""
return pulumi.get(self, "parameter_type")
@pulumi.output_type
class OutputPortResponse(dict):
"""
Asset output port
"""
def __init__(__self__, *,
type: Optional[str] = None):
"""
Asset output port
:param str type: Port data type.
"""
if type is None:
type = 'Dataset'
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def type(self) -> Optional[str]:
"""
Port data type.
"""
return pulumi.get(self, "type")
@pulumi.output_type
class RealtimeConfigurationResponse(dict):
"""
Holds the available configuration options for an Azure ML web service endpoint.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "maxConcurrentCalls":
suggest = "max_concurrent_calls"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in RealtimeConfigurationResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
RealtimeConfigurationResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
RealtimeConfigurationResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
max_concurrent_calls: Optional[int] = None):
"""
Holds the available configuration options for an Azure ML web service endpoint.
:param int max_concurrent_calls: Specifies the maximum concurrent calls that can be made to the web service. Minimum value: 4, Maximum value: 200.
"""
if max_concurrent_calls is not None:
pulumi.set(__self__, "max_concurrent_calls", max_concurrent_calls)
@property
@pulumi.getter(name="maxConcurrentCalls")
def max_concurrent_calls(self) -> Optional[int]:
"""
Specifies the maximum concurrent calls that can be made to the web service. Minimum value: 4, Maximum value: 200.
"""
return pulumi.get(self, "max_concurrent_calls")
@pulumi.output_type
class ServiceInputOutputSpecificationResponse(dict):
"""
The swagger 2.0 schema describing the service's inputs or outputs. See Swagger specification: http://swagger.io/specification/
"""
def __init__(__self__, *,
properties: Mapping[str, 'outputs.TableSpecificationResponse'],
type: str,
description: Optional[str] = None,
title: Optional[str] = None):
"""
The swagger 2.0 schema describing the service's inputs or outputs. See Swagger specification: http://swagger.io/specification/
:param Mapping[str, 'TableSpecificationResponse'] properties: Specifies a collection that contains the column schema for each input or output of the web service. For more information, see the Swagger specification.
:param str type: The type of the entity described in swagger. Always 'object'.
:param str description: The description of the Swagger schema.
:param str title: The title of your Swagger schema.
"""
pulumi.set(__self__, "properties", properties)
if type is None:
type = 'object'
pulumi.set(__self__, "type", type)
if description is not None:
pulumi.set(__self__, "description", description)
if title is not None:
pulumi.set(__self__, "title", title)
@property
@pulumi.getter
def properties(self) -> Mapping[str, 'outputs.TableSpecificationResponse']:
"""
Specifies a collection that contains the column schema for each input or output of the web service. For more information, see the Swagger specification.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the entity described in swagger. Always 'object'.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The description of the Swagger schema.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def title(self) -> Optional[str]:
"""
The title of your Swagger schema.
"""
return pulumi.get(self, "title")
@pulumi.output_type
class StorageAccountResponse(dict):
"""
Access information for a storage account.
"""
def __init__(__self__, *,
key: Optional[str] = None,
name: Optional[str] = None):
"""
Access information for a storage account.
:param str key: Specifies the key used to access the storage account.
:param str name: Specifies the name of the storage account.
"""
if key is not None:
pulumi.set(__self__, "key", key)
if name is not None:
pulumi.set(__self__, "name", name)
@property
@pulumi.getter
def key(self) -> Optional[str]:
"""
Specifies the key used to access the storage account.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def name(self) -> Optional[str]:
"""
Specifies the name of the storage account.
"""
return pulumi.get(self, "name")
@pulumi.output_type
class TableSpecificationResponse(dict):
"""
The swagger 2.0 schema describing a single service input or output. See Swagger specification: http://swagger.io/specification/
"""
def __init__(__self__, *,
type: str,
description: Optional[str] = None,
format: Optional[str] = None,
properties: Optional[Mapping[str, 'outputs.ColumnSpecificationResponse']] = None,
title: Optional[str] = None):
"""
The swagger 2.0 schema describing a single service input or output. See Swagger specification: http://swagger.io/specification/
:param str type: The type of the entity described in swagger.
:param str description: Swagger schema description.
:param str format: The format, if 'type' is not 'object'
:param Mapping[str, 'ColumnSpecificationResponse'] properties: The set of columns within the data table.
:param str title: Swagger schema title.
"""
if type is None:
type = 'object'
pulumi.set(__self__, "type", type)
if description is not None:
pulumi.set(__self__, "description", description)
if format is not None:
pulumi.set(__self__, "format", format)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if title is not None:
pulumi.set(__self__, "title", title)
@property
@pulumi.getter
def type(self) -> str:
"""
The type of the entity described in swagger.
"""
return pulumi.get(self, "type")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
Swagger schema description.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def format(self) -> Optional[str]:
"""
The format, if 'type' is not 'object'
"""
return pulumi.get(self, "format")
@property
@pulumi.getter
def properties(self) -> Optional[Mapping[str, 'outputs.ColumnSpecificationResponse']]:
"""
The set of columns within the data table.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def title(self) -> Optional[str]:
"""
Swagger schema title.
"""
return pulumi.get(self, "title")
@pulumi.output_type
class WebServiceKeysResponse(dict):
"""
Access keys for the web service calls.
"""
def __init__(__self__, *,
primary: Optional[str] = None,
secondary: Optional[str] = None):
"""
Access keys for the web service calls.
:param str primary: The primary access key.
:param str secondary: The secondary access key.
"""
if primary is not None:
pulumi.set(__self__, "primary", primary)
if secondary is not None:
pulumi.set(__self__, "secondary", secondary)
@property
@pulumi.getter
def primary(self) -> Optional[str]:
"""
The primary access key.
"""
return pulumi.get(self, "primary")
@property
@pulumi.getter
def secondary(self) -> Optional[str]:
"""
The secondary access key.
"""
return pulumi.get(self, "secondary")
@pulumi.output_type
class WebServiceParameterResponse(dict):
"""
Web Service Parameter object for node and global parameter
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "certificateThumbprint":
suggest = "certificate_thumbprint"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in WebServiceParameterResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
WebServiceParameterResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
WebServiceParameterResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
certificate_thumbprint: Optional[str] = None,
value: Optional[Any] = None):
"""
Web Service Parameter object for node and global parameter
:param str certificate_thumbprint: If the parameter value in 'value' field is encrypted, the thumbprint of the certificate should be put here.
:param Any value: The parameter value
"""
if certificate_thumbprint is not None:
pulumi.set(__self__, "certificate_thumbprint", certificate_thumbprint)
if value is not None:
pulumi.set(__self__, "value", value)
@property
@pulumi.getter(name="certificateThumbprint")
def certificate_thumbprint(self) -> Optional[str]:
"""
If the parameter value in 'value' field is encrypted, the thumbprint of the certificate should be put here.
"""
return pulumi.get(self, "certificate_thumbprint")
@property
@pulumi.getter
def value(self) -> Optional[Any]:
"""
The parameter value
"""
return pulumi.get(self, "value")
@pulumi.output_type
class WebServicePropertiesForGraphResponse(dict):
"""
Properties specific to a Graph based web service.
"""
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "createdOn":
suggest = "created_on"
elif key == "modifiedOn":
suggest = "modified_on"
elif key == "packageType":
suggest = "package_type"
elif key == "provisioningState":
suggest = "provisioning_state"
elif key == "swaggerLocation":
suggest = "swagger_location"
elif key == "commitmentPlan":
suggest = "commitment_plan"
elif key == "exampleRequest":
suggest = "example_request"
elif key == "exposeSampleData":
suggest = "expose_sample_data"
elif key == "machineLearningWorkspace":
suggest = "machine_learning_workspace"
elif key == "payloadsInBlobStorage":
suggest = "payloads_in_blob_storage"
elif key == "payloadsLocation":
suggest = "payloads_location"
elif key == "readOnly":
suggest = "read_only"
elif key == "realtimeConfiguration":
suggest = "realtime_configuration"
elif key == "storageAccount":
suggest = "storage_account"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in WebServicePropertiesForGraphResponse. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
WebServicePropertiesForGraphResponse.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
WebServicePropertiesForGraphResponse.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
created_on: str,
modified_on: str,
package_type: str,
provisioning_state: str,
swagger_location: str,
assets: Optional[Mapping[str, 'outputs.AssetItemResponse']] = None,
commitment_plan: Optional['outputs.CommitmentPlanResponse'] = None,
description: Optional[str] = None,
diagnostics: Optional['outputs.DiagnosticsConfigurationResponse'] = None,
example_request: Optional['outputs.ExampleRequestResponse'] = None,
expose_sample_data: Optional[bool] = None,
input: Optional['outputs.ServiceInputOutputSpecificationResponse'] = None,
keys: Optional['outputs.WebServiceKeysResponse'] = None,
machine_learning_workspace: Optional['outputs.MachineLearningWorkspaceResponse'] = None,
output: Optional['outputs.ServiceInputOutputSpecificationResponse'] = None,
package: Optional['outputs.GraphPackageResponse'] = None,
parameters: Optional[Mapping[str, 'outputs.WebServiceParameterResponse']] = None,
payloads_in_blob_storage: Optional[bool] = None,
payloads_location: Optional['outputs.BlobLocationResponse'] = None,
read_only: Optional[bool] = None,
realtime_configuration: Optional['outputs.RealtimeConfigurationResponse'] = None,
storage_account: Optional['outputs.StorageAccountResponse'] = None,
title: Optional[str] = None):
"""
Properties specific to a Graph based web service.
:param str created_on: Read Only: The date and time when the web service was created.
:param str modified_on: Read Only: The date and time when the web service was last modified.
:param str package_type: Specifies the package type. Valid values are Graph (Specifies a web service published through the Machine Learning Studio) and Code (Specifies a web service published using code such as Python). Note: Code is not supported at this time.
Expected value is 'Graph'.
:param str provisioning_state: Read Only: The provision state of the web service. Valid values are Unknown, Provisioning, Succeeded, and Failed.
:param str swagger_location: Read Only: Contains the URI of the swagger spec associated with this web service.
:param Mapping[str, 'AssetItemResponse'] assets: Contains user defined properties describing web service assets. Properties are expressed as Key/Value pairs.
:param 'CommitmentPlanResponse' commitment_plan: Contains the commitment plan associated with this web service. Set at creation time. Once set, this value cannot be changed. Note: The commitment plan is not returned from calls to GET operations.
:param str description: The description of the web service.
:param 'DiagnosticsConfigurationResponse' diagnostics: Settings controlling the diagnostics traces collection for the web service.
:param 'ExampleRequestResponse' example_request: Defines sample input data for one or more of the service's inputs.
:param bool expose_sample_data: When set to true, sample data is included in the web service's swagger definition. The default value is true.
:param 'ServiceInputOutputSpecificationResponse' input: Contains the Swagger 2.0 schema describing one or more of the web service's inputs. For more information, see the Swagger specification.
:param 'WebServiceKeysResponse' keys: Contains the web service provisioning keys. If you do not specify provisioning keys, the Azure Machine Learning system generates them for you. Note: The keys are not returned from calls to GET operations.
:param 'MachineLearningWorkspaceResponse' machine_learning_workspace: Specifies the Machine Learning workspace containing the experiment that is source for the web service.
:param 'ServiceInputOutputSpecificationResponse' output: Contains the Swagger 2.0 schema describing one or more of the web service's outputs. For more information, see the Swagger specification.
:param 'GraphPackageResponse' package: The definition of the graph package making up this web service.
:param Mapping[str, 'WebServiceParameterResponse'] parameters: The set of global parameters values defined for the web service, given as a global parameter name to default value map. If no default value is specified, the parameter is considered to be required.
        :param bool payloads_in_blob_storage: When set to true, indicates that the payload size is larger than 3 MB. Otherwise false. If the payload size exceeds 3 MB, the payload is stored in a blob and the PayloadsLocation parameter contains the URI of the blob. Otherwise, this will be set to false and Assets, Input, Output, Package, Parameters, and ExampleRequest are inline. The payload size is determined by adding the sizes of the Assets, Input, Output, Package, Parameters, and ExampleRequest.
        :param 'BlobLocationResponse' payloads_location: The URI of the payload blob. This parameter contains a value only if the payloadsInBlobStorage parameter is set to true. Otherwise it is set to null.
        :param bool read_only: When set to true, indicates that the web service is read-only and can no longer be updated or patched, only removed. The default is false. Note: Once set to true, you cannot change its value.
:param 'RealtimeConfigurationResponse' realtime_configuration: Contains the configuration settings for the web service endpoint.
:param 'StorageAccountResponse' storage_account: Specifies the storage account that Azure Machine Learning uses to store information about the web service. Only the name of the storage account is returned from calls to GET operations. When updating the storage account information, you must ensure that all necessary assets are available in the new storage account or calls to your web service will fail.
:param str title: The title of the web service.
"""
pulumi.set(__self__, "created_on", created_on)
pulumi.set(__self__, "modified_on", modified_on)
pulumi.set(__self__, "package_type", 'Graph')
pulumi.set(__self__, "provisioning_state", provisioning_state)
pulumi.set(__self__, "swagger_location", swagger_location)
if assets is not None:
pulumi.set(__self__, "assets", assets)
if commitment_plan is not None:
pulumi.set(__self__, "commitment_plan", commitment_plan)
if description is not None:
pulumi.set(__self__, "description", description)
if diagnostics is not None:
pulumi.set(__self__, "diagnostics", diagnostics)
if example_request is not None:
pulumi.set(__self__, "example_request", example_request)
if expose_sample_data is not None:
pulumi.set(__self__, "expose_sample_data", expose_sample_data)
if input is not None:
pulumi.set(__self__, "input", input)
if keys is not None:
pulumi.set(__self__, "keys", keys)
if machine_learning_workspace is not None:
pulumi.set(__self__, "machine_learning_workspace", machine_learning_workspace)
if output is not None:
pulumi.set(__self__, "output", output)
if package is not None:
pulumi.set(__self__, "package", package)
if parameters is not None:
pulumi.set(__self__, "parameters", parameters)
if payloads_in_blob_storage is not None:
pulumi.set(__self__, "payloads_in_blob_storage", payloads_in_blob_storage)
if payloads_location is not None:
pulumi.set(__self__, "payloads_location", payloads_location)
if read_only is not None:
pulumi.set(__self__, "read_only", read_only)
if realtime_configuration is not None:
pulumi.set(__self__, "realtime_configuration", realtime_configuration)
if storage_account is not None:
pulumi.set(__self__, "storage_account", storage_account)
if title is not None:
pulumi.set(__self__, "title", title)
@property
@pulumi.getter(name="createdOn")
def created_on(self) -> str:
"""
Read Only: The date and time when the web service was created.
"""
return pulumi.get(self, "created_on")
@property
@pulumi.getter(name="modifiedOn")
def modified_on(self) -> str:
"""
Read Only: The date and time when the web service was last modified.
"""
return pulumi.get(self, "modified_on")
@property
@pulumi.getter(name="packageType")
def package_type(self) -> str:
"""
Specifies the package type. Valid values are Graph (Specifies a web service published through the Machine Learning Studio) and Code (Specifies a web service published using code such as Python). Note: Code is not supported at this time.
Expected value is 'Graph'.
"""
return pulumi.get(self, "package_type")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
Read Only: The provision state of the web service. Valid values are Unknown, Provisioning, Succeeded, and Failed.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="swaggerLocation")
def swagger_location(self) -> str:
"""
Read Only: Contains the URI of the swagger spec associated with this web service.
"""
return pulumi.get(self, "swagger_location")
@property
@pulumi.getter
def assets(self) -> Optional[Mapping[str, 'outputs.AssetItemResponse']]:
"""
Contains user defined properties describing web service assets. Properties are expressed as Key/Value pairs.
"""
return pulumi.get(self, "assets")
@property
@pulumi.getter(name="commitmentPlan")
def commitment_plan(self) -> Optional['outputs.CommitmentPlanResponse']:
"""
Contains the commitment plan associated with this web service. Set at creation time. Once set, this value cannot be changed. Note: The commitment plan is not returned from calls to GET operations.
"""
return pulumi.get(self, "commitment_plan")
@property
@pulumi.getter
def description(self) -> Optional[str]:
"""
The description of the web service.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter
def diagnostics(self) -> Optional['outputs.DiagnosticsConfigurationResponse']:
"""
Settings controlling the diagnostics traces collection for the web service.
"""
return pulumi.get(self, "diagnostics")
@property
@pulumi.getter(name="exampleRequest")
def example_request(self) -> Optional['outputs.ExampleRequestResponse']:
"""
Defines sample input data for one or more of the service's inputs.
"""
return pulumi.get(self, "example_request")
@property
@pulumi.getter(name="exposeSampleData")
def expose_sample_data(self) -> Optional[bool]:
"""
When set to true, sample data is included in the web service's swagger definition. The default value is true.
"""
return pulumi.get(self, "expose_sample_data")
@property
@pulumi.getter
def input(self) -> Optional['outputs.ServiceInputOutputSpecificationResponse']:
"""
Contains the Swagger 2.0 schema describing one or more of the web service's inputs. For more information, see the Swagger specification.
"""
return pulumi.get(self, "input")
@property
@pulumi.getter
def keys(self) -> Optional['outputs.WebServiceKeysResponse']:
"""
Contains the web service provisioning keys. If you do not specify provisioning keys, the Azure Machine Learning system generates them for you. Note: The keys are not returned from calls to GET operations.
"""
return pulumi.get(self, "keys")
@property
@pulumi.getter(name="machineLearningWorkspace")
def machine_learning_workspace(self) -> Optional['outputs.MachineLearningWorkspaceResponse']:
"""
Specifies the Machine Learning workspace containing the experiment that is source for the web service.
"""
return pulumi.get(self, "machine_learning_workspace")
@property
@pulumi.getter
def output(self) -> Optional['outputs.ServiceInputOutputSpecificationResponse']:
"""
Contains the Swagger 2.0 schema describing one or more of the web service's outputs. For more information, see the Swagger specification.
"""
return pulumi.get(self, "output")
@property
@pulumi.getter
def package(self) -> Optional['outputs.GraphPackageResponse']:
"""
The definition of the graph package making up this web service.
"""
return pulumi.get(self, "package")
@property
@pulumi.getter
def parameters(self) -> Optional[Mapping[str, 'outputs.WebServiceParameterResponse']]:
"""
The set of global parameters values defined for the web service, given as a global parameter name to default value map. If no default value is specified, the parameter is considered to be required.
"""
return pulumi.get(self, "parameters")
@property
@pulumi.getter(name="payloadsInBlobStorage")
def payloads_in_blob_storage(self) -> Optional[bool]:
"""
        When set to true, indicates that the payload size is larger than 3 MB. Otherwise false. If the payload size exceeds 3 MB, the payload is stored in a blob and the PayloadsLocation parameter contains the URI of the blob. Otherwise, this will be set to false and Assets, Input, Output, Package, Parameters, and ExampleRequest are inline. The payload size is determined by adding the sizes of the Assets, Input, Output, Package, Parameters, and ExampleRequest.
"""
return pulumi.get(self, "payloads_in_blob_storage")
@property
@pulumi.getter(name="payloadsLocation")
def payloads_location(self) -> Optional['outputs.BlobLocationResponse']:
"""
        The URI of the payload blob. This parameter contains a value only if the payloadsInBlobStorage parameter is set to true. Otherwise it is set to null.
"""
return pulumi.get(self, "payloads_location")
@property
@pulumi.getter(name="readOnly")
def read_only(self) -> Optional[bool]:
"""
        When set to true, indicates that the web service is read-only and can no longer be updated or patched, only removed. The default is false. Note: Once set to true, you cannot change its value.
"""
return pulumi.get(self, "read_only")
@property
@pulumi.getter(name="realtimeConfiguration")
def realtime_configuration(self) -> Optional['outputs.RealtimeConfigurationResponse']:
"""
Contains the configuration settings for the web service endpoint.
"""
return pulumi.get(self, "realtime_configuration")
@property
@pulumi.getter(name="storageAccount")
def storage_account(self) -> Optional['outputs.StorageAccountResponse']:
"""
Specifies the storage account that Azure Machine Learning uses to store information about the web service. Only the name of the storage account is returned from calls to GET operations. When updating the storage account information, you must ensure that all necessary assets are available in the new storage account or calls to your web service will fail.
"""
return pulumi.get(self, "storage_account")
@property
@pulumi.getter
def title(self) -> Optional[str]:
"""
The title of the web service.
"""
return pulumi.get(self, "title")
|
py | b4048c9fcee7c0ec952c599db8a5033603d9614a | from fastapi import FastAPI, Body, status, HTTPException
from fastapi.encoders import jsonable_encoder
from app.settings import configs
from app.model import CommentSchema, UpdateCommentSchema
app = FastAPI(
title=configs.API_TITLE,
version=configs.API_VERSION,
description=configs.API_DESCRIPTION
)
@app.get("/", tags=["Home"])
def get_root() -> dict:
return {
"message": configs.API_WELCOME_MESSAGE
}
comments = [
{
"id": 1,
"name": "Michael",
"ingredients": ["Let's start a discussion, What is the best API framework in your opinion?"]
}
]
@app.get("/comment", tags=["Comment"], status_code=status.HTTP_200_OK)
def get_comments() -> dict:
return {
"data": comments
}
@app.get("/comment/{id}", tags=["Comment"], status_code=status.HTTP_200_OK)
def get_comment(id: int) -> dict:
if id > len(comments) or id < 1:
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail= "Invalid ID passed.")
for comment in comments:
if comment['id'] == id:
return {
"data": [
comment
]
}
    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="No such comment with ID {} exists".format(id))
@app.post("/comment", tags=["Comment"], status_code=status.HTTP_201_CREATED)
def add_comment(comment: CommentSchema = Body(...)) -> dict:
comment.id = len(comments) + 1
comments.append(comment.dict())
return {
"message": "Comment added successfully."
}
@app.put("/comment", tags=["Comment"], status_code=status.HTTP_202_ACCEPTED)
def update_comment(id: int, comment_data: UpdateCommentSchema) -> dict:
stored_comment = {}
for comment in comments:
if comment["id"] == id:
stored_comment = comment
if not stored_comment:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="No such comment exists.")
stored_comment_model = CommentSchema(**stored_comment)
updated_comment = comment_data.dict(exclude_unset=True)
    updated_comment = stored_comment_model.copy(update=updated_comment)
    comments[comments.index(stored_comment)] = jsonable_encoder(updated_comment)
return {
"message": "Comment updated successfully."
}
@app.delete("/comment/{id}", tags=["Comment"], status_code=status.HTTP_202_ACCEPTED)
def delete_comment(id: int) -> dict:
if id > len(comments) or id < 1:
raise HTTPException(status_code=status.HTTP_409_CONFLICT, detail= "Invalid ID passed.")
for comment in comments:
if comment['id'] == id:
comments.remove(comment)
return {
"message": "Comment deleted successfully."
}
    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="No such comment with ID {} exists".format(id))
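# Hedged local-run sketch (not part of the original app): serve the API with
# uvicorn, which is assumed to be installed alongside FastAPI.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="127.0.0.1", port=8000)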
|
py | b4048d9e146a7fd3e0f46647ed53439a621e9d17 | from __future__ import print_function
import distutils.spawn
import shlex
import subprocess
import sys
from setuptools import find_packages
from setuptools import setup
version = "3.12.4"
if len(sys.argv) > 1 and sys.argv[1] == "release":
if not distutils.spawn.find_executable("twine"):
print(
"Please install twine:\n\n\tpip install twine\n", file=sys.stderr
)
sys.exit(1)
commands = [
"git pull origin master",
"git tag v{:s}".format(version),
"git push origin master --tag",
"python setup.py sdist",
"twine upload dist/gdown-{:s}.tar.gz".format(version),
]
for cmd in commands:
subprocess.check_call(shlex.split(cmd))
sys.exit(0)
def get_long_description():
with open("README.md") as f:
long_description = f.read()
try:
import github2pypi
return github2pypi.replace_url(
slug="wkentaro/gdown", content=long_description
)
except Exception:
return long_description
setup(
name="gdown",
version=version,
packages=find_packages(exclude=["github2pypi"]),
install_requires=["filelock", "requests[socks]", "six", "tqdm"],
description="Google Drive direct download of big files.",
long_description=get_long_description(),
long_description_content_type="text/markdown",
author="Kentaro Wada",
author_email="[email protected]",
url="http://github.com/wkentaro/gdown",
license="MIT",
keywords="Data Download",
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Natural Language :: English",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: Implementation :: CPython",
],
entry_points={"console_scripts": ["gdown=gdown.cli:main"]},
)
|
py | b4048e2dd7127c16fc894f221c380c956a8ae5ce | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from brainiak.isc import isc
from statsmodels.stats.multitest import multipletests
from statistical_tests import bootstrap_test, fisher_mean
from coupling_metrics import lagged_isc
# Load in PCA-reduced LSTMS
k = 100
lstms_pca = np.load(f'results/lstms_tanh-z_pca-k{k}.npy')
# Compute simple ISC and save
n_matchups = 4
n_repeats = 8
n_players = 4
n_pairs = n_players * (n_players - 1) // 2
iscs = np.full((n_matchups, n_repeats, n_pairs, k), np.nan)
for matchup in np.arange(n_matchups):
for repeat in np.arange(n_repeats):
lstms_rep = np.moveaxis(lstms_pca[matchup, repeat], 0, 2)
iscs[matchup, repeat] = isc(lstms_rep, pairwise=True)
        print("Finished computing ISC for "
f"matchup {matchup} repeat {repeat}")
np.save(f'results/iscs_tanh-z_pca-k{k}.npy', iscs)
# Plot cooperative/competitive ISC for top 10 PCs
matchup = 0
n_repeats = 8
pcs = np.arange(10)
sns.set_context('notebook', font_scale=1.2)
fig, axs = plt.subplots(2, 5, figsize=(25, 8))
for pc, ax in zip(pcs, axs.ravel()):
corr = fisher_mean([np.corrcoef(lstms_pca[matchup, r, ..., pc])
for r in np.arange(n_repeats)], axis=0)
sns.heatmap(corr, square=True, annot=True, vmin=-1, vmax=1,
cmap='RdBu_r', xticklabels=False, yticklabels=False,
fmt='.2f', ax=ax)
ax.set_title(f'PC{pc + 1}')
plt.savefig(f'figures/isc_coop-comp_tanh-z_pca-k{k}_m{matchup}.png',
dpi=300, bbox_inches='tight')
# Difference in cooperative/competitive ISC across PCs
matchup = 0
n_repeats = 8
n_pcs = 100
# Compute differences between cooperative and competitive ISCs
isc_diffs = []
isc_diffs_df = {'difference': [], 'PC': [], 'repeat': []}
for pc in np.arange(n_pcs):
corrs = [np.corrcoef(lstms_pca[matchup, r, ..., pc])
for r in np.arange(n_repeats)]
diffs = [np.mean(c[[0, 3], [1, 2]]) - np.mean(c[0:2, 2:4])
for c in corrs]
isc_pc_diffs = []
for r, diff in enumerate(diffs):
isc_diffs_df['difference'].append(diff)
isc_diffs_df['PC'].append(pc + 1)
isc_diffs_df['repeat'].append(r)
isc_pc_diffs.append(diff)
isc_diffs.append(isc_pc_diffs)
isc_diffs_df = pd.DataFrame(isc_diffs_df)
isc_diffs = np.array(isc_diffs).T
# Bootstrap test for significance of difference
observed, ci, p, distribution = bootstrap_test(isc_diffs,
bootstrap_axis=0,
n_bootstraps=1000,
estimator=fisher_mean,
ci_percentile=95,
side='two-sided')
# FDR correction of p-values
_, fdr_p, _, _ = multipletests(p, method='fdr_bh')
# Plot ISCs for 100 PCs with significance markers
sig_pos = ((fdr_p < .05) & (observed > 0)).nonzero()[0]
sig_neg = ((fdr_p < .05) & (observed < 0)).nonzero()[0]
sns.set_context('notebook', font_scale=1.2)
fig, ax = plt.subplots(figsize=(16, 4))
sns.barplot(x='PC', y='difference', data=isc_diffs_df, ax=ax, color='.6',
estimator=fisher_mean)
#ax.set_ylim(-.375, .325) # for matchup = 3 (sig y = -.01)
ax.set_ylim(-.3, 1) # for matchup = 0
ax.set_xticks([0, 19, 39, 59, 79, 99])
for sig_pc in sig_pos:
ax.annotate('.', (sig_pc, -.02), color='tab:red', size=40,
xycoords=('data', 'axes fraction'),
ha='center', va='bottom')
for sig_pc in sig_neg:
ax.annotate('.', (sig_pc, -.02), color='tab:blue', size=40,
xycoords=('data', 'axes fraction'),
ha='center', va='bottom')
ax.set_ylabel('cooperative – competitive ISC')
ax.set_title(f'difference in cooperative vs. competitive ISC for 100 PCs');
sns.despine()
plt.savefig(f'figures/isc_diff-bars_tanh-z_pca-k{k}_m{matchup}.png',
dpi=300, bbox_inches='tight')
# Load in PCA-reduced LSTMs with confounds regressed out
reg = 'com' # 'pre', 'hud', 'act', or 'com'
lstms_pca_reg = np.load(f'results/lstms_tanh-z_pca-k{k}_reg-{reg}.npy')
# Compute simple ISC and save
n_matchups = 4
n_repeats = 8
n_players = 4
n_pairs = n_players * (n_players - 1) // 2
iscs = np.full((n_matchups, n_repeats, n_pairs, k), np.nan)
for matchup in np.arange(n_matchups):
for repeat in np.arange(n_repeats):
lstms_rep = np.moveaxis(lstms_pca_reg[matchup, repeat], 0, 2)
iscs[matchup, repeat] = isc(lstms_rep, pairwise=True)
        print("Finished computing ISC for "
f"matchup {matchup} repeat {repeat}")
np.save(f'results/iscs_tanh-z_pca-k{k}_reg-{reg}.npy', iscs)
# Plot cooperative/competitive ISC for top 10 PCs
matchup = 0
n_repeats = 8
pcs = np.arange(10)
sns.set_context('notebook', font_scale=1.2)
fig, axs = plt.subplots(2, 5, figsize=(25, 8))
for pc, ax in zip(pcs, axs.ravel()):
corr = fisher_mean([np.corrcoef(lstms_pca_reg[matchup, r, ..., pc])
for r in np.arange(n_repeats)], axis=0)
sns.heatmap(corr, square=True, annot=True, vmin=-1, vmax=1,
cmap='RdBu_r', xticklabels=False, yticklabels=False,
fmt='.2f', ax=ax)
ax.set_title(f'PC{pc + 1}')
plt.savefig(f'figures/isc_coop-comp_tanh-z_pca-k{k}_reg-{reg}_m{matchup}.png',
dpi=300, bbox_inches='tight')
# Compute differences between cooperative and competitive ISCs
matchup = 0
n_repeats = 8
n_pcs = 100
isc_diffs, isc_coops = [], []
isc_diffs_df = {'difference': [], 'PC': [], 'repeat': []}
for pc in np.arange(n_pcs):
corrs = [np.corrcoef(lstms_pca_reg[matchup, r, ..., pc])
for r in np.arange(n_repeats)]
coops = [np.mean(c[[0, 3], [1, 2]]) for c in corrs]
diffs = [np.mean(c[[0, 3], [1, 2]]) - np.mean(c[0:2, 2:4])
for c in corrs]
isc_coops.append(coops)
isc_diffs.append(diffs)
isc_pc_diffs = []
for r, diff in enumerate(diffs):
isc_diffs_df['difference'].append(diff)
isc_diffs_df['PC'].append(pc + 1)
isc_diffs_df['repeat'].append(r)
isc_pc_diffs.append(diff)
isc_diffs_df = pd.DataFrame(isc_diffs_df)
isc_coops = np.array(isc_coops).T
isc_diffs = np.array(isc_diffs).T
# Get PCs with largest difference between cooperative/competitive
n_top = 10
isc_diff_means = fisher_mean(isc_diffs, axis=0)
top_diffs = np.argpartition(isc_diff_means, -n_top)[-n_top:]
top_diffs = top_diffs[np.argsort(isc_diff_means[top_diffs])[::-1]]
# Get PCs with largest cooperative ISC (irrespective of competitive ISC)
n_top = 10
isc_coop_means = fisher_mean(isc_coops, axis=0)
top_coops = np.argpartition(isc_coop_means, -n_top)[-n_top:]
top_coops = top_coops[np.argsort(isc_coop_means[top_coops])[::-1]]
# Find overlap between top PCs
top_both = list(set(top_diffs) & set(top_coops))
# For matchup 0: [2, 7, 9, 23, 24]
# Bootstrap test for significance of difference
observed, ci, p, distribution = bootstrap_test(isc_diffs,
bootstrap_axis=0,
n_bootstraps=1000,
estimator=fisher_mean,
ci_percentile=95,
side='two-sided')
# FDR correction of p-values
_, fdr_p, _, _ = multipletests(p, method='fdr_bh')
# Plot ISCs for 100 PCs with significance markers
sig_pos = ((fdr_p < .05) & (observed > 0)).nonzero()[0]
sig_neg = ((fdr_p < .05) & (observed < 0)).nonzero()[0]
sns.set_context('notebook', font_scale=1.2)
fig, ax = plt.subplots(figsize=(16, 4))
sns.barplot(x='PC', y='difference', data=isc_diffs_df, ax=ax, color='.6',
estimator=fisher_mean)
#ax.set_ylim(-.375, .325) # for matchup = 3
ax.set_ylim(-.3, 1) # for matchup = 0
ax.set_xticks([0, 19, 39, 59, 79, 99])
for sig_pc in sig_pos:
ax.annotate('.', (sig_pc, -.02), color='tab:red', size=40,
xycoords=('data', 'axes fraction'),
ha='center', va='bottom')
for sig_pc in sig_neg:
ax.annotate('.', (sig_pc, -.02), color='tab:blue', size=40,
xycoords=('data', 'axes fraction'),
ha='center', va='bottom')
ax.set_ylabel('cooperative – competitive ISC')
ax.set_title(f'difference in cooperative vs. competitive ISC for 100 PCs');
sns.despine()
plt.savefig('figures/isc_diff-bars_tanh-z_'
f'pca-k{k}_reg-{reg}_m{matchup}.png',
dpi=300, bbox_inches='tight')
# Zoom in and replot to highlight top PCs
from matplotlib.patches import Patch
colors = np.array(['.7'] * k, dtype='object')
colors[top_coops] = 'tab:red'
colors[top_diffs] = 'tab:blue'
colors[top_both] = 'tab:purple'
np.save('figures/colors_top-bars_tanh-z_'
f'pca-k{k}_reg-{reg}_m{matchup}.npy', colors)
sns.set_context('notebook', font_scale=1.2)
fig, ax = plt.subplots(figsize=(16, 4))
sns.barplot(x='PC', y='difference', data=isc_diffs_df, ax=ax, color='.6',
estimator=fisher_mean, palette=colors)
#ax.set_ylim(-.375, .325) # for matchup = 3
ax.set_ylim(-.05, .4) # for matchup = 0
ax.set_xticks([0, 19, 39, 59, 79, 99])
ax.set_ylabel('cooperative – competitive ISC')
ax.set_title(f'difference in cooperative vs. competitive ISC for 100 PCs')
sns.despine()
legend_elements = [Patch(facecolor='tab:red'),
Patch(facecolor='tab:blue'),
Patch(facecolor='tab:purple'),
Patch(facecolor='tab:purple')]
ax.legend(handles=legend_elements, loc='upper right',
labels=['', '', 'top 10 cooperative PCs',
'top 10 difference PCs'],
ncol=2, handletextpad=0.5, handlelength=1.0, columnspacing=-0.5)
plt.savefig('figures/isc_top-bars_tanh-z_'
f'pca-k{k}_reg-{reg}_m{matchup}.png',
dpi=300, bbox_inches='tight')
# Plot cooperative/competitive ISC for top 10 PCs
matchup = 0
n_repeats = 8
pcs = top_both
fig, axs = plt.subplots(1, 5, figsize=(18, 8))
for pc, ax in zip(pcs, axs.ravel()):
corr = fisher_mean([np.corrcoef(lstms_pca_reg[matchup, r, ..., pc])
for r in np.arange(n_repeats)], axis=0)
sns.heatmap(corr, square=True, annot=True, vmin=-1, vmax=1,
cmap='RdBu_r', xticklabels=False, yticklabels=False,
fmt='.2f', ax=ax, cbar_kws={'shrink': .32})
ax.set_title(f'PC{pc + 1}')
plt.savefig(f'figures/isc_top-coop_tanh-z_pca-k{k}_reg-{reg}_m{matchup}.png',
dpi=300, bbox_inches='tight')
# Lagged ISC for selected PCs
matchup = 0
n_repeats = 8
n_lags = 900
pc_ids = np.arange(10)
# Compute lagged ISC for each repeat
lagged_iscs = []
for repeat in np.arange(n_repeats):
# Slicing with array seems to shift axis?
lstms_rep = np.moveaxis(lstms_pca[matchup, repeat, ..., pc_ids], 2, 0)
lagged_rep, lags = lagged_isc(lstms_rep, n_lags=n_lags, circular=True)
lagged_iscs.append(lagged_rep)
print(f"Finished computing lagged ISC for repeat {repeat}")
lagged_iscs = np.stack(lagged_iscs, axis=0)
# Get lagged ISCs for cooperative pairs
coop_ids, comp_ids = [0, 5], [1, 2, 3, 4]
lagged_coop = np.mean(lagged_iscs[:, coop_ids, ...], axis=1)
lagged_comp = np.mean(lagged_iscs[:, comp_ids, ...], axis=1)
# Bootstrap test to assess significance
observed, ci, ps, distribution = bootstrap_test(lagged_coop,
bootstrap_axis=0,
n_bootstraps=1000,
estimator=fisher_mean,
ci_percentile=95,
side='right')
# FDR correction across lags
fdr_ps = []
for p in ps:
_, fdr_p, _, _ = multipletests(p, method='fdr_bh')
fdr_ps.append(fdr_p)
fdr_ps = np.array(fdr_ps)
# Plot lagged ISC with significance indicator
n_rows, n_cols = 5, 2
fig, axs = plt.subplots(n_rows, n_cols, figsize=(9, 8))
pc_ids = np.arange(10)
threshold = .02
# sig_ids depends on pc_id, which is only defined inside the plotting loop
# below; compute it there if re-enabling the significance scatter at the end
#sig_ids = (fdr_ps[pc_id] <= threshold).nonzero()[0]
for i, (pc_id, ax) in enumerate(zip(pc_ids, axs.ravel())):
ax.plot(lags, np.concatenate(lagged_iscs[:, coop_ids, pc_id]).T,
color='.8', alpha=.5, zorder=1);
ax.plot(lags, np.mean(lagged_coop[:, pc_id, :], axis=0),
color='.4', zorder=2);
if i not in [8, 9]:
ax.xaxis.set_ticks([])
else:
ax.set_xticks(lags[::15 * 10])
ax.set_xticklabels(np.unique(lags // 15)[::10])
ax.set_xlabel('lag (seconds)')
if i % 2 != 0:
ax.yaxis.set_ticks([])
ax.set_ylim(-.3, .7)
ax.set_xlim(-n_lags, n_lags)
ax.set_title(f'PC{pc_id + 1} cooperative ISC',
loc='left', va='top', x=.02, y=.95)
sns.despine()
plt.tight_layout()
plt.savefig('figures/isc_lag-60s_tanh-z_'
f'pca-k{k}_m{matchup}.png',
dpi=300, bbox_inches='tight')
#plt.scatter(lags[sig_ids], np.mean(lagged_coop[:, pc_id], axis=0)[sig_ids],
# color='tab:red', marker='.', zorder=3)
# Load in PCA-reduced LSTMs with confounds regressed out
reg = 'com' # 'pre', 'hud', 'act', or 'com'
lstms_pca_reg = np.load(f'results/lstms_tanh-z_pca-k{k}_reg-{reg}.npy')
# Lagged ISC for selected PCs
matchup = 0
n_repeats = 8
n_lags = 900
pc_ids = np.arange(10)
# Compute lagged ISC for each repeat
lagged_iscs = []
for repeat in np.arange(n_repeats):
# Slicing with array seems to shift axis?
lstms_rep = np.moveaxis(lstms_pca_reg[matchup, repeat, ..., pc_ids], 2, 0)
lagged_rep, lags = lagged_isc(lstms_rep, n_lags=n_lags, circular=True)
lagged_iscs.append(lagged_rep)
print(f"Finished computing lagged ISC for repeat {repeat}")
lagged_iscs = np.stack(lagged_iscs, axis=0)
# Get lagged ISCs for cooperative pairs
coop_ids, comp_ids = [0, 5], [1, 2, 3, 4]
lagged_coop = np.mean(lagged_iscs[:, coop_ids, ...], axis=1)
lagged_comp = np.mean(lagged_iscs[:, comp_ids, ...], axis=1)
# Bootstrap test to assess significance
observed, ci, ps, distribution = bootstrap_test(lagged_coop,
bootstrap_axis=0,
n_bootstraps=1000,
estimator=fisher_mean,
ci_percentile=95,
side='right')
# FDR correction across lags
fdr_ps = []
for p in ps:
_, fdr_p, _, _ = multipletests(p, method='fdr_bh')
fdr_ps.append(fdr_p)
fdr_ps = np.array(fdr_ps)
# Plot lagged ISC with significance indicator
n_rows, n_cols = 5, 2
fig, axs = plt.subplots(n_rows, n_cols, figsize=(9, 8))
pc_ids = np.arange(10)
threshold = .02
# sig_ids depends on pc_id, which is only defined inside the plotting loop
# below; compute it there if re-enabling the significance scatter at the end
#sig_ids = (fdr_ps[pc_id] <= threshold).nonzero()[0]
for i, (pc_id, ax) in enumerate(zip(pc_ids, axs.ravel())):
ax.plot(lags, np.concatenate(lagged_iscs[:, coop_ids, pc_id]).T,
color='.8', alpha=.5, zorder=1);
ax.plot(lags, np.mean(lagged_coop[:, pc_id, :], axis=0),
color='.4', zorder=2);
if i not in [8, 9]:
ax.xaxis.set_ticks([])
else:
ax.set_xticks(lags[::15 * 10])
ax.set_xticklabels(np.unique(lags // 15)[::10])
ax.set_xlabel('lag (seconds)')
if i % 2 != 0:
ax.yaxis.set_ticks([])
ax.set_ylim(-.3, .7)
ax.set_xlim(-n_lags, n_lags)
ax.set_title(f'PC{pc_id + 1} cooperative ISC',
loc='left', va='top', x=.02, y=.95)
sns.despine()
plt.tight_layout()
plt.savefig('figures/isc_lag-60s_tanh-z_'
f'pca-k{k}_reg-{reg}_m{matchup}.png',
dpi=300, bbox_inches='tight')
#plt.scatter(lags[sig_ids], np.mean(lagged_coop[:, pc_id], axis=0)[sig_ids],
# color='tab:red', marker='.', zorder=3)
|
py | b4048e7ebd98005e6e6b0dff80f9e8783b471667 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "query.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
py | b4048ecbe8d04527d184c762085210192ae7b9dc | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
class DeployError(Exception):
def __init__(self, msg: str, module: str = 'system', model: str = ''):
super().__init__(self)
self.msg = msg
self.module = module
self.model = model
def __str__(self):
text = ''
if self.module:
text += '[%s]' % self.module
if self.model:
text += '<%s>' % self.model
return text + ' ' + self.msg
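if __name__ == "__main__":
    # Hedged usage sketch, not part of the original module; the module/model
    # names below are made up for illustration.
    try:
        raise DeployError("missing config file", module="web", model="nginx")
    except DeployError as err:
        print(err)  # -> [web]<nginx> missing config file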
|
py | b404908f55b11ca38f989253dbfbb6ffbab804bd | from Jumpscale import j
class LoggerFactory(j.application.JSBaseClass):
__jslocation__ = "j.tools.logger"
# _CHILDCLASS = LoggerBase
# _LoggerInstance = LoggerInstance
@property
def debug(self):
return j.core.myenv.config["DEBUG"]
@debug.setter
def debug(self, value):
assert j.data.types.bool.check(value)
config = {}
config["DEBUG"] = value
self.config = config
@property
def config(self):
res = {}
for name in j.core.myenv.config.keys():
if name.startswith("LOGGER") or name == "DEBUG":
res[name] = j.core.myenv.config[name]
return res
@config.setter
def config(self, value):
"""
default :
{'DEBUG': True,
'LOGGER_INCLUDE': ['*'],
'LOGGER_EXCLUDE': ['sal.fs'],
'LOGGER_LEVEL': 15,
'LOGGER_CONSOLE': False,
'LOGGER_REDIS': True
'LOGGER_REDIS_ADDR': None #NOT USED YET, std on the core redis
'LOGGER_REDIS_PORT': None
'LOGGER_REDIS_SECRET': None
}
:param value: dict with config properties, can be all or some of the above
:return:
"""
assert j.data.types.dict.check(value)
changed = False
for name in j.core.myenv.config.keys():
if name.startswith("LOGGER") or name == "DEBUG":
if name in value:
if j.core.myenv.config[name] != value[name]:
changed = True
self._log_debug("changed in config: %s:%s" % (name, value[name]))
j.core.myenv.config[name] = value[name]
if changed:
j.core.myenv.config_save()
self.reload()
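    # Hedged usage sketch (not part of the original class): toggling debug
    # logging and console output through the setter above. Keys omitted from
    # the dict keep their current values in j.core.myenv.config.
    #
    #   j.tools.logger.config = {"DEBUG": True, "LOGGER_CONSOLE": True}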
def reload(self):
"""
kosmos 'j.tools.logger.reload()'
will walk over jsbase classes & reload the logging config
:return:
"""
for obj in j.application._iterate_rootobj():
obj._logger_set(children=True)
# self._print(obj._key)
def test(self, name="base"):
"""
kosmos 'j.tools.logger.test()'
"""
self._test_run(name=name)
|
py | b40490df4989cdf221fb7754f4580e3e2614d84c |
class DummyCharDisplayDriver(object):
"""Class used in tests"""
def __init__(self):
pass
@property
def display_type(self):
return "char"
def home(self):
"""Move the cursor back to its home (first line and first column)."""
print("dummy lcd: home")
def clear(self):
"""Clear the LCD."""
print("dummy lcd: clear")
def set_cursor(self, col, row):
"""Move the cursor to an explicit column and row position."""
print("dummy lcd: set_cursor", col, row)
def enable_display(self, enable):
"""Enable or disable the display. Set enable to True to enable."""
print("dummy lcd: enable", enable)
def show_cursor(self, show):
"""Show or hide the cursor. Cursor is shown if show is True."""
print("dummy lcd: show_cursor", show)
def blink(self, blink):
"""Turn on or off cursor blinking. Set blink to True to enable blinking."""
print("dummy lcd: blink", blink)
def move_left(self):
"""Move display left one position."""
print("dummy lcd: move_left")
def move_right(self):
"""Move display right one position."""
        print("dummy lcd: move_right")
def set_left_to_right(self):
"""Set text direction left to right."""
        print("dummy lcd: set_left_to_right")
def set_right_to_left(self):
"""Set text direction right to left."""
        print("dummy lcd: set_right_to_left")
def autoscroll(self, autoscroll):
"""Autoscroll will 'right justify' text from the cursor if set True,
otherwise it will 'left justify' the text.
"""
print("dummy lcd: autoscroll")
def message(self, text):
"""Write text to display. Note that text can include newlines."""
print("dummy lcd: message", text)
def set_backlight(self, backlight):
"""Enable or disable the backlight. If PWM is not enabled (default), a
non-zero backlight value will turn on the backlight and a zero value will
turn it off. If PWM is enabled, backlight can be any value from 0.0 to
1.0, with 1.0 being full intensity backlight.
"""
print("dummy lcd: backlight", backlight)
def create_char(self, location, pattern):
"""Fill one of the first 8 CGRAM locations with custom characters.
The location parameter should be between 0 and 7 and pattern should
        provide an array of 8 bytes containing the pattern. E.g. you can easily
design your custom character at http://www.quinapalus.com/hd44780udg.html
To show your custom character use eg. lcd.message('\x01')
"""
print("dummy lcd: create_char", location, pattern)
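    # Illustrative sketch (not part of the driver): loading a hand-made 5x8
    # glyph into CGRAM slot 1 and printing it; the pattern bytes are just an
    # example, any list of 8 bytes works.
    #
    #   lcd = DummyCharDisplayDriver()
    #   lcd.create_char(1, [0x00, 0x0a, 0x1f, 0x1f, 0x0e, 0x04, 0x00, 0x00])
    #   lcd.message('\x01')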
class DummyBitmapDisplayDriver(object):
"""Base class for SSD1306-based OLED displays. Implementors should subclass
and provide an implementation for the _initialize function.
"""
def __init__(self, width, height):
self.width = width
self.height = height
self._pages = height//8
self._buffer = [0]*(width*self._pages)
@property
def display_type(self):
return "bitmap"
def reset(self):
"""Reset the display."""
print("bitmap display: reset")
def display(self):
"""Write display buffer to physical display."""
print("bitmap display: display")
def image(self, image):
"""Set buffer to value of Python Imaging Library image. The image should
be in 1 bit mode and a size equal to the display size.
"""
if image.mode != '1':
raise ValueError('Image must be in mode 1.')
imwidth, imheight = image.size
if imwidth != self.width or imheight != self.height:
raise ValueError('Image must be same dimensions as display ({0}x{1}).' \
.format(self.width, self.height))
print("bitmap display: image")
image.save("dummydisplay.bmp")
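    # Illustrative sketch (assumes Pillow is installed; not part of the
    # driver): building a 1-bit image matching the display size and passing
    # it to image() above before flushing with display().
    #
    #   from PIL import Image, ImageDraw
    #   disp = DummyBitmapDisplayDriver(128, 64)
    #   img = Image.new('1', (disp.width, disp.height))
    #   ImageDraw.Draw(img).text((0, 0), "hello", fill=1)
    #   disp.image(img)
    #   disp.display()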
def clear(self):
"""Clear contents of image buffer."""
print("bitmap display: clear")
def set_contrast(self, contrast):
"""Sets the contrast of the display. Contrast should be a value between
0 and 255."""
print("bitmap display: set_contrast", contrast)
def dim(self, dim):
"""Adjusts contrast to dim the display if dim is True, otherwise sets the
contrast to normal brightness if dim is False.
"""
print("bitmap display: dim", dim) |
py | b4049106d655c76bec08d59c86f1f0b01ca7ef6c | """
SE-PreResNet for ImageNet-1K, implemented in PyTorch.
Original paper: 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
"""
__all__ = ['SEPreResNet', 'sepreresnet10', 'sepreresnet12', 'sepreresnet14', 'sepreresnet16', 'sepreresnet18',
'sepreresnet26', 'sepreresnetbc26b', 'sepreresnet34', 'sepreresnetbc38b', 'sepreresnet50', 'sepreresnet50b',
'sepreresnet101', 'sepreresnet101b', 'sepreresnet152', 'sepreresnet152b', 'sepreresnet200',
'sepreresnet200b', 'SEPreResUnit']
import os
import torch.nn as nn
import torch.nn.init as init
from .common import conv1x1, SEBlock
from .preresnet import PreResBlock, PreResBottleneck, PreResInitBlock, PreResActivation
class SEPreResUnit(nn.Module):
"""
SE-PreResNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int
Strides of the convolution.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer of the block.
"""
def __init__(self,
in_channels,
out_channels,
stride,
bottleneck,
conv1_stride):
super(SEPreResUnit, self).__init__()
self.resize_identity = (in_channels != out_channels) or (stride != 1)
if bottleneck:
self.body = PreResBottleneck(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
conv1_stride=conv1_stride)
else:
self.body = PreResBlock(
in_channels=in_channels,
out_channels=out_channels,
stride=stride)
self.se = SEBlock(channels=out_channels)
if self.resize_identity:
self.identity_conv = conv1x1(
in_channels=in_channels,
out_channels=out_channels,
stride=stride)
def forward(self, x):
identity = x
x, x_pre_activ = self.body(x)
x = self.se(x)
if self.resize_identity:
identity = self.identity_conv(x_pre_activ)
x = x + identity
return x
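# Hedged shape-check sketch (not in the original file): a single bottleneck
# SE-PreResNet unit that widens channels and halves the spatial resolution.
#
#   import torch
#   unit = SEPreResUnit(64, 256, stride=2, bottleneck=True, conv1_stride=False)
#   out = unit(torch.randn(1, 64, 56, 56))   # -> (1, 256, 28, 28)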
class SEPreResNet(nn.Module):
"""
SE-PreResNet model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
bottleneck : bool
Whether to use a bottleneck or simple block in units.
conv1_stride : bool
Whether to use stride in the first or the second convolution layer in units.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
num_classes : int, default 1000
Number of classification classes.
"""
def __init__(self,
channels,
init_block_channels,
bottleneck,
conv1_stride,
in_channels=3,
in_size=(224, 224),
num_classes=1000):
super(SEPreResNet, self).__init__()
self.in_size = in_size
self.num_classes = num_classes
self.features = nn.Sequential()
self.features.add_module("init_block", PreResInitBlock(
in_channels=in_channels,
out_channels=init_block_channels))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = nn.Sequential()
for j, out_channels in enumerate(channels_per_stage):
stride = 1 if (i == 0) or (j != 0) else 2
stage.add_module("unit{}".format(j + 1), SEPreResUnit(
in_channels=in_channels,
out_channels=out_channels,
stride=stride,
bottleneck=bottleneck,
conv1_stride=conv1_stride))
in_channels = out_channels
self.features.add_module("stage{}".format(i + 1), stage)
self.features.add_module("post_activ", PreResActivation(in_channels=in_channels))
self.features.add_module("final_pool", nn.AvgPool2d(
kernel_size=7,
stride=1))
self.output = nn.Linear(
in_features=in_channels,
out_features=num_classes)
self._init_params()
def _init_params(self):
for name, module in self.named_modules():
if isinstance(module, nn.Conv2d):
init.kaiming_uniform_(module.weight)
if module.bias is not None:
init.constant_(module.bias, 0)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.output(x)
return x
def get_sepreresnet(blocks,
bottleneck=None,
conv1_stride=True,
model_name=None,
pretrained=False,
root=os.path.join("~", ".torch", "models"),
**kwargs):
"""
Create SE-PreResNet model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
bottleneck : bool, default None
Whether to use a bottleneck or simple block in units.
conv1_stride : bool, default True
Whether to use stride in the first or the second convolution layer in units.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
if bottleneck is None:
bottleneck = (blocks >= 50)
if blocks == 10:
layers = [1, 1, 1, 1]
elif blocks == 12:
layers = [2, 1, 1, 1]
elif blocks == 14 and not bottleneck:
layers = [2, 2, 1, 1]
elif (blocks == 14) and bottleneck:
layers = [1, 1, 1, 1]
elif blocks == 16:
layers = [2, 2, 2, 1]
elif blocks == 18:
layers = [2, 2, 2, 2]
elif (blocks == 26) and not bottleneck:
layers = [3, 3, 3, 3]
elif (blocks == 26) and bottleneck:
layers = [2, 2, 2, 2]
elif blocks == 34:
layers = [3, 4, 6, 3]
elif (blocks == 38) and bottleneck:
layers = [3, 3, 3, 3]
elif blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
elif blocks == 269:
layers = [3, 30, 48, 8]
else:
raise ValueError("Unsupported SE-PreResNet with number of blocks: {}".format(blocks))
if bottleneck:
assert (sum(layers) * 3 + 2 == blocks)
else:
assert (sum(layers) * 2 + 2 == blocks)
init_block_channels = 64
channels_per_layers = [64, 128, 256, 512]
if bottleneck:
bottleneck_factor = 4
channels_per_layers = [ci * bottleneck_factor for ci in channels_per_layers]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
net = SEPreResNet(
channels=channels,
init_block_channels=init_block_channels,
bottleneck=bottleneck,
conv1_stride=conv1_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import download_model
download_model(
net=net,
model_name=model_name,
local_model_store_dir_path=root)
return net
def sepreresnet10(**kwargs):
"""
SE-PreResNet-10 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=10, model_name="sepreresnet10", **kwargs)
def sepreresnet12(**kwargs):
"""
SE-PreResNet-12 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=12, model_name="sepreresnet12", **kwargs)
def sepreresnet14(**kwargs):
"""
SE-PreResNet-14 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=14, model_name="sepreresnet14", **kwargs)
def sepreresnet16(**kwargs):
"""
SE-PreResNet-16 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=16, model_name="sepreresnet16", **kwargs)
def sepreresnet18(**kwargs):
"""
SE-PreResNet-18 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=18, model_name="sepreresnet18", **kwargs)
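# Hedged usage sketch (not in the original file): building one of the factory
# models above and running a dummy forward pass; assumes torch is available
# and no pretrained weights are requested.
#
#   import torch
#   net = sepreresnet18()
#   logits = net(torch.randn(1, 3, 224, 224))   # shape: (1, 1000)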
def sepreresnet26(**kwargs):
"""
    SE-PreResNet-26 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=26, bottleneck=False, model_name="sepreresnet26", **kwargs)
def sepreresnetbc26b(**kwargs):
"""
SE-PreResNet-BC-26b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=26, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc26b", **kwargs)
def sepreresnet34(**kwargs):
"""
SE-PreResNet-34 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=34, model_name="sepreresnet34", **kwargs)
def sepreresnetbc38b(**kwargs):
"""
SE-PreResNet-BC-38b model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=38, bottleneck=True, conv1_stride=False, model_name="sepreresnetbc38b", **kwargs)
def sepreresnet50(**kwargs):
"""
SE-PreResNet-50 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=50, model_name="sepreresnet50", **kwargs)
def sepreresnet50b(**kwargs):
"""
SE-PreResNet-50 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=50, conv1_stride=False, model_name="sepreresnet50b", **kwargs)
def sepreresnet101(**kwargs):
"""
SE-PreResNet-101 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=101, model_name="sepreresnet101", **kwargs)
def sepreresnet101b(**kwargs):
"""
SE-PreResNet-101 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=101, conv1_stride=False, model_name="sepreresnet101b", **kwargs)
def sepreresnet152(**kwargs):
"""
SE-PreResNet-152 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=152, model_name="sepreresnet152", **kwargs)
def sepreresnet152b(**kwargs):
"""
SE-PreResNet-152 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=152, conv1_stride=False, model_name="sepreresnet152b", **kwargs)
def sepreresnet200(**kwargs):
"""
SE-PreResNet-200 model from 'Squeeze-and-Excitation Networks,' https://arxiv.org/abs/1709.01507. It's an
experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=200, model_name="sepreresnet200", **kwargs)
def sepreresnet200b(**kwargs):
"""
SE-PreResNet-200 model with stride at the second convolution in bottleneck block from 'Squeeze-and-Excitation
Networks,' https://arxiv.org/abs/1709.01507. It's an experimental model.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.torch/models'
Location for keeping the model parameters.
"""
return get_sepreresnet(blocks=200, conv1_stride=False, model_name="sepreresnet200b", **kwargs)
def _calc_width(net):
import numpy as np
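    # Count trainable parameters: sum the element count of every tensor that
    # requires gradients.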
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
def _test():
import torch
pretrained = False
models = [
sepreresnet10,
sepreresnet12,
sepreresnet14,
sepreresnet16,
sepreresnet18,
sepreresnet26,
sepreresnetbc26b,
sepreresnet34,
sepreresnetbc38b,
sepreresnet50,
sepreresnet50b,
sepreresnet101,
sepreresnet101b,
sepreresnet152,
sepreresnet152b,
sepreresnet200,
sepreresnet200b,
]
for model in models:
net = model(pretrained=pretrained)
# net.train()
net.eval()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
assert (model != sepreresnet10 or weight_count == 5461668)
assert (model != sepreresnet12 or weight_count == 5536232)
assert (model != sepreresnet14 or weight_count == 5833840)
assert (model != sepreresnet16 or weight_count == 7022976)
assert (model != sepreresnet18 or weight_count == 11776928)
assert (model != sepreresnet26 or weight_count == 18092188)
assert (model != sepreresnetbc26b or weight_count == 17388424)
assert (model != sepreresnet34 or weight_count == 21957204)
assert (model != sepreresnetbc38b or weight_count == 24019064)
assert (model != sepreresnet50 or weight_count == 28080472)
assert (model != sepreresnet50b or weight_count == 28080472)
assert (model != sepreresnet101 or weight_count == 49319320)
assert (model != sepreresnet101b or weight_count == 49319320)
assert (model != sepreresnet152 or weight_count == 66814296)
assert (model != sepreresnet152b or weight_count == 66814296)
assert (model != sepreresnet200 or weight_count == 71828312)
assert (model != sepreresnet200b or weight_count == 71828312)
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 1000))
if __name__ == "__main__":
_test()
|
py | b4049168b843d4798c42d2c44c30cf64274bb583 | # Generated by Django 3.1.4 on 2021-11-17 20:43
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('morad', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='car',
name='price',
field=models.FloatField(default=0, max_length=32),
),
]
|
py | b4049258dcfe3c84596f76e66b6fcbf7b73928da | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "rrsx.settings")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
py | b4049290c2ae8d5f39f084ee3f32cbebde914e0f | #!/usr/bin/env python
import sys, time
data_in = sys.argv[1] if len(sys.argv[1:]) > 0 else 'inputs/set01/aoc23.in'
def load(file):
with open(file) as x: output = x.read()
return [line for line in output.split('\n') if line]
def solve(data, regs):
ip = 0
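    # Tiny interpreter for the AoC 2015 day 23 instruction set:
    #   hlf r      - halve register r        tpl r      - triple register r
    #   inc r      - increment register r    jmp off    - jump by offset off
    #   jio r, off - jump by off if r == 1   jie r, off - jump by off if r is even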
while(ip < len(data)):
tokens = [x.replace(',','') for x in data[ip].split()]
if (tokens[0] == 'hlf'):
regs[tokens[1]]/=2
ip+=1
elif (tokens[0] == 'tpl'):
regs[tokens[1]]*=3
ip+=1
elif (tokens[0] == 'inc'):
regs[tokens[1]]+=1
ip+=1
elif (tokens[0] == 'jmp'): ip+=int(tokens[1].replace('+',''))
elif (tokens[0] == 'jio'): ip+=int(tokens[2].replace('+','')) if regs[tokens[1]] == 1 else 1
elif (tokens[0] == 'jie'): ip+=1 if regs[tokens[1]] % 2 else int(tokens[2].replace('+',''))
return regs
data=load(data_in)
print "Pt1: {}\nPt2: {}".format(solve(data, {'a': 0, 'b': 0})['b'],solve(data, {'a': 1, 'b': 0})['b'])
|
py | b40492e041488d4edde7dcc20bbfb05c530c5ee6 | from django.urls import path
from bargain.views import show_purchases, create_purchase, remove_purchase
urlpatterns = (
path('<int:pk>/', show_purchases, name='purchases'),
path('<int:user_pk>/<int:product_pk>', remove_purchase, name='remove purchase'),
path('add_purchase/<int:user_pk>/<int:watch_pk>', create_purchase, name='create purchase'),
)
|
py | b40493185d4e8e2935e68c96d7412e5c7aa7140a | # Bootstrap installation of Distribute
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
from hyde.version import __version__
from distutils.util import convert_path
from fnmatch import fnmatchcase
import os
import sys
PROJECT = 'hyde'
try:
long_description = open('README.rst', 'rt').read()
except IOError:
long_description = ''
################################################################################
# find_package_data is an Ian Bicking creation.
# Provided as an attribute, so you can append to these instead
# of replicating them:
standard_exclude = ('*.py', '*.pyc', '*~', '.*', '*.bak', '*.swp*')
standard_exclude_directories = ('.*', 'CVS', '_darcs', './build',
'./dist', 'EGG-INFO', '*.egg-info')
def find_package_data(
where='.', package='',
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
This function is by Ian Bicking.
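    Example (an illustrative sketch added for clarity; the exact file lists
    depend on the actual package tree)::

        find_package_data(PROJECT, package=PROJECT)
        # -> {'hyde': ['layouts/...', 'templates/...'], ...}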
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:
stack.append((fn, prefix + name + '/', package, only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix+name)
return out
################################################################################
setup(name=PROJECT,
version=__version__,
description='hyde is a static website generator',
long_description = long_description,
author='Lakshmi Vyas',
author_email='[email protected]',
url='http://hyde.github.com',
packages=find_packages(),
requires=['python (>= 2.7)'],
install_requires=(
'fswrap==0.1.2',
'commando==0.3.4',
'PyYAML==3.10',
'Markdown==2.3.1',
'MarkupSafe==0.18',
'Pygments==1.6',
'typogrify==2.0.0',
'smartypants<1.8',
'Jinja2==2.7.1'
),
tests_require=(
'nose', 'mock'
),
test_suite='nose.collector',
include_package_data = True,
# Scan the input for package information
# to grab any data files (text, images, etc.)
# associated with sub-packages.
package_data = find_package_data(PROJECT,
package=PROJECT,
only_in_packages=False,
),
entry_points={
'console_scripts': [
'hyde = hyde.main:main'
]
},
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Operating System :: MacOS :: MacOS X',
'Operating System :: Unix',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Software Development :: Build Tools',
'Topic :: Software Development :: Code Generators',
'Topic :: Internet',
'Topic :: Internet :: WWW/HTTP :: Site Management',
],
zip_safe=False,
)
|
py | b40493a38c9289ceae327dd3c17936e677e34d4a | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 4 23:32:46 2021
@author: orkun
"""
import sys
from PyQt5 import uic
from PyQt5.QtWidgets import QApplication, QMainWindow, QFileDialog
from PyQt5.QtGui import QStandardItemModel
from import_file import File_GET
from process_steps import processes
import random
class pre_process_ui(QMainWindow):
def __init__(self, *args):
QMainWindow.__init__(self, *args)
uic.loadUi('design/PreProcess.ui', self)
self.model = QStandardItemModel(self)
self.tview.setModel(self.model)
self.selectmodel = self.tview.selectionModel()
self.btnPrev.setEnabled(False)
self.tabWidget.currentChanged.connect(self.tab_change)
self.btnLoad.clicked.connect(self.open_filename)
self.btnNext.clicked.connect(self.next_tab)
self.btnPrev.clicked.connect(self.prev_tab)
self.btndata.clicked.connect(self.load_data)
self.delimeter = ";"
self.cmblist = [";", ",", ":", "-"]
self.cbSep.addItems(self.cmblist)
self.cbSep.currentTextChanged.connect(self.on_combobox_changed)
self.miss_formats = 'n.a.,NA,na,--,NAN,nan'
self.leMissing.setText(self.miss_formats)
self.leMissing.textChanged.connect(self.on_label_changed)
self.selectmodel.selectionChanged.connect(self.on_selectionChanged)
self.btnProcess.clicked.connect(self.do_process)
self.show()
def open_filename(self):
fileName, _ = QFileDialog.getOpenFileName(
self, "Select (.csv) File", "", "All Files (*);;csv Files (*.csv)")
if fileName:
self.leFileName.setText(fileName)
def next_tab(self):
cur_index = self.tabWidget.currentIndex()
if cur_index < len(self.tabWidget)-1:
self.tabWidget.setCurrentIndex(cur_index+1)
def prev_tab(self):
cur_index = self.tabWidget.currentIndex()
if cur_index > 0:
self.tabWidget.setCurrentIndex(cur_index-1)
def tab_change(self):
cur_index = self.tabWidget.currentIndex()
if cur_index == len(self.tabWidget)-1:
self.btnNext.setEnabled(False)
self.btnPrev.setEnabled(True)
elif cur_index < len(self.tabWidget)-1 and cur_index > 0:
self.btnNext.setEnabled(True)
self.btnPrev.setEnabled(True)
elif cur_index == 0:
self.btnPrev.setEnabled(False)
self.btnNext.setEnabled(True)
def on_combobox_changed(self):
self.delimeter = self.cbSep.currentText()
def on_label_changed(self):
self.miss_formats = self.leMissing.text()
def on_selectionChanged(self):
self.lblcolhead.setText("Selected Columns :" + str([col.data() for col in self.selectmodel.selectedColumns()]))
self.col_list = list(col.data() for col in self.selectmodel.selectedColumns())
def load_data(self):
file_class = File_GET(self.model, self.leFileName.text(), self.delimeter, self.miss_formats)
self.df = file_class.file_import_csv()
def do_process(self):
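        # Checkbox states: which preprocessing steps to run (missing values,
        # outliers, scaling) and which outputs to produce (CSV export, graphs).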
process_list = [self.chkmiss.isChecked(), self.chkoutlier.isChecked(), self.chkscale.isChecked()]
export_list = [self.chkfile.isChecked(), self.chkgraph.isChecked()]
if any(export_list):
process_class = processes(process_list, export_list, self.df, self.col_list, self.miss_formats)
data_get = process_class.process_steps()
if export_list[0]:
if data_get is not None:
# ----Export File
try:
fileName, selectedFilter = QFileDialog.getSaveFileName(self, "Save File",
str(random.randint(0, 10000)) + "_documentView", "CSV Files (*.csv)")
if fileName:
data_get.to_csv(fileName, index=False, header=True)
# return True
except IOError:
print("Write Error:", fileName)
# return False
if __name__ == "__main__":
app = QApplication.instance()
if app is None:
app = QApplication(sys.argv)
window = pre_process_ui()
sys.exit(app.exec_())
|
py | b40493db892c4820e308b0b7e4f6760fcc83f761 | #!/usr/bin/env python
import sys
import os.path
import cv2
import numpy
import utils
HAAR_CASCADE_CLASSIFIER = 'config/haarcascade_frontalface_default.xml'
class FaceDetector:
def __init__(self, classifier_file=HAAR_CASCADE_CLASSIFIER):
self.classifier_file = classifier_file
self.face_cascade = cv2.CascadeClassifier(classifier_file)
def detect(self, image):
"""
        Detect the first face in ``image`` and return ``(face_roi, rect)``, or
        ``(None, None)`` when no face is found. The image is converted to
        grayscale first, since the OpenCV Haar cascade detector expects gray
        input.
"""
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = self.face_cascade.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30, 30)
)
if len(faces) == 0:
return None, None
# FIXME assumes there will be only one face
# Extract the face area
(x, y, w, h) = faces[0]
# Return only the face part of the image
        # Slice rows by height and columns by width.
        return gray[y:y+h, x:x+w], faces[0]
class FaceRecogniser:
def __init__(self):
self.face_detector = FaceDetector()
self.face_recogniser = cv2.face.LBPHFaceRecognizer_create()
self.subject_to_label = {}
def train(self, training_data_path):
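        # Expected layout (inferred from the loop below): one sub-directory per
        # subject under ``training_data_path``; every image inside a subject's
        # directory is assigned that subject's integer label.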
faces = []
labels = []
#cv2.namedWindow('Training...', cv2.WND_PROP_FULLSCREEN)
#cv2.setWindowProperty('Training...', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)
#screen_width, screen_height = utils.get_screen_resolution()
#window_width = int(screen_width * 0.8)
#window_height = int(screen_height * 0.8)
#cv2.namedWindow('Training...', cv2.WINDOW_NORMAL)
#cv2.resizeWindow('Training...', window_width, window_height)
cv2.namedWindow('Training...')
cv2.moveWindow('Training...', 0, 0)
dirs = os.listdir(training_data_path)
print dirs
label = 0
for dir_name in dirs:
if dir_name.startswith('.'):
continue
subject = dir_name
print subject, label
subject_path = os.path.join(training_data_path, dir_name)
subject_image_files = os.listdir(subject_path)
# Read each image, detect the face, add the detected face to the
# subject's list of faces
for image_name in subject_image_files:
# Ignore system files like .DS_Store
if image_name.startswith('.') or image_name == 'name.txt':
continue
print image_name
image_path = os.path.join(subject_path, image_name)
image = cv2.imread(image_path)
# Display an image window to show the image
#cv2.imshow('Training...', image)
small_image = cv2.resize(image, None, fx = 0.1, fy = 0.1)
cv2.imshow('Training...', small_image)
cv2.waitKey(100)
# Detect and record face
face, rect = self.face_detector.detect(image)
if face is not None:
small_rectangle_coordinates = utils.scale_coordinates(rect, 0.1)
utils.draw_rectangle(small_image, small_rectangle_coordinates)
cv2.imshow('Training...', small_image)
cv2.waitKey(100)
faces.append(face)
labels.append(label)
label += 1
# Clean up after ourselves
cv2.destroyWindow('Training...')
cv2.waitKey(1)
cv2.destroyAllWindows()
self.face_recogniser.train(faces, numpy.array(labels))
def main():
face_recogniser = FaceRecogniser()
face_recogniser.train(sys.argv[1])
if __name__ == '__main__':
main()
|
py | b4049445e75f58063741ce50b3979cd699ac0c45 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for git_common.py"""
import binascii
import collections
import os
import shutil
import signal
import sys
import tempfile
import time
import unittest
DEPOT_TOOLS_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, DEPOT_TOOLS_ROOT)
from testing_support import coverage_utils
from testing_support import git_test_utils
class GitCommonTestBase(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(GitCommonTestBase, cls).setUpClass()
import git_common
cls.gc = git_common
cls.gc.TEST_MODE = True
class Support(GitCommonTestBase):
def _testMemoizeOneBody(self, threadsafe):
calls = collections.defaultdict(int)
def double_if_even(val):
calls[val] += 1
return val * 2 if val % 2 == 0 else None
# Use this explicitly as a wrapper fn instead of a decorator. Otherwise
# pylint crashes (!!)
double_if_even = self.gc.memoize_one(threadsafe=threadsafe)(double_if_even)
self.assertEqual(4, double_if_even(2))
self.assertEqual(4, double_if_even(2))
self.assertEqual(None, double_if_even(1))
self.assertEqual(None, double_if_even(1))
self.assertDictEqual({1: 2, 2: 1}, calls)
double_if_even.set(10, 20)
self.assertEqual(20, double_if_even(10))
self.assertDictEqual({1: 2, 2: 1}, calls)
double_if_even.clear()
self.assertEqual(4, double_if_even(2))
self.assertEqual(4, double_if_even(2))
self.assertEqual(None, double_if_even(1))
self.assertEqual(None, double_if_even(1))
self.assertEqual(20, double_if_even(10))
self.assertDictEqual({1: 4, 2: 2, 10: 1}, calls)
def testMemoizeOne(self):
self._testMemoizeOneBody(threadsafe=False)
def testMemoizeOneThreadsafe(self):
self._testMemoizeOneBody(threadsafe=True)
def testOnce(self):
testlist = []
# This works around a bug in pylint
once = self.gc.once
@once
def add_to_list():
testlist.append('dog')
add_to_list()
add_to_list()
add_to_list()
add_to_list()
self.assertEquals(testlist, ['dog'])
def slow_square(i):
"""Helper for ScopedPoolTest.
Must be global because non top-level functions aren't pickleable.
"""
return i ** 2
class ScopedPoolTest(GitCommonTestBase):
CTRL_C = signal.CTRL_C_EVENT if sys.platform == 'win32' else signal.SIGINT
def testThreads(self):
result = []
with self.gc.ScopedPool(kind='threads') as pool:
result = list(pool.imap(slow_square, xrange(10)))
self.assertEqual([0, 1, 4, 9, 16, 25, 36, 49, 64, 81], result)
def testThreadsCtrlC(self):
result = []
with self.assertRaises(KeyboardInterrupt):
with self.gc.ScopedPool(kind='threads') as pool:
# Make sure this pool is interrupted in mid-swing
for i in pool.imap(slow_square, xrange(20)):
if i > 32:
os.kill(os.getpid(), self.CTRL_C)
result.append(i)
self.assertEqual([0, 1, 4, 9, 16, 25], result)
def testProcs(self):
result = []
with self.gc.ScopedPool() as pool:
result = list(pool.imap(slow_square, xrange(10)))
self.assertEqual([0, 1, 4, 9, 16, 25, 36, 49, 64, 81], result)
def testProcsCtrlC(self):
result = []
with self.assertRaises(KeyboardInterrupt):
with self.gc.ScopedPool() as pool:
# Make sure this pool is interrupted in mid-swing
for i in pool.imap(slow_square, xrange(20)):
if i > 32:
os.kill(os.getpid(), self.CTRL_C)
result.append(i)
self.assertEqual([0, 1, 4, 9, 16, 25], result)
class ProgressPrinterTest(GitCommonTestBase):
class FakeStream(object):
def __init__(self):
self.data = set()
self.count = 0
def write(self, line):
self.data.add(line)
def flush(self):
self.count += 1
@unittest.expectedFailure
def testBasic(self):
"""This test is probably racy, but I don't have a better alternative."""
fmt = '%(count)d/10'
stream = self.FakeStream()
pp = self.gc.ProgressPrinter(fmt, enabled=True, fout=stream, period=0.01)
with pp as inc:
for _ in xrange(10):
time.sleep(0.02)
inc()
filtered = {x.strip() for x in stream.data}
rslt = {fmt % {'count': i} for i in xrange(11)}
self.assertSetEqual(filtered, rslt)
self.assertGreaterEqual(stream.count, 10)
class GitReadOnlyFunctionsTest(git_test_utils.GitRepoReadOnlyTestBase,
GitCommonTestBase):
REPO_SCHEMA = """
A B C D
B E D
"""
COMMIT_A = {
'some/files/file1': {'data': 'file1'},
'some/files/file2': {'data': 'file2'},
'some/files/file3': {'data': 'file3'},
'some/other/file': {'data': 'otherfile'},
}
COMMIT_C = {
'some/files/file2': {
'mode': 0755,
'data': 'file2 - vanilla\n'},
}
COMMIT_E = {
'some/files/file2': {'data': 'file2 - merged\n'},
}
COMMIT_D = {
'some/files/file2': {'data': 'file2 - vanilla\nfile2 - merged\n'},
}
def testHashes(self):
ret = self.repo.run(
self.gc.hash_multi, *[
'master',
'master~3',
self.repo['E']+'~',
self.repo['D']+'^2',
'tag_C^{}',
]
)
self.assertEqual([
self.repo['D'],
self.repo['A'],
self.repo['B'],
self.repo['E'],
self.repo['C'],
], ret)
self.assertEquals(
self.repo.run(self.gc.hash_one, 'branch_D'),
self.repo['D']
)
self.assertTrue(self.repo['D'].startswith(
self.repo.run(self.gc.hash_one, 'branch_D', short=True)))
def testStream(self):
items = set(self.repo.commit_map.itervalues())
def testfn():
for line in self.gc.run_stream('log', '--format=%H').xreadlines():
line = line.strip()
self.assertIn(line, items)
items.remove(line)
self.repo.run(testfn)
def testStreamWithRetcode(self):
items = set(self.repo.commit_map.itervalues())
def testfn():
with self.gc.run_stream_with_retcode('log', '--format=%H') as stdout:
for line in stdout.xreadlines():
line = line.strip()
self.assertIn(line, items)
items.remove(line)
self.repo.run(testfn)
def testStreamWithRetcodeException(self):
import subprocess2
with self.assertRaises(subprocess2.CalledProcessError):
with self.gc.run_stream_with_retcode('checkout', 'unknown-branch'):
pass
def testCurrentBranch(self):
def cur_branch_out_of_git():
os.chdir('..')
return self.gc.current_branch()
self.assertIsNone(self.repo.run(cur_branch_out_of_git))
self.repo.git('checkout', 'branch_D')
self.assertEqual(self.repo.run(self.gc.current_branch), 'branch_D')
def testBranches(self):
# This check fails with git 2.4 (see crbug.com/487172)
self.assertEqual(self.repo.run(set, self.gc.branches()),
{'master', 'branch_D', 'root_A'})
def testDiff(self):
# Get the names of the blobs being compared (to avoid hard-coding).
c_blob_short = self.repo.git('rev-parse', '--short',
'tag_C:some/files/file2').stdout.strip()
d_blob_short = self.repo.git('rev-parse', '--short',
'tag_D:some/files/file2').stdout.strip()
expected_output = [
'diff --git a/some/files/file2 b/some/files/file2',
'index %s..%s 100755' % (c_blob_short, d_blob_short),
'--- a/some/files/file2',
'+++ b/some/files/file2',
'@@ -1 +1,2 @@',
' file2 - vanilla',
'+file2 - merged']
self.assertEqual(expected_output,
self.repo.run(self.gc.diff, 'tag_C', 'tag_D').split('\n'))
def testDormant(self):
self.assertFalse(self.repo.run(self.gc.is_dormant, 'master'))
self.repo.git('config', 'branch.master.dormant', 'true')
self.assertTrue(self.repo.run(self.gc.is_dormant, 'master'))
def testBlame(self):
def get_porcelain_for_commit(commit_name, lines):
format_string = ('%H {}\nauthor %an\nauthor-mail <%ae>\nauthor-time %at\n'
'author-tz +0000\ncommitter %cn\ncommitter-mail <%ce>\n'
'committer-time %ct\ncommitter-tz +0000\nsummary {}')
format_string = format_string.format(lines, commit_name)
info = self.repo.show_commit(commit_name, format_string=format_string)
return info.split('\n')
# Expect to blame line 1 on C, line 2 on E.
c_short = self.repo['C'][:8]
c_author = self.repo.show_commit('C', format_string='%an %ai')
e_short = self.repo['E'][:8]
e_author = self.repo.show_commit('E', format_string='%an %ai')
expected_output = ['%s (%s 1) file2 - vanilla' % (c_short, c_author),
'%s (%s 2) file2 - merged' % (e_short, e_author)]
self.assertEqual(expected_output,
self.repo.run(self.gc.blame, 'some/files/file2',
'tag_D').split('\n'))
# Test porcelain.
expected_output = []
expected_output.extend(get_porcelain_for_commit('C', '1 1 1'))
expected_output.append('previous %s some/files/file2' % self.repo['B'])
expected_output.append('filename some/files/file2')
expected_output.append('\tfile2 - vanilla')
expected_output.extend(get_porcelain_for_commit('E', '1 2 1'))
expected_output.append('previous %s some/files/file2' % self.repo['B'])
expected_output.append('filename some/files/file2')
expected_output.append('\tfile2 - merged')
self.assertEqual(expected_output,
self.repo.run(self.gc.blame, 'some/files/file2',
'tag_D', porcelain=True).split('\n'))
def testParseCommitrefs(self):
ret = self.repo.run(
self.gc.parse_commitrefs, *[
'master',
'master~3',
self.repo['E']+'~',
self.repo['D']+'^2',
'tag_C^{}',
]
)
self.assertEqual(ret, map(binascii.unhexlify, [
self.repo['D'],
self.repo['A'],
self.repo['B'],
self.repo['E'],
self.repo['C'],
]))
with self.assertRaisesRegexp(Exception, r"one of \('master', 'bananas'\)"):
self.repo.run(self.gc.parse_commitrefs, 'master', 'bananas')
def testRepoRoot(self):
def cd_and_repo_root(path):
print(os.getcwd())
os.chdir(path)
return self.gc.repo_root()
self.assertEqual(self.repo.repo_path, self.repo.run(self.gc.repo_root))
# cd to a subdirectory; repo_root should still return the root dir.
self.assertEqual(self.repo.repo_path,
self.repo.run(cd_and_repo_root, 'some/files'))
def testTags(self):
self.assertEqual(set(self.repo.run(self.gc.tags)),
{'tag_'+l for l in 'ABCDE'})
def testTree(self):
tree = self.repo.run(self.gc.tree, 'master:some/files')
file1 = self.COMMIT_A['some/files/file1']['data']
file2 = self.COMMIT_D['some/files/file2']['data']
file3 = self.COMMIT_A['some/files/file3']['data']
self.assertEquals(
tree['file1'],
('100644', 'blob', git_test_utils.git_hash_data(file1)))
self.assertEquals(
tree['file2'],
('100755', 'blob', git_test_utils.git_hash_data(file2)))
self.assertEquals(
tree['file3'],
('100644', 'blob', git_test_utils.git_hash_data(file3)))
tree = self.repo.run(self.gc.tree, 'master:some')
self.assertEquals(len(tree), 2)
# Don't check the tree hash because we're lazy :)
self.assertEquals(tree['files'][:2], ('040000', 'tree'))
tree = self.repo.run(self.gc.tree, 'master:wat')
self.assertEqual(tree, None)
def testTreeRecursive(self):
tree = self.repo.run(self.gc.tree, 'master:some', recurse=True)
file1 = self.COMMIT_A['some/files/file1']['data']
file2 = self.COMMIT_D['some/files/file2']['data']
file3 = self.COMMIT_A['some/files/file3']['data']
other = self.COMMIT_A['some/other/file']['data']
self.assertEquals(
tree['files/file1'],
('100644', 'blob', git_test_utils.git_hash_data(file1)))
self.assertEquals(
tree['files/file2'],
('100755', 'blob', git_test_utils.git_hash_data(file2)))
self.assertEquals(
tree['files/file3'],
('100644', 'blob', git_test_utils.git_hash_data(file3)))
self.assertEquals(
tree['other/file'],
('100644', 'blob', git_test_utils.git_hash_data(other)))
class GitMutableFunctionsTest(git_test_utils.GitRepoReadWriteTestBase,
GitCommonTestBase):
REPO_SCHEMA = ''
def _intern_data(self, data):
with tempfile.TemporaryFile() as f:
f.write(data)
f.seek(0)
return self.repo.run(self.gc.intern_f, f)
def testInternF(self):
data = 'CoolBobcatsBro'
data_hash = self._intern_data(data)
self.assertEquals(git_test_utils.git_hash_data(data), data_hash)
self.assertEquals(data, self.repo.git('cat-file', 'blob', data_hash).stdout)
def testMkTree(self):
tree = {}
for i in 1, 2, 3:
name = 'file%d' % i
tree[name] = ('100644', 'blob', self._intern_data(name))
tree_hash = self.repo.run(self.gc.mktree, tree)
self.assertEquals('37b61866d6e061c4ba478e7eb525be7b5752737d', tree_hash)
def testConfig(self):
self.repo.git('config', '--add', 'happy.derpies', 'food')
self.assertEquals(self.repo.run(self.gc.config_list, 'happy.derpies'),
['food'])
self.assertEquals(self.repo.run(self.gc.config_list, 'sad.derpies'), [])
self.repo.git('config', '--add', 'happy.derpies', 'cat')
self.assertEquals(self.repo.run(self.gc.config_list, 'happy.derpies'),
['food', 'cat'])
self.assertEquals('cat', self.repo.run(self.gc.config, 'dude.bob', 'cat'))
self.repo.run(self.gc.set_config, 'dude.bob', 'dog')
self.assertEquals('dog', self.repo.run(self.gc.config, 'dude.bob', 'cat'))
self.repo.run(self.gc.del_config, 'dude.bob')
# This should work without raising an exception
self.repo.run(self.gc.del_config, 'dude.bob')
self.assertEquals('cat', self.repo.run(self.gc.config, 'dude.bob', 'cat'))
self.assertEquals('origin/master', self.repo.run(self.gc.root))
self.repo.git('config', 'depot-tools.upstream', 'catfood')
self.assertEquals('catfood', self.repo.run(self.gc.root))
def testUpstream(self):
self.repo.git('commit', '--allow-empty', '-am', 'foooooo')
self.assertEquals(self.repo.run(self.gc.upstream, 'bobly'), None)
self.assertEquals(self.repo.run(self.gc.upstream, 'master'), None)
self.repo.git('checkout', '-tb', 'happybranch', 'master')
self.assertEquals(self.repo.run(self.gc.upstream, 'happybranch'),
'master')
def testNormalizedVersion(self):
self.assertTrue(all(
isinstance(x, int) for x in self.repo.run(self.gc.get_git_version)))
def testGetBranchesInfo(self):
self.repo.git('commit', '--allow-empty', '-am', 'foooooo')
self.repo.git('checkout', '-tb', 'happybranch', 'master')
self.repo.git('commit', '--allow-empty', '-am', 'foooooo')
self.repo.git('checkout', '-tb', 'child', 'happybranch')
self.repo.git('checkout', '-tb', 'to_delete', 'master')
self.repo.git('checkout', '-tb', 'parent_gone', 'to_delete')
self.repo.git('branch', '-D', 'to_delete')
supports_track = (
self.repo.run(self.gc.get_git_version)
>= self.gc.MIN_UPSTREAM_TRACK_GIT_VERSION)
actual = self.repo.run(self.gc.get_branches_info, supports_track)
expected = {
'happybranch': (
self.repo.run(self.gc.hash_one, 'happybranch', short=True),
'master',
1 if supports_track else None,
None
),
'child': (
self.repo.run(self.gc.hash_one, 'child', short=True),
'happybranch',
None,
None
),
'master': (
self.repo.run(self.gc.hash_one, 'master', short=True),
'',
None,
None
),
'': None,
'parent_gone': (
self.repo.run(self.gc.hash_one, 'parent_gone', short=True),
'to_delete',
None,
None
),
'to_delete': None
}
self.assertEquals(expected, actual)
class GitMutableStructuredTest(git_test_utils.GitRepoReadWriteTestBase,
GitCommonTestBase):
REPO_SCHEMA = """
A B C D E F G
B H I J K
J L
X Y Z
CAT DOG
"""
COMMIT_B = {'file': {'data': 'B'}}
COMMIT_H = {'file': {'data': 'H'}}
COMMIT_I = {'file': {'data': 'I'}}
COMMIT_J = {'file': {'data': 'J'}}
COMMIT_K = {'file': {'data': 'K'}}
COMMIT_L = {'file': {'data': 'L'}}
def setUp(self):
super(GitMutableStructuredTest, self).setUp()
self.repo.git('branch', '--set-upstream-to', 'root_X', 'branch_Z')
self.repo.git('branch', '--set-upstream-to', 'branch_G', 'branch_K')
self.repo.git('branch', '--set-upstream-to', 'branch_K', 'branch_L')
self.repo.git('branch', '--set-upstream-to', 'root_A', 'branch_G')
self.repo.git('branch', '--set-upstream-to', 'root_X', 'root_A')
def testTooManyBranches(self):
for i in xrange(30):
self.repo.git('branch', 'a'*i)
_, rslt = self.repo.capture_stdio(list, self.gc.branches())
self.assertIn('too many branches (39/20)', rslt)
self.repo.git('config', 'depot-tools.branch-limit', 'cat')
_, rslt = self.repo.capture_stdio(list, self.gc.branches())
self.assertIn('too many branches (39/20)', rslt)
self.repo.git('config', 'depot-tools.branch-limit', '100')
# should not raise
# This check fails with git 2.4 (see crbug.com/487172)
self.assertEqual(38, len(self.repo.run(list, self.gc.branches())))
def testMergeBase(self):
self.repo.git('checkout', 'branch_K')
self.assertEqual(
self.repo['B'],
self.repo.run(self.gc.get_or_create_merge_base, 'branch_K', 'branch_G')
)
self.assertEqual(
self.repo['J'],
self.repo.run(self.gc.get_or_create_merge_base, 'branch_L', 'branch_K')
)
self.assertEqual(
self.repo['B'], self.repo.run(self.gc.config, 'branch.branch_K.base')
)
self.assertEqual(
'branch_G', self.repo.run(self.gc.config, 'branch.branch_K.base-upstream')
)
# deadbeef is a bad hash, so this will result in repo['B']
self.repo.run(self.gc.manual_merge_base, 'branch_K', 'deadbeef', 'branch_G')
self.assertEqual(
self.repo['B'],
self.repo.run(self.gc.get_or_create_merge_base, 'branch_K', 'branch_G')
)
# but if we pick a real ancestor, then it'll work
self.repo.run(self.gc.manual_merge_base, 'branch_K', self.repo['I'],
'branch_G')
self.assertEqual(
self.repo['I'],
self.repo.run(self.gc.get_or_create_merge_base, 'branch_K', 'branch_G')
)
self.assertEqual({'branch_K': self.repo['I'], 'branch_L': self.repo['J']},
self.repo.run(self.gc.branch_config_map, 'base'))
self.repo.run(self.gc.remove_merge_base, 'branch_K')
self.repo.run(self.gc.remove_merge_base, 'branch_L')
self.assertEqual(None,
self.repo.run(self.gc.config, 'branch.branch_K.base'))
self.assertEqual({}, self.repo.run(self.gc.branch_config_map, 'base'))
# if it's too old, then it caps at merge-base
self.repo.run(self.gc.manual_merge_base, 'branch_K', self.repo['A'],
'branch_G')
self.assertEqual(
self.repo['B'],
self.repo.run(self.gc.get_or_create_merge_base, 'branch_K', 'branch_G')
)
# If the user does --set-upstream-to something else, then we discard the
# base and recompute it.
self.repo.run(self.gc.run, 'branch', '-u', 'root_A')
self.assertEqual(
self.repo['A'],
self.repo.run(self.gc.get_or_create_merge_base, 'branch_K')
)
self.assertIsNone(
self.repo.run(self.gc.get_or_create_merge_base, 'branch_DOG'))
def testGetBranchTree(self):
skipped, tree = self.repo.run(self.gc.get_branch_tree)
# This check fails with git 2.4 (see crbug.com/487172)
self.assertEqual(skipped, {'master', 'root_X', 'branch_DOG', 'root_CAT'})
self.assertEqual(tree, {
'branch_G': 'root_A',
'root_A': 'root_X',
'branch_K': 'branch_G',
'branch_L': 'branch_K',
'branch_Z': 'root_X'
})
topdown = list(self.gc.topo_iter(tree))
bottomup = list(self.gc.topo_iter(tree, top_down=False))
self.assertEqual(topdown, [
('branch_Z', 'root_X'),
('root_A', 'root_X'),
('branch_G', 'root_A'),
('branch_K', 'branch_G'),
('branch_L', 'branch_K'),
])
self.assertEqual(bottomup, [
('branch_L', 'branch_K'),
('branch_Z', 'root_X'),
('branch_K', 'branch_G'),
('branch_G', 'root_A'),
('root_A', 'root_X'),
])
def testIsGitTreeDirty(self):
self.assertEquals(False, self.repo.run(self.gc.is_dirty_git_tree, 'foo'))
self.repo.open('test.file', 'w').write('test data')
self.repo.git('add', 'test.file')
self.assertEquals(True, self.repo.run(self.gc.is_dirty_git_tree, 'foo'))
def testSquashBranch(self):
self.repo.git('checkout', 'branch_K')
self.assertEquals(True, self.repo.run(self.gc.squash_current_branch,
'cool message'))
lines = ['cool message', '']
for l in 'HIJK':
lines.extend((self.repo[l], l, ''))
lines.pop()
msg = '\n'.join(lines)
self.assertEquals(self.repo.run(self.gc.run, 'log', '-n1', '--format=%B'),
msg)
self.assertEquals(
self.repo.git('cat-file', 'blob', 'branch_K:file').stdout,
'K'
)
def testSquashBranchEmpty(self):
self.repo.git('checkout', 'branch_K')
self.repo.git('checkout', 'branch_G', '.')
self.repo.git('commit', '-m', 'revert all changes no branch')
    # Should return False since the squash would result in an empty commit
stdout = self.repo.capture_stdio(self.gc.squash_current_branch)[0]
self.assertEquals(stdout, 'Nothing to commit; squashed branch is empty\n')
def testRebase(self):
self.assertSchema("""
A B C D E F G
B H I J K
J L
X Y Z
CAT DOG
""")
rslt = self.repo.run(
self.gc.rebase, 'branch_G', 'branch_K~4', 'branch_K')
self.assertTrue(rslt.success)
self.assertSchema("""
A B C D E F G H I J K
B H I J L
X Y Z
CAT DOG
""")
rslt = self.repo.run(
self.gc.rebase, 'branch_K', 'branch_L~1', 'branch_L', abort=True)
self.assertFalse(rslt.success)
self.assertFalse(self.repo.run(self.gc.in_rebase))
rslt = self.repo.run(
self.gc.rebase, 'branch_K', 'branch_L~1', 'branch_L', abort=False)
self.assertFalse(rslt.success)
self.assertTrue(self.repo.run(self.gc.in_rebase))
self.assertEqual(self.repo.git('status', '--porcelain').stdout, 'UU file\n')
self.repo.git('checkout', '--theirs', 'file')
self.repo.git('add', 'file')
self.repo.git('rebase', '--continue')
self.assertSchema("""
A B C D E F G H I J K L
X Y Z
CAT DOG
""")
class GitFreezeThaw(git_test_utils.GitRepoReadWriteTestBase):
@classmethod
def setUpClass(cls):
super(GitFreezeThaw, cls).setUpClass()
import git_common
cls.gc = git_common
cls.gc.TEST_MODE = True
REPO_SCHEMA = """
A B C D
B E D
"""
COMMIT_A = {
'some/files/file1': {'data': 'file1'},
'some/files/file2': {'data': 'file2'},
'some/files/file3': {'data': 'file3'},
'some/other/file': {'data': 'otherfile'},
}
COMMIT_C = {
'some/files/file2': {
'mode': 0755,
'data': 'file2 - vanilla'},
}
COMMIT_E = {
'some/files/file2': {'data': 'file2 - merged'},
}
COMMIT_D = {
'some/files/file2': {'data': 'file2 - vanilla\nfile2 - merged'},
}
def testNothing(self):
self.assertIsNotNone(self.repo.run(self.gc.thaw)) # 'Nothing to thaw'
self.assertIsNotNone(self.repo.run(self.gc.freeze)) # 'Nothing to freeze'
def testAll(self):
def inner():
with open('some/files/file2', 'a') as f2:
print >> f2, 'cool appended line'
os.mkdir('some/other_files')
with open('some/other_files/subdir_file', 'w') as f3:
print >> f3, 'new file!'
with open('some/files/file5', 'w') as f5:
print >> f5, 'New file!1!one!'
STATUS_1 = '\n'.join((
' M some/files/file2',
'A some/files/file5',
'?? some/other_files/'
)) + '\n'
self.repo.git('add', 'some/files/file5')
# Freeze group 1
self.assertEquals(self.repo.git('status', '--porcelain').stdout, STATUS_1)
self.assertIsNone(self.gc.freeze())
self.assertEquals(self.repo.git('status', '--porcelain').stdout, '')
# Freeze group 2
with open('some/files/file2', 'a') as f2:
print >> f2, 'new! appended line!'
self.assertEquals(self.repo.git('status', '--porcelain').stdout,
' M some/files/file2\n')
self.assertIsNone(self.gc.freeze())
self.assertEquals(self.repo.git('status', '--porcelain').stdout, '')
# Thaw it out!
self.assertIsNone(self.gc.thaw())
self.assertIsNotNone(self.gc.thaw()) # One thaw should thaw everything
self.assertEquals(self.repo.git('status', '--porcelain').stdout, STATUS_1)
self.repo.run(inner)
class GitMakeWorkdir(git_test_utils.GitRepoReadOnlyTestBase, GitCommonTestBase):
def setUp(self):
self._tempdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self._tempdir)
REPO_SCHEMA = """
A
"""
def testMakeWorkdir(self):
if not hasattr(os, 'symlink'):
return
workdir = os.path.join(self._tempdir, 'workdir')
self.gc.make_workdir(os.path.join(self.repo.repo_path, '.git'),
os.path.join(workdir, '.git'))
EXPECTED_LINKS = [
'config', 'info', 'hooks', 'logs/refs', 'objects', 'refs',
]
for path in EXPECTED_LINKS:
self.assertTrue(os.path.islink(os.path.join(workdir, '.git', path)))
self.assertEqual(os.path.realpath(os.path.join(workdir, '.git', path)),
os.path.join(self.repo.repo_path, '.git', path))
self.assertFalse(os.path.islink(os.path.join(workdir, '.git', 'HEAD')))
class GitTestUtilsTest(git_test_utils.GitRepoReadOnlyTestBase):
REPO_SCHEMA = """
A B
"""
COMMIT_A = {
'file1': {'data': 'file1'},
}
COMMIT_B = {
'file1': {'data': 'file1 changed'},
}
def testAutomaticCommitDates(self):
# The dates should start from 1970-01-01 and automatically increment. They
# must be in UTC (otherwise the tests are system-dependent, and if your
# local timezone is positive, timestamps will be <0 which causes bizarre
# behaviour in Git; http://crbug.com/581895).
self.assertEquals('Author McAuthorly 1970-01-01 00:00:00 +0000',
self.repo.show_commit('A', format_string='%an %ai'))
self.assertEquals('Charles Committish 1970-01-02 00:00:00 +0000',
self.repo.show_commit('A', format_string='%cn %ci'))
self.assertEquals('Author McAuthorly 1970-01-03 00:00:00 +0000',
self.repo.show_commit('B', format_string='%an %ai'))
self.assertEquals('Charles Committish 1970-01-04 00:00:00 +0000',
self.repo.show_commit('B', format_string='%cn %ci'))
if __name__ == '__main__':
sys.exit(coverage_utils.covered_main(
os.path.join(DEPOT_TOOLS_ROOT, 'git_common.py')))
|
py | b40495c3a8bc79d2675c995e72a45dc24462ea68 |
#from within a working, case-specific subdirectory of the directory containing autorun,
export PYTHONPATH=$PYTHONPATH:`cd ..; pwd`
#Edit vary_params.py
python vary_params.py
#Edit path in examine_big_run.py
python examine_big_run.py |
py | b4049649c7f6a709cbe4ff9bfc8a58179fb32470 | """Plot utilities and functions
"""
def cm2inch(*tupl):
"""Convert from cm to inches.
Matplotlib uses inches as default unit.
Conversion supports tuples as the figsize option for figure.
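    Example (an added doctest-style sketch; assumes the 2.54 cm/inch factor
    used below)::

        >>> cm2inch(2.54)
        (1.0,)
        >>> cm2inch((2.54, 5.08))
        (1.0, 2.0)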
"""
inch = 2.54
if isinstance(tupl[0], tuple):
return tuple(i/inch for i in tupl[0])
else:
return tuple(i/inch for i in tupl)
|
py | b404966462ba479afd7a4f92b3abc980739905bf | # coding: utf-8
"""
Pure Storage FlashBlade REST 1.10 Python SDK
Pure Storage FlashBlade REST 1.10 Python SDK. Compatible with REST API versions 1.0 - 1.10. Developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/).
OpenAPI spec version: 1.10
Contact: [email protected]
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class LogsApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def list_logs(self, **kwargs):
"""
Download logs
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_logs(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int end_time: Time to end sample in milliseconds since epoch.
:param int start_time: Time to start sample in milliseconds since epoch.
:return: LogDownloadResponse
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.list_logs_with_http_info(**kwargs)
else:
(data) = self.list_logs_with_http_info(**kwargs)
return data
def list_logs_with_http_info(self, **kwargs):
"""
Download logs
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.list_logs_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int end_time: Time to end sample in milliseconds since epoch.
:param int start_time: Time to start sample in milliseconds since epoch.
:return: LogDownloadResponse
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['end_time', 'start_time']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_logs" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'end_time' in params:
query_params.append(('end_time', params['end_time']))
if 'start_time' in params:
query_params.append(('start_time', params['start_time']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/octet-stream', 'text/plain', 'application/json'])
# Authentication setting
auth_settings = ['AuthTokenHeader']
return self.api_client.call_api('/1.10/logs', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='LogDownloadResponse',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
py | b404981129bd321b9e3059a88ab8d5a789042b55 | from flask import Flask, redirect, url_for, render_template, request, flash, session, jsonify
import datetime
import mysql.connector as mysql
from functools import wraps
from app.constants import Colors, Manufacturer, VehicleTypes
from app.sql import *
from app import app
MANAGER = "Manager"
INVENTORY_CLERK = "InventoryClerk"
SALESPERSON = "Salesperson"
SERVICE_WRITER = "ServiceWriter"
ANONYMOUS = "anonymous"
ROLAND_AROUND = "Owner"
db_connection = mysql.connect(host='50.87.253.41', database='charljl4_jj', user='charljl4_team007', password='team007',
port=3306)
# https://github.com/ashishsarkar/UserLogin/blob/master/app.py
# check if user logged in
"""
Christie
"""
def is_logged_in(f):
@wraps(f)
def wrap(*args, **kwargs):
if 'logged_in' in session:
return f(*args, **kwargs)
else:
flash('Unauthorized, Please Login with correct credential', 'danger')
return redirect(url_for('login'))
return wrap
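# Usage sketch (hedged example, not one of the original routes): decorate any
# view that requires an authenticated session, e.g.
#
#     @app.route('/inventory')
#     @is_logged_in
#     def inventory():
#         return render_template('inventory.html')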
def roland_login_as_other(session, r):
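    """Return True when the logged-in owner ("Roland") has switched into role
    ``r`` through the session's 'switch_to_role' flag; any other user, or an
    owner who has not switched, does not match."""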
cur_role = session['role']
next_role = session.get('switch_to_role', None)
return cur_role == ROLAND_AROUND and next_role and next_role == r
def calculate_available_vehicles():
db_connection.reconnect()
cursor = db_connection.cursor()
cursor.execute(AvailableVehicles)
available_vehicles = cursor.fetchone()
return available_vehicles[0]
"""
Christie
"""
def load_vehicles():
d = {}
db_connection.reconnect()
print("Connected to:", db_connection.get_server_info())
cursor = db_connection.cursor()
cursor.execute("SELECT * FROM Vehicle;")
vehicles = cursor.fetchall()
for i, vehicle in enumerate(vehicles):
v = {
'id': vehicle[0],
'price': vehicle[1],
'manufacturer': vehicle[2]
}
d[i] = v
return d
"""
Christie
"""
@app.route("/monthly_drilldown/<yyyymm>", methods=["GET"])
def monthly_drilldown_reports(yyyymm):
# FirstName-LastName-TaxID
year, month = yyyymm.split("-")[0], yyyymm.split("-")[1]
db_connection.reconnect()
cursor = db_connection.cursor()
cursor.execute(DrilldownReport, (year, month))
detail_records = cursor.fetchall()
return render_template('reports/drilldown_reports.html', records=detail_records)
"""
Christie
"""
@app.route("/sales_by_manufacturer", methods=["GET"])
def sales_by_manufacturer_reports():
db_connection.reconnect()
cursor = db_connection.cursor()
cursor.execute(SalesByManufacturer)
sales_by_manufacturer = cursor.fetchall()
return render_template('reports/sales_by_manufacturer.html', records=sales_by_manufacturer)
"""
Christie
"""
@app.route("/sales_by_type", methods=["GET"])
def sales_by_type_reports():
db_connection.reconnect()
cursor = db_connection.cursor()
cursor.execute(SalesByType)
sales_by_type = cursor.fetchall()
return render_template('reports/sales_by_type.html', records=sales_by_type)
"""
Christie
"""
@app.route("/part_stats", methods=["GET"])
def part_stats_reports():
db_connection.reconnect()
cursor = db_connection.cursor()
cursor.execute(PartStatistics)
part_stats = cursor.fetchall()
return render_template('reports/part_stats.html', records=part_stats)
"""
Christie
"""
@app.route("/below_cost", methods=["GET"])
def below_cost_reports():
db_connection.reconnect()
cursor = db_connection.cursor()
cursor.execute(BelowCost)
below_cost = cursor.fetchall()
return render_template('reports/below_cost.html', records=below_cost)
"""
Christie
"""
# TODO: add two queries together
@app.route("/gross_income", methods=["GET"])
def gross_income_reports():
db_connection.reconnect()
cursor = db_connection.cursor()
cursor.execute(GrossIncome)
gross_income = cursor.fetchall()
return render_template('reports/gross_income.html', records=gross_income)
"""
Christie
"""
@app.route("/monthly_sale", methods=["GET"])
def monthly_sale_reports():
db_connection.reconnect()
cursor = db_connection.cursor()
cursor.execute(MonthlySale)
monthly_sale = cursor.fetchall()
return render_template('reports/monthly_sale.html', records=monthly_sale)
"""
Christie
"""
@app.route("/repair_reports", methods=["GET"])
def repair_reports():
db_connection.reconnect()
cursor = db_connection.cursor()
    cursor.execute(RepairByType, ('SUV',))
repair_by_type = cursor.fetchall()
    cursor.execute(RepairByManufacturer, ('Honda',))
repair_by_manufacturer = cursor.fetchall()
return render_template('reports/repair_reports.html', records=repair_by_manufacturer)
"""
Christie
"""
@app.route("/avg_inventory", methods=["GET"])
def avg_inventory_reports():
db_connection.reconnect()
cursor = db_connection.cursor()
cursor.execute(AverageInventoryTime)
avg_inventory = cursor.fetchall()
return render_template('reports/avg_inventory.html', records=avg_inventory)
"""
Christie
"""
@app.route("/sales_by_color", methods=["GET"])
def sales_by_color_reports():
db_connection.reconnect()
cursor = db_connection.cursor()
cursor.execute(SalesByColor)
sales_by_color = cursor.fetchall()
return render_template('reports/sales_by_color.html', records=sales_by_color)
"""
Christie
"""
@app.route("/login", methods=["POST", "GET"])
def login():
db_connection.reconnect()
if request.method == "POST":
username = request.form["username"]
password = request.form["password"]
cur = db_connection.cursor()
cur.execute("SELECT * FROM RegisteredUser WHERE username=%s AND user_password=%s", (username, password))
data = cur.fetchone()
if data:
session['logged_in'] = True
session['username'] = data[0]
session['role'] = data[4]
flash('Login Successfully', 'success')
return redirect('home')
else:
flash('Invalid Login. Try Again', 'danger')
return render_template("login.html")
else:
return render_template("login.html")
"""
Christie
"""
# logout
@app.route("/logout")
def logout():
if 'switch_to_role' in session:
session.pop('switch_to_role')
return redirect('home')
else:
session.clear()
flash('You are now logged out', 'success')
return redirect(url_for('login'))
"""
Christie
"""
@app.route('/register', methods=['POST', 'GET'])
def register():
status = False
if request.method == 'POST':
name = request.form["username"]
email = request.form["email"]
password = request.form["password"]
# cur = mysql.connection.cursor()
# cur.execute("insert into users(UNAME,UPASS,EMAIL) values(%s,%s,%s)", (name, pwd, email))
# mysql.connection.commit()
# cur.close()
flash('Registration Successfully. Login Here...', 'success')
return redirect('login')
return render_template("register.html", status=status)
"""
Christie
"""
@app.route('/add_vehicle', methods=['POST'])
def add_vehicle():
if request.method == 'POST':
vin = request.form["vin"]
invoice_price = request.form["invoice_price"]
manu_name = request.form["manu_name"]
inbound_date = request.form["inbound_date"]
model_year = request.form["model_year"]
model_name = request.form["model_name"]
optional_description = request.form["optional_description"]
vehicleTypeID = request.form["vehicleTypeID"]
vehicleInputterID = request.form["vehicleInputterID"]
session['vin'] = vin
cur = db_connection.cursor()
cur.execute("insert into Vehicle(VIN, invoice_price, manu_name, inbound_date, model_year, model_name, optional_description, vehicleTypeID, vehicleInputterID)\
values(%s,%s,%s,%s,%s,%s,%s,%s,%s)", (
vin, invoice_price, manu_name, inbound_date, model_year, model_name, optional_description, vehicleTypeID,
vehicleInputterID))
db_connection.commit()
cur.close()
flash('Registration Successfully. Login Here...', 'success')
return redirect('view_vehicle')
return render_template("vehicle.html")
"""
Christie
"""
@app.route('/add_customer', methods=['POST', 'GET'])
def add_customer():
if request.method == 'POST':
street_address = request.form["street_address"]
city = request.form["city"]
state = request.form["state"]
postal_code = request.form["postal_code"]
email_address = request.form["email_address"]
phone_number = request.form["phone_number"]
is_individual = request.form['is_individual']
cur = db_connection.cursor()
cur.execute("INSERT INTO Customer(street_address, city, state, postal_code, email_address, phone_number, is_individual)\
values(%s,%s,%s,%s,%s,%s,%s)",
(street_address, city, state, postal_code, email_address, phone_number, is_individual))
db_connection.commit()
cur.close()
flash('Registration Successfully.', 'success')
if is_individual == "1":
return redirect('add_individual')
else:
return redirect('add_business')
return render_template('register_customer.html')
"""
Christie
"""
@app.route('/add_individual', methods=['POST', 'GET'])
def add_individual():
if request.method == 'POST':
db_connection.reconnect()
cur = db_connection.cursor()
driver_license = request.form["driver_license"]
ind_first_name = request.form['ind_first_name']
ind_last_name = request.form['ind_last_name']
cur.execute("SELECT COUNT(*) FROM Customer")
customer_count = cur.fetchone()
cur.execute(InsertIndividual, (driver_license, str(customer_count[0]), ind_first_name, ind_last_name))
db_connection.commit()
cur.close()
flash('Individual {} {} has been added successfully'.format(ind_first_name, ind_last_name), 'success')
return render_template('register_individual.html')
"""
Christie
"""
@app.route('/add_business', methods=['POST', 'GET'])
def add_business():
if request.method == 'POST':
db_connection.reconnect()
cur = db_connection.cursor()
tax_id = request.form['tax_id']
business_name = request.form['business_name']
title = request.form['title']
contact_name = request.form['contact_name']
cur.execute("SELECT COUNT(*) FROM Customer")
customer_count = cur.fetchone()
cur.execute(InsertBusiness, (tax_id, str(customer_count[0]), business_name, title, contact_name))
db_connection.commit()
cur.close()
flash('Business {} {} has been added successfully'.format(business_name, title), 'success')
return render_template('register_business.html')
"""
Christie
"""
@app.route('/view_vehicle', methods=['GET'])
def view_vehicle():
db_connection.reconnect()
cursor = db_connection.cursor()
vin = session['vin']
cursor.execute("SELECT * FROM Vehicle WHERE VIN=%s", (vin,))
row_of_car = cursor.fetchone()
info = {
'vin': row_of_car[0],
'invoice_price': row_of_car[1],
'manu_name': row_of_car[2],
'inbound_date': row_of_car[3],
'model_year': row_of_car[4],
'model_name': row_of_car[5],
'optional_description': row_of_car[6],
'vehicle_type': row_of_car[7],
'vehicle_type_id': row_of_car[8]
}
return render_template("vehicle_details.html", params=info)
"""
Christie
"""
@app.route("/search_data", methods=["POST", "GET"])
def public_search():
if request.method == 'POST':
print(request.form)
# vin = request.form['vin']
# vehicle_type = request.form['vehicle_type']
# manufacturer = request.form['manufacturer']
# model_year = request.form['model_year']
# color = request.form['color']
# list_price = request.form['list_price']
# key_word = request.form['key_word']
# db_connection.reconnect()
# cursor = db_connection.cursor()
# cursor.execute("SELECT * FROM Vehicle WHERE VIN=%s", (vin,))
# matches = cursor.fetchall()
# records = []
# for m in matches:
# info = {
# 'vin': m[0],
# 'invoice_price': m[1],
# 'manu_name': m[2],
# 'inbound_date': m[3],
# 'model_year': m[4],
# 'model_name': m[5],
# 'optional_description': m[6],
# 'vehicle_type': m[7],
# 'vehicle_type_id': m[8]
# }
# records.append(info)
records = []
if session['role'] == SALESPERSON:
return render_template("salesperson_filter_results.html", records=records)
return render_template("manager_filter_results.html", records=records)
"""
Christie
"""
@app.route("/search_customer", methods=["POST", "GET"])
def search_customer():
db_connection.reconnect()
if request.method == 'POST':
driver_license = request.form['driver_license']
tax_id = request.form['tax_id']
cursor = db_connection.cursor()
cursor.execute(FilterCustomer, (driver_license,))
customers = cursor.fetchall()
print("[Search Customer]: driver license: {}, tax_id: {}".format(driver_license, tax_id))
records = []
for customer in customers:
info = {
'customer_id': customer[0],
'street_address': customer[1],
'city': customer[2],
'state': customer[3],
'postal_code': customer[4],
'email': customer[5],
'phone': customer[6],
'is_individual': customer[7]
}
records.append(info)
if session['role'] == SALESPERSON:
return render_template("salesperson_customer_filter_results.html", records=records,
vin=session['propose_to_sale'])
return render_template("customer_filter_results.html", records=records)
@app.route("/sale_vehicle/<select_vin>", methods=["POST"])
def sale_vehicle(select_vin):
session['propose_to_sale'] = select_vin
return render_template("salesperson_customer_search.html")
@app.route('/add_order/<vin>/<customer_id>', methods=['GET'])
def add_order(vin, customer_id):
current_date = datetime.datetime.now()
return render_template("order.html", vin=vin, customer_id=customer_id, role='dyu', current_date=current_date)
# sold_price > 95% * invoice_price
@app.route('/submit_order', methods=['POST'])
def submit_order():
current_date = datetime.datetime.now()
if request.method == 'POST':
vin = request.form['vin']
sales_inputter_id = request.form['sales_inputter_id']
sold_price = request.form['sold_price']
customer_id = request.form['customer_id']
db_connection.reconnect()
cur = db_connection.cursor()
cur.execute(InsertPurchase, (sales_inputter_id, vin, customer_id, current_date, sold_price))
db_connection.commit()
cur.close()
        flash('Order submitted successfully!', 'info')
return redirect('home')
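# A hedged sketch of the pricing rule noted above submit_order (sold_price must
# exceed 95% of invoice_price). The helper name below is illustrative only and is
# not wired into the route; a real check would also handle parse errors.
def _sold_price_is_valid(sold_price, invoice_price):
    """Return True when the proposed sold price clears the 95% floor."""
    return float(sold_price) > 0.95 * float(invoice_price)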
@app.route("/switch_role", methods=["POST"])
def switch_role():
session['switch_to_role'] = request.form['switch']
print(session)
return redirect(request.referrer)
"""
Christie
"""
def dynamic_dropdown():
db_connection.reconnect()
cursor = db_connection.cursor()
cursor.execute(SelectDistinctVIN)
vin = cursor.fetchall()
cursor.execute(SelectDistinctTypeName)
vehicles_types = cursor.fetchall()
cursor.execute(SelectDistinctModelYear)
model_years = cursor.fetchall()
cursor.execute(SelectDistinctColor)
colors = cursor.fetchall()
cursor.execute(SelectDistinctManufacturer)
manufacturers = cursor.fetchall()
list_vin = map(lambda x: x[0], vin)
list_vehicles_types = map(lambda x: x[0], vehicles_types)
list_model_year = map(lambda x: x[0], model_years)
list_of_colors = map(lambda x: x[0], colors)
list_of_manufacturers = map(lambda x: x[0], manufacturers)
d = {
'vin' : list_vin,
'vehicles_types' : list_vehicles_types,
'model_year': list_model_year,
'colors': list_of_colors,
'manufacturers': list_of_manufacturers
}
return d
@app.route('/home', methods=['GET'])
@is_logged_in
def index():
role = session['role']
dropdown_data = dynamic_dropdown()
if role == MANAGER or roland_login_as_other(session, MANAGER):
available_car_amount = calculate_available_vehicles()
return render_template("manager.html",
vin=dropdown_data.get('vin', []),
colors=dropdown_data.get('colors', []),
manufacturers=dropdown_data.get('manufacturers', []),
vehicles_types=dropdown_data.get('vehicles_types', []),
model_year=dropdown_data.get('model_year', []),
available_car_amount=available_car_amount)
elif role == INVENTORY_CLERK or roland_login_as_other(session, INVENTORY_CLERK):
return render_template("clerk.html", params=role)
elif role == SERVICE_WRITER or roland_login_as_other(session, SERVICE_WRITER):
return render_template("writer.html", params=role)
elif role == SALESPERSON or roland_login_as_other(session, SALESPERSON):
return render_template("salesperson.html", params=role, colors=Colors, manufacturers=Manufacturer,
vehicles_types=VehicleTypes)
elif role == ROLAND_AROUND:
return render_template('roland.html', colors=Colors, manufacturers=Manufacturer, vehicles_types=VehicleTypes)
|
py | b40498ee6e779130b2eb114b26c26f888308176b | import requests
from pytest import fixture, mark, raises
from scanapi.errors import InvalidPythonCodeError
from scanapi.evaluators import CodeEvaluator
@mark.describe("code evaluator")
@mark.describe("evaluate")
class TestEvaluate:
@fixture
def response(self, requests_mock):
requests_mock.get("http://test.com", text="abcde")
return requests.get("http://test.com")
test_data = ["no code", "${CODE}", "${code}", "{{code}}", 10, []]
@mark.context("when sequence does not match the pattern")
@mark.it("should return sequence")
@mark.parametrize("sequence", test_data)
def test_should_return_sequence(self, sequence):
assert CodeEvaluator.evaluate(sequence, {}) == sequence
test_data = [
("${{1 == 1}}", (True, None)),
("${{1 == 4}}", (False, "1 == 4")),
]
@mark.context("when sequence matches the pattern")
@mark.context("when it is a test case")
@mark.context("when code does not contain pre saved response")
@mark.it("should return assert results")
@mark.parametrize("sequence, expected", test_data)
def test_should_return_assert_results(self, sequence, expected):
assert (
CodeEvaluator.evaluate(sequence, {}, is_a_test_case=True)
== expected
)
test_data = [
("${{response.text == 'abcde'}}", (True, None)),
("${{response.url == 'http://test.com/'}}", (True, None),),
(
"${{response.status_code == 300}}",
(False, "response.status_code == 300"),
),
("${{response.url == 'abc'}}", (False, "response.url == 'abc'"),),
]
@mark.context("when sequence matches the pattern")
@mark.context("when it is a test case")
@mark.context("when code contains pre saved response")
@mark.it("should return assert results")
@mark.parametrize("sequence, expected", test_data)
def test_should_return_assert_results_2(self, sequence, expected, response):
assert (
CodeEvaluator.evaluate(
sequence, {"response": response}, is_a_test_case=True,
)
== expected
)
test_data = [
("${{1/0}}", {}, True),
("${{response.url == 'abc'}}", {}, True),
("${{foo = 'abc'}}", {}, True),
]
@mark.context("when sequence matches the pattern")
@mark.context("when it is a test case")
@mark.context("when code breaks")
@mark.it("should raises invalid python code error")
@mark.parametrize("sequence, spec_vars, is_a_test_case", test_data)
def test_should_raises_invalid_python_code_error(
self, sequence, spec_vars, is_a_test_case
):
with raises(InvalidPythonCodeError) as excinfo:
CodeEvaluator.evaluate(sequence, spec_vars, is_a_test_case)
assert isinstance(excinfo.value, InvalidPythonCodeError)
test_data = [("${{1 + 1}}", "2"), ("${{'hi'*4}}", "hihihihi")]
@mark.context("when sequence matches the pattern")
@mark.context("when it is not a test case")
@mark.context("when code does not contain pre saved response")
@mark.it("should return evaluated code")
@mark.parametrize("sequence, expected", test_data)
def test_should_return_evaluated_code(self, sequence, expected):
assert CodeEvaluator.evaluate(sequence, {}) == expected
test_data = [
("${{response.text}}", "abcde"),
("${{response.status_code}}", "200"),
("${{response.text + 'xpto'}}", "abcdexpto"),
("${{'xpto' + response.text}}", "xptoabcde"),
("${{1+1}}", "2"),
]
@mark.context("when sequence matches the pattern")
@mark.context("when it is not a test case")
@mark.context("when code contains pre saved response")
@mark.it("should return evaluated code")
@mark.parametrize("sequence, expected", test_data)
def test_should_return_evaluated_code_2(self, sequence, expected, response):
assert (
CodeEvaluator.evaluate(sequence, {"response": response}) == expected
)
|
py | b4049905657d926fb4dd1ba79886578e6b691dcc | """
Module that interacts with the orchestrator CLI.
Provide the interfaces to ceph orch and in turn manage the orchestration engine.
"""
import logging
from datetime import datetime, timedelta
from json import loads
from time import sleep
from typing import List
from ceph.ceph import ResourceNotFoundError
from .ceph import CephCLI
from .ls import LSMixin
from .ps import PSMixin
from .remove import RemoveMixin
LOG = logging.getLogger()
class Orch(LSMixin, PSMixin, RemoveMixin, CephCLI):
"""Represent ceph orch command."""
direct_calls = ["ls", "ps"]
def check_service_exists(
self, service_name: str, ids: List[str], timeout: int = 300, interval: int = 5
) -> bool:
"""
Verify the provided service is running for the given list of ids.
Args:
service_name: The name of the service to be checked.
ids: The list of daemons to be checked for that service.
timeout: In seconds, the maximum allowed time. By default 5 minutes
interval: In seconds, the polling interval time.
Returns:
True if the service and the list of daemons are running else False.
"""
end_time = datetime.now() + timedelta(seconds=timeout)
while end_time > datetime.now():
sleep(interval)
out, err = self.ps({"base_cmd_args": {"format": "json"}})
out = loads(out)
daemons = [d for d in out if d.get("daemon_type") == service_name]
count = 0
for _id in ids:
for daemon in daemons:
if (
_id in daemon["daemon_id"]
and daemon["status_desc"] == "running"
):
count += 1
LOG.info("%s/%s %s daemon(s) up... retrying", count, len(ids), service_name)
if count == len(ids):
return True
# Identify the failure
out, err = self.ls({"base_cmd_args": {"format": "json"}})
for item in loads(out):
if (
service_name in item.get("service_type")
and item["status"].get("running") == 0
):
LOG.error("Service status(es): %s", item)
LOG.error("Service event(s): %s", item["events"])
return False
def get_role_service(self, service_name: str) -> str:
"""
Get service info by name.
Args:
service_name: service name
Returns:
service
Raises:
            ResourceNotFoundError: when no resource matches the provided name.
"""
out, _ = self.ls()
for svc in loads(out):
if service_name in svc.get("service_name"):
return svc
raise ResourceNotFoundError(f"No service names matched {service_name}")
def check_service(
self, service_name: str, timeout: int = 300, interval: int = 5, exist=True
) -> bool:
"""
        Check for the service's existence based on the ``exist`` parameter:
        if ``exist`` is True, validate its presence;
        otherwise, validate its removal.
Args:
service_name: service name
timeout: timeout in seconds
interval: interval in seconds
exist: boolean
Returns:
            True once the expected state is observed, else False after the timeout.
"""
end_time = datetime.now() + timedelta(seconds=timeout)
while end_time > datetime.now():
sleep(interval)
out, err = self.ls({"base_cmd_args": {"format": "json"}})
out = loads(out)
            service = [d for d in out if d.get("service_name") == service_name]
            # ``service`` is a (possibly empty) list of matching entries; compare its
            # truthiness against the expected state rather than testing string
            # membership in a list of dicts, which never matches.
            if not service and not exist:
                return True
            elif service and exist:
                return True
            LOG.info("[%s] check for existence: %s, retrying", service_name, exist)
return False
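# A hedged usage sketch of the checks above; how an ``Orch`` instance is built
# depends on the surrounding test harness, so construction is elided and the
# service/daemon names are illustrative only:
#
#   orch = Orch(...)  # provided by the test framework
#   assert orch.check_service_exists("mgr", ids=["node1", "node2"], timeout=600)
#   assert orch.check_service("mgr", exist=True)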
|
py | b40499d55d31928d5a2cba9778e3acd073e6c615 | import torch
from torchvision import models
class resnet18(torch.nn.Module):
def __init__(self, pretrained=True):
super().__init__()
self.features = models.resnet18(pretrained=pretrained)
self.conv1 = self.features.conv1
self.bn1 = self.features.bn1
self.relu = self.features.relu
self.maxpool1 = self.features.maxpool
self.layer1 = self.features.layer1
self.layer2 = self.features.layer2
self.layer3 = self.features.layer3
self.layer4 = self.features.layer4
def forward(self, input):
x = self.conv1(input) # 1 / 2
x = self.relu(self.bn1(x))
x = self.maxpool1(x)
feature1 = self.layer1(x) # 1 / 4
feature2 = self.layer2(feature1) # 1 / 8
feature3 = self.layer3(feature2) # 1 / 16
feature4 = self.layer4(feature3) # 1 / 32
# global average pooling to build tail
tail = torch.mean(feature4, 3, keepdim=True)
tail = torch.mean(tail, 2, keepdim=True)
return feature3, feature4, tail
class resnet50(torch.nn.Module):
def __init__(self, pretrained=True):
super().__init__()
self.features = models.resnet50(pretrained=pretrained)
self.conv1 = self.features.conv1
self.bn1 = self.features.bn1
self.relu = self.features.relu
self.maxpool1 = self.features.maxpool
self.layer1 = self.features.layer1
self.layer2 = self.features.layer2
self.layer3 = self.features.layer3
self.layer4 = self.features.layer4
def forward(self, input):
x = self.conv1(input) # 1 / 2
x = self.relu(self.bn1(x))
x = self.maxpool1(x)
feature1 = self.layer1(x) # 1 / 4
feature2 = self.layer2(feature1) # 1 / 8
feature3 = self.layer3(feature2) # 1 / 16
feature4 = self.layer4(feature3) # 1 / 32
# global average pooling to build tail
tail = torch.mean(feature4, 3, keepdim=True)
tail = torch.mean(tail, 2, keepdim=True)
return feature3, feature4, tail
class resnet101(torch.nn.Module):
def __init__(self, pretrained=True):
super().__init__()
self.features = models.resnet101(pretrained=pretrained)
self.conv1 = self.features.conv1
self.bn1 = self.features.bn1
self.relu = self.features.relu
self.maxpool1 = self.features.maxpool
self.layer1 = self.features.layer1
self.layer2 = self.features.layer2
self.layer3 = self.features.layer3
self.layer4 = self.features.layer4
def forward(self, input):
x = self.conv1(input)
x = self.relu(self.bn1(x))
x = self.maxpool1(x)
feature1 = self.layer1(x) # 1 / 4
feature2 = self.layer2(feature1) # 1 / 8
feature3 = self.layer3(feature2) # 1 / 16
feature4 = self.layer4(feature3) # 1 / 32
# global average pooling to build tail
# https://paperswithcode.com/method/global-average-pooling
# from (n,d,r,c) to (n,d,1,1)
tail = torch.mean(feature4, 3, keepdim=True)
tail = torch.mean(tail, 2, keepdim=True)
return feature3, feature4, tail
def build_contextpath(name):
model = {
"resnet18": resnet18(pretrained=True),
"resnet50": resnet50(pretrained=True),
# "resnet101": resnet101(pretrained=True),
}
return model[name]
if __name__ == "__main__":
#
model_18 = build_contextpath("resnet18")
model_50 = build_contextpath("resnet50")
x = torch.rand(1, 3, 256, 256)
y_50 = model_50(x)
print(y_50)
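    # Hedged addition: print the shapes of the three returned tensors. For a
    # 256x256 input with resnet50 these should be (1, 1024, 16, 16) for feature3,
    # (1, 2048, 8, 8) for feature4 and (1, 2048, 1, 1) for the pooled tail.
    for name, t in zip(("feature3", "feature4", "tail"), y_50):
        print(name, tuple(t.shape))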
|
py | b4049a25c4dea25e177e6ce069e34094942d55de | """
Source.py
Author: Jordan Mirocha
Affiliation: University of Colorado at Boulder
Created on: Sun Jul 22 16:28:08 2012
Description: Initialize a radiation source.
"""
import re, os
import numpy as np
from scipy.integrate import quad
from ..util import ParameterFile
from ..physics.Hydrogen import Hydrogen
from ..physics.Cosmology import Cosmology
from ..physics.Constants import erg_per_ev, E_LL
from ..static.IntegralTables import IntegralTable
from ..static.InterpolationTables import LookupTable
from ..util.SetDefaultParameterValues import SourceParameters, \
CosmologyParameters
from ..physics.CrossSections import PhotoIonizationCrossSection as sigma_E
try:
import h5py
except ImportError:
pass
np.seterr(all='ignore') # exp overflow occurs when integrating BB
# will return 0 as it should for x large
cosmo_pars = CosmologyParameters()
class Source(object):
def __init__(self, grid=None, logN=None, init_tabs=True):
"""
Initialize a radiation source object.
        .. note:: This is inherited by all other ares.sources classes.
Parameters
----------
grid: rt1d.static.Grid.Grid instance
logN: column densities over which to tabulate integral quantities
"""
# Update cosmological parameters
# Why is this necessary? J.M.12.27.2015
for par in cosmo_pars:
if par in self.pf:
continue
self.pf[par] = cosmo_pars[par]
# Modify parameter file if spectrum_file provided
#self._load_spectrum()
# Correct emission limits if none were provided
self.Emin = self.pf['source_Emin']
self.Emax = self.pf['source_Emax']
self.logEmin = np.log10(self.Emin)
self.logEmax = np.log10(self.Emax)
        if self.pf['source_EminNorm'] is None:
            self.pf['source_EminNorm'] = self.pf['source_Emin']
        if self.pf['source_EmaxNorm'] is None:
            self.pf['source_EmaxNorm'] = self.pf['source_Emax']
self.EminNorm = self.pf['source_EminNorm']
self.EmaxNorm = self.pf['source_EmaxNorm']
# Number of frequencies
#if self.discrete:
# self.E = np.array(self.pf['source_E'])
# self.LE = np.array(self.pf['source_LE'])
# self.Nfreq = len(self.E)
#
#if self.src._name == 'DiffuseSource':
# self.ionization_rate = self.src.ionization_rate
# self.secondary_ionization_rate = self.src.secondary_ionization_rate
# self.heating_rate = self.src.heating_rate
#
#self.Lbol = self.Lbol0 = self.BolometricLuminosity(0.0)
# Create lookup tables for integral quantities
if init_tabs and grid is not None:
self._create_integral_table(logN=logN)
@property
def cosm(self):
if not hasattr(self, '_cosm'):
if self.grid is None:
self._cosm = Cosmology(
omega_m_0=self.pf['omega_m_0'],
omega_l_0=self.pf['omega_l_0'],
omega_b_0=self.pf['omega_b_0'],
hubble_0=self.pf['hubble_0'],
helium_by_number=self.pf['helium_by_number'],
cmb_temp_0=self.pf['cmb_temp_0'],
approx_highz=self.pf['approx_highz'],
sigma_8=self.pf['sigma_8'],
primordial_index=self.pf['primordial_index'])
else:
self._cosm = self.grid.cosm
return self._cosm
@property
def multi_freq(self):
if not hasattr(self, '_multi_freq'):
self._multi_freq = self.discrete and not self.pf['source_multigroup']
return self._multi_freq
@property
def multi_group(self):
if not hasattr(self, '_multi_group'):
self._multi_group = self.discrete and self.pf['source_multigroup']
return self._multi_group
@property
def ionizing(self):
# See if source emits ionizing photons
# Should also be function of absorbers
if not hasattr(self, '_ionizing'):
self._ionizing = self.pf['source_Emax'] > E_LL
return self._ionizing
@property
def grid(self):
if not hasattr(self, '_grid'):
self._grid = None
return self._grid
@grid.setter
def grid(self, value):
self._grid = value
@property
def discrete(self):
if not hasattr(self, '_discrete'):
            self._discrete = (self.pf['source_E'] is not None) #\
#or self.pf['optically_thin']
return self._discrete
@property
def continuous(self):
if not hasattr(self, '_continuous'):
self._continuous = not self.discrete
return self._continuous
@property
def hydr(self):
if not hasattr(self, '_hydr'):
self._hydr = None
return self._hydr
@hydr.setter
def hydr(self, value):
self._hydr = value
@property
def frec(self):
"""
Compute average recycling fraction (i.e., spectrum-weighted frec).
"""
if self.hydr is None:
return None
n = np.arange(2, self.hydr.nmax)
        # list comprehensions keep this working under Python 3, where map() is lazy
        En = np.array([self.hydr.ELyn(nn) for nn in n])
        In = np.array([self.Spectrum(EE) for EE in En]) / En
        fr = np.array([self.hydr.frec(nn) for nn in n])
return np.sum(fr * In) / np.sum(In)
@property
def intrinsic_hardening(self):
if not hasattr(self, '_intrinsic_hardening'):
if 'source_hardening' in self.pf:
self._intrinsic_hardening = \
self.pf['source_hardening'] == 'intrinsic'
else:
self._intrinsic_hardening = False
return self._intrinsic_hardening
def _hardening_factor(self, E):
return np.exp(-10.**self.logN \
* (sigma_E(E, 0) + self.cosm.y * sigma_E(E, 1)))
@property
def logN(self):
if not hasattr(self, '_logN'):
if 'source_logN' in self.pf:
self._logN = self.pf['source_logN']
else:
self._logN = -np.inf
return self._logN
@property
def _normL(self):
if not hasattr(self, '_normL_'):
if self.intrinsic_hardening:
self._normL_ = 1. / quad(self._Intensity,
self.pf['source_EminNorm'], self.pf['source_EmaxNorm'])[0]
else:
integrand = lambda EE: self._Intensity(EE) / self._hardening_factor(EE)
self._normL_ = 1. / quad(integrand,
self.pf['source_EminNorm'], self.pf['source_EmaxNorm'])[0]
return self._normL_
#def _load_spectrum(self):
# """ Modify a few parameters if spectrum_file provided. """
#
# fn = self.pf['spectrum_file']
#
# if fn is None:
# return
#
# # Read spectrum - expect hdf5 with (at least) E, LE, and t datasets.
# if re.search('.hdf5', fn):
# f = h5py.File(fn)
# try:
# self.pf['tables_times'] = f['t'].value
# except:
# self.pf['tables_times'] = None
# self.pf['spectrum_evolving'] = False
#
# self.pf['spectrum_E'] = f['E'].value
# self.pf['spectrum_LE'] = f['LE'].value
# f.close()
#
# if len(self.pf['spectrum_LE'].shape) > 1 \
# and not self.pf['spectrum_evolving']:
# self.pf['spectrum_LE'] = self.pf['spectrum_LE'][0]
# else:
# spec = readtab(fn)
# if len(spec) == 2:
# self.pf['spectrum_E'], self.pf['spectrum_LE'] = spec
# else:
# self.pf['spectrum_E'], self.pf['spectrum_LE'], \
# self.pf['spectrum_t'] = spec
@property
def tables(self):
if not hasattr(self, '_tables'):
self._create_integral_table()
return self._tables
@property
def tab(self):
if not hasattr(self, '_tab'):
self._create_integral_table()
return self._tab
@property
def tabs(self):
if not hasattr(self, '_tabs'):
self._create_integral_table()
return self._tabs
def _create_integral_table(self, logN=None):
"""
Take tables and create interpolation functions.
"""
if self.discrete:
return
if self._name == 'diffuse':
return
if self.pf['source_table'] is None:
# Overide defaults if supplied - this is dangerous
if logN is not None:
self.pf.update({'tables_dlogN': [np.diff(tmp) for tmp in logN]})
self.pf.update({'tables_logNmin': [np.min(tmp) for tmp in logN]})
self.pf.update({'tables_logNmax': [np.max(tmp) for tmp in logN]})
# Tabulate away!
self._tab = IntegralTable(self.pf, self, self.grid, logN)
self._tabs = self.tab.TabulateRateIntegrals()
else:
self._tab = IntegralTable(self.pf, self, self.grid, logN)
self._tabs = self.tab.load(self.pf['source_table'])
self._setup_interp()
def _setup_interp(self):
self._tables = {}
for tab in self.tabs:
self._tables[tab] = \
LookupTable(self.pf, tab, self.tab.logN, self.tabs[tab],
self.tab.logx, self.tab.t)
@property
def sigma(self):
"""
Compute bound-free absorption cross-section for all frequencies.
"""
if not self.discrete:
return None
if not hasattr(self, '_sigma_all'):
            self._sigma_all = np.array([sigma_E(EE) for EE in self.E])
return self._sigma_all
@property
def Qdot(self):
"""
Returns number of photons emitted (s^-1) at all frequencies.
"""
if not hasattr(self, '_Qdot_all'):
self._Qdot_all = self.Lbol * self.LE / self.E / erg_per_ev
return self._Qdot_all
@property
def hnu_bar(self):
"""
Average ionizing (per absorber) photon energy in eV.
"""
if not hasattr(self, '_hnu_bar_all'):
self._hnu_bar_all = np.zeros_like(self.grid.zeros_absorbers)
self._qdot_bar_all = np.zeros_like(self.grid.zeros_absorbers)
for i, absorber in enumerate(self.grid.absorbers):
self._hnu_bar_all[i], self._qdot_bar_all[i] = \
self._FrequencyAveragedBin(absorber=absorber)
return self._hnu_bar_all
def AveragePhotonEnergy(self, Emin, Emax):
"""
Return average photon energy in supplied band.
"""
integrand = lambda EE: self.Spectrum(EE) * EE
norm = lambda EE: self.Spectrum(EE)
return quad(integrand, Emin, Emax)[0] / quad(norm, Emin, Emax)[0]
@property
def qdot_bar(self):
"""
Average ionizing photon luminosity (per absorber) in s^-1.
"""
if not hasattr(self, '_qdot_bar_all'):
hnu_bar = self.hnu_bar
return self._qdot_bar_all
@property
def sigma_bar(self):
"""
Frequency averaged cross section (single bandpass).
"""
if not hasattr(self, '_sigma_bar_all'):
self._sigma_bar_all = np.zeros_like(self.grid.zeros_absorbers)
for i, absorber in enumerate(self.grid.absorbers):
integrand = lambda x: self.Spectrum(x) \
* self.grid.bf_cross_sections[absorber](x) / x
self._sigma_bar_all[i] = self.Lbol \
* quad(integrand, self.grid.ioniz_thresholds[absorber],
self.Emax)[0] / self.qdot_bar[i] / erg_per_ev
return self._sigma_bar_all
@property
def sigma_tilde(self):
if not hasattr(self, '_sigma_tilde_all'):
self._sigma_tilde_all = np.zeros_like(self.grid.zeros_absorbers)
for i, absorber in enumerate(self.grid.absorbers):
integrand = lambda x: self.Spectrum(x) \
* self.grid.bf_cross_sections[absorber](x)
self._sigma_tilde_all[i] = quad(integrand,
self.grid.ioniz_thresholds[absorber], self.Emax)[0] \
/ self.fLbol_ionizing[i]
return self._sigma_tilde_all
@property
def fLbol_ionizing(self):
"""
Fraction of bolometric luminosity emitted above all ionization
thresholds.
"""
if not hasattr(self, '_fLbol_ioniz_all'):
self._fLbol_ioniz_all = np.zeros_like(self.grid.zeros_absorbers)
for i, absorber in enumerate(self.grid.absorbers):
self._fLbol_ioniz_all[i] = quad(self.Spectrum,
self.grid.ioniz_thresholds[absorber], self.Emax)[0]
return self._fLbol_ioniz_all
@property
def Gamma_bar(self):
"""
Return ionization rate (as a function of radius) assuming optical
depth to cells and of cells is small.
"""
if not hasattr(self, '_Gamma_bar_all'):
self._Gamma_bar_all = \
np.zeros([self.grid.dims, self.grid.N_absorbers])
for i, absorber in enumerate(self.grid.absorbers):
self._Gamma_bar_all[..., i] = self.Lbol * self.sigma_bar[i] \
* self.fLbol_ionizing[i] / 4. / np.pi / self.grid.r_mid**2 \
/ self.hnu_bar[i] / erg_per_ev
return self._Gamma_bar_all
@property
def gamma_bar(self):
"""
        Return secondary ionization rate (as a function of radius) assuming optical
depth to cells and of cells is small.
"""
if not hasattr(self, '_gamma_bar_all'):
self._gamma_bar_all = \
np.zeros([self.grid.dims, self.grid.N_absorbers,
self.grid.N_absorbers])
if not self.pf['secondary_ionization']:
return self._gamma_bar_all
for i, absorber in enumerate(self.grid.absorbers):
for j, otherabsorber in enumerate(self.grid.absorbers):
self._gamma_bar_all[..., i, j] = self.Gamma_bar[j] \
* (self.hnu_bar[j] * self.sigma_tilde[j] \
/ self.hnu_bar[i] / self.sigma_bar[j] \
- self.grid.ioniz_thresholds[otherabsorber] \
/ self.grid.ioniz_thresholds[absorber])
return self._gamma_bar_all
@property
def Heat_bar(self):
"""
        Return heating rate (as a function of radius) assuming optical
depth to cells and of cells is small.
"""
if not hasattr(self, '_Heat_bar_all'):
self._Heat_bar_all = \
np.zeros([self.grid.dims, self.grid.N_absorbers])
for i, absorber in enumerate(self.grid.absorbers):
self._Heat_bar_all[..., i] = self.Gamma_bar[..., i] \
* erg_per_ev * (self.hnu_bar[i] * self.sigma_tilde[i] \
/ self.sigma_bar[i] - self.grid.ioniz_thresholds[absorber])
return self._Heat_bar_all
def IonizingPhotonLuminosity(self, t=0, bin=None):
"""
Return Qdot (photons / s) for this source at energy E.
"""
if self.pf['source_type'] in [0, 1, 2]:
return self.Qdot[bin]
else:
# Currently only BHs have a time-varying bolometric luminosity
return self.BolometricLuminosity(t) * self.LE[bin] / self.E[bin] / erg_per_ev
#def _Intensity(self, E, i, Type, t=0, absorb=True):
# """
# Return quantity *proportional* to fraction of bolometric luminosity emitted
# at photon energy E. Normalization handled separately.
# """
#
# Lnu = self.src._Intensity(E, i, Type, t=t)
#
# # Apply absorbing column
# if self.SpectrumPars['logN'][i] > 0 and absorb:
# return Lnu * np.exp(-10.**self.SpectrumPars['logN'][i] \
# * (sigma_E(E, 0) + y * sigma_E(E, 1)))
# else:
# return Lnu
#
def Spectrum(self, E, t=0.0):
r"""
Return fraction of bolometric luminosity emitted at energy E.
Elsewhere denoted as :math:`I_{\nu}`, normalized such that
:math:`\int I_{\nu} d\nu = 1`
Parameters
----------
E: float
Emission energy in eV
t: float
Time in seconds since source turned on.
i: int
Index of component to include. If None, includes contribution
from all components.
Returns
-------
Fraction of bolometric luminosity emitted at E in units of
eV\ :sup:`-1`\.
"""
return self._normL * self._Intensity(E, t=t)
def BolometricLuminosity(self, t=0.0, M=None):
"""
Returns the bolometric luminosity of a source in units of erg/s.
For accreting black holes, the bolometric luminosity will increase
with time, hence the optional 't' and 'M' arguments.
"""
if self._name == 'bh':
return self.Luminosity(t, M)
else:
return self.Luminosity(t)
def _FrequencyAveragedBin(self, absorber='h_1', Emin=None, Emax=None,
energy_weighted=False):
"""
Bolometric luminosity / number of ionizing photons in spectrum in bandpass
spanning interval (Emin, Emax). Returns mean photon energy and number of
ionizing photons in band.
"""
if Emin is None:
Emin = max(self.grid.ioniz_thresholds[absorber], self.Emin)
if Emax is None:
Emax = self.Emax
if energy_weighted:
f = lambda x: x
else:
f = lambda x: 1.0
L = self.Lbol * quad(lambda x: self.Spectrum(x) * f(x), Emin, Emax)[0]
Q = self.Lbol * quad(lambda x: self.Spectrum(x) * f(x) / x, Emin,
Emax)[0] / erg_per_ev
return L / Q / erg_per_ev, Q
def dump(self, fn, E, clobber=False):
"""
Write SED out to file.
Parameters
----------
fn : str
Filename, suffix determines type. If 'hdf5' or 'h5' will write
to HDF5 file, otherwise, to ASCII.
E : np.ndarray
Array of photon energies at which to sample SED. Units = eV.
"""
        if os.path.exists(fn) and not clobber:
            raise OSError('%s exists!' % fn)
if re.search('.hdf5', fn) or re.search('.h5', fn):
out = 'hdf5'
else:
out = 'ascii'
        LE = list(map(self.Spectrum, E))
if out == 'hdf5':
f = h5py.File(fn, 'w')
f.create_dataset('E', data=E)
f.create_dataset('LE', data=LE)
f.close()
else:
f = open(fn, 'w')
            print("# E LE", file=f)
            for i, nrg in enumerate(E):
                print("%.8e %.8e" % (nrg, LE[i]), file=f)
            f.close()
        print("Wrote %s." % fn)
def sed_name(self, i=0):
"""
Return name of output file based on SED properties.
"""
name = '%s_logM_%.2g_Gamma_%.3g_fsc_%.3g_logE_%.2g-%.2g' % \
(self.SpectrumPars['type'][i], np.log10(self.src.M0),
self.src.spec_pars['alpha'][i],
self.src.spec_pars['fsc'][i], self.logEmin, self.logEmax)
return name
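# A hedged illustration (not part of ares) of what a concrete subclass is expected
# to provide on top of this base class: a parameter file ``pf``, a ``_name``, an
# unnormalized ``_Intensity`` and a ``Luminosity``. Names and numbers below are
# illustrative only:
#
#   class FlatSource(Source):
#       _name = 'toy'
#       def _Intensity(self, E, t=0.0):
#           return 1.0            # flat spectrum; _normL handles normalization
#       def Luminosity(self, t=0.0):
#           return 1e38           # erg/s, illustrative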
|
py | b4049a6c43d63551e7cff2562b057568e67922cf | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2021 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Mark Koennecke <[email protected]>
#
# *****************************************************************************
import time
from datetime import datetime
import numpy as np
from nicos import session
from nicos.core.device import Readable
from nicos.core.errors import NicosError
class NexusElementBase:
"""Interface class to define NeXus elements.
All NeXus elements ought to supply four methods:
- create() which is called when the NeXus structure is created and static
data is written
- update() for in place updating of already created items in the NeXus file
- append() increments the np counter when a new scan point is started
- results() saves the result of a scan points data
The default implementations of create(), results() and update() do nothing.
Overload when this is not sufficient.
"""
dtype = None
def __init__(self):
self.doAppend = False
self.np = 0
def create(self, name, h5parent, sinkhandler):
raise NotImplementedError()
def update(self, name, h5parent, sinkhandler, values):
pass
def results(self, name, h5parent, sinkhandler, results):
pass
def append(self, name, h5parent, sinkhandler, subset):
self.np = self.np + 1
def resize_dataset(self, dset):
idx = self.np + 1
if len(dset) < idx:
dset.resize((idx,))
def testAppend(self, sinkhandler):
self.doAppend = bool((hasattr(sinkhandler.startdataset,
'npoints') and
sinkhandler.startdataset.npoints > 1) or
hasattr(session, '_manualscan'))
self.np = 0
def determineType(self):
if self.dtype is None:
if isinstance(self.value, tuple):
if isinstance(self.value[0], int):
if self.value[0] < 2**32:
self.dtype = "int32"
else:
self.dtype = "int64"
elif isinstance(self.value[0], float):
self.dtype = "double"
elif isinstance(self.value[0], str):
self.dtype = "string"
if isinstance(self.value, str):
self.dtype = 'string'
def createAttributes(self, h5obj, sinkhandler):
if not hasattr(self, 'attrs'):
return
for key, val in self.attrs.items():
if isinstance(val, str):
val = NXAttribute(val, 'string')
val.create(key, h5obj, sinkhandler)
def scanlink(self, name, sinkhandler, h5parent, linkroot):
pass
class NXAttribute(NexusElementBase):
"""Placeholder for a NeXus Attribute."""
def __init__(self, value, dtype):
self.dtype = dtype
self.value = value
NexusElementBase.__init__(self)
def create(self, name, h5parent, sinkhandler):
if self.dtype == 'string':
h5parent.attrs[name] = np.string_(self.value)
else:
h5parent.attrs.create(name, self.value, dtype=self.dtype)
class ConstDataset(NexusElementBase):
"""Placeholder for a Dataset with a constant value."""
def __init__(self, value, dtype, **attrs):
self.value = value
self.dtype = dtype
self.attrs = {}
for key, val in attrs.items():
if not isinstance(val, NXAttribute):
val = NXAttribute(val, 'string')
self.attrs[key] = val
NexusElementBase.__init__(self)
def create(self, name, h5parent, sinkhandler):
if self.dtype == 'string':
dtype = 'S%d' % (len(self.value.encode('utf-8')) + 1)
dset = h5parent.create_dataset(name, (1,), dtype)
dset[0] = np.array(self.value.encode('utf-8'), dtype=dtype)
else:
dset = h5parent.create_dataset(name, (1,), dtype=self.dtype)
dset[0] = self.value
self.createAttributes(dset, sinkhandler)
class DeviceAttribute(NXAttribute):
"""Placeholder for a device attribute.
This creates a NeXus group or dataset attribute from the value or
parameter of the device.
"""
def __init__(self, device, parameter='value', dtype=None, defaultval=None):
NXAttribute.__init__(self, defaultval, dtype)
self.device = device
self.parameter = parameter
self.dtype = dtype
self.defaultval = defaultval
self.np = 0
self.doAppend = False
def create(self, name, h5parent, sinkhandler):
if (self.device, self.parameter) in sinkhandler.dataset.metainfo:
self.value = \
sinkhandler.dataset.metainfo[(self.device, self.parameter)][0]
else:
self.value = self.defaultval
if self.value is not None:
self.determineType()
NXAttribute.create(self, name, h5parent, sinkhandler)
class DeviceDataset(NexusElementBase):
"""Placeholder for a device.#
This creates a NeXus dataset from the value or a parameter of a device.
"""
def __init__(self, device, parameter='value', dtype=None, defaultval=None,
**attr):
self.device = device
self.parameter = parameter
self.dtype = dtype
self.defaultval = defaultval
self.attrs = {}
self.doAppend = False
for key, val in attr.items():
if not isinstance(val, NXAttribute):
val = NXAttribute(val, 'string')
self.attrs[key] = val
NexusElementBase.__init__(self)
def create(self, name, h5parent, sinkhandler):
if (self.device, self.parameter) in sinkhandler.dataset.metainfo:
self.value = sinkhandler.dataset.metainfo[
(self.device, self.parameter)]
else:
self.value = (self.defaultval,)
if self.value[0] is not None:
self.determineType()
else:
try:
dev = session.getDevice(self.device)
self.value = (getattr(dev, self.parameter, self.defaultval),)
self.determineType()
except Exception as e:
session.log.warning(
'Warning: failed to locate data for %s %s in NICOS (%s)',
self.device, self.parameter, e)
return
if self.parameter == 'value':
self.testAppend(sinkhandler)
if self.dtype == 'string':
dtype = 'S%d' % (len(self.value[0].encode('utf-8')) + 1)
dset = h5parent.create_dataset(name, (1,), dtype=dtype)
dset[0] = np.array(self.value[0].encode('utf-8'), dtype=dtype)
else:
if self.doAppend:
dset = h5parent.create_dataset(name, (1,), maxshape=(None,),
dtype=self.dtype)
else:
dset = h5parent.create_dataset(name, (1,), dtype=self.dtype)
dset[0] = self.value[0]
if 'units' not in self.attrs:
if self.parameter in ['target']:
try:
inf = session.getDevice(self.device).info()
self.attrs['units'] = NXAttribute(inf[0][3], 'string')
except NicosError:
pass
elif len(self.value) > 2:
dset.attrs['units'] = np.string_(self.value[2])
self.createAttributes(dset, sinkhandler)
def update(self, name, h5parent, sinkhandler, values):
if (self.device, self.parameter) in sinkhandler.dataset.metainfo:
self.value = sinkhandler.dataset.metainfo[
(self.device, self.parameter)]
dset = h5parent[name]
if self.dtype != 'string':
self.resize_dataset(dset)
dset[self.np] = self.value[0]
else:
            # Missing data here is normal
pass
def results(self, name, h5parent, sinkhandler, results):
if name not in h5parent:
# can happen, when we cannot find the device on creation
return
dset = h5parent[name]
for dev in sinkhandler.dataset.devices:
if dev.name == self.device:
value = dev.read()
if self.doAppend:
self.resize_dataset(dset)
dset[self.np] = value
def scanlink(self, name, sinkhandler, h5parent, linkroot):
for dev in sinkhandler.dataset.devices:
if dev.name == self.device:
dset = h5parent[name]
parent = sinkhandler.h5file[linkroot]
parent[name] = dset
dset.attrs['target'] = np.string_(dset.name)
class DetectorDataset(NexusElementBase):
"""Placeholder for a detector data dataset."""
def __init__(self, nicosname, dtype, **attr):
self.nicosname = nicosname
self.dtype = dtype
# Hack for countmode which is a short text
if self.dtype == 'string':
self.dtype = 'S30'
self.attrs = {}
for key, val in attr.items():
if not isinstance(val, NXAttribute):
val = NXAttribute(val, 'string')
self.attrs[key] = val
NexusElementBase.__init__(self)
# At creation time, I do not yet have a value for detector data. This is
# why the dtype needs to be specified. Values can only get written on
# update()
def create(self, name, h5parent, sinkhandler):
self.testAppend(sinkhandler)
if self.doAppend:
dset = h5parent.create_dataset(name, (1,), maxshape=(None,),
dtype=self.dtype)
else:
dset = h5parent.create_dataset(name, (1,), dtype=self.dtype)
self.createAttributes(dset, sinkhandler)
def update(self, name, h5parent, sinkhandler, values):
dset = h5parent[name]
if self.nicosname == 'mode':
m = list(sinkhandler.startdataset.preset.keys())[0]
if m == 't':
mode = 'timer'
else:
mode = 'monitor'
dset[0] = np.string_(mode)
elif self.nicosname == 'preset':
mp = sinkhandler.startdataset.preset.values()
self.resize_dataset(dset)
dset[0] = list(mp)[0]
else:
try:
val = sinkhandler.dataset.values[self.nicosname]
self.resize_dataset(dset)
dset[self.np] = val
except Exception:
# This is normal: the dataset called with
# SinkHandler.updateValues()
# does not necessarily contain all the data
pass
def results(self, name, h5parent, sinkhandler, results):
dset = h5parent[name]
if self.nicosname == 'mode':
m = list(sinkhandler.startdataset.preset.keys())[0]
if m == 't':
mode = 'timer'
else:
mode = 'monitor'
dset[0] = np.string_(mode)
elif self.nicosname == 'preset':
mp = sinkhandler.startdataset.preset.values()
dset[0] = list(mp)[0]
else:
try:
val = sinkhandler.dataset.values[self.nicosname]
if self.doAppend:
self.resize_dataset(dset)
dset[self.np] = val
except Exception:
session.log.warning('failed to find result for %s',
self.nicosname)
class ImageDataset(NexusElementBase):
"""Placeholder for a detector image."""
def __init__(self, detectorIDX, imageIDX, **attrs):
self.detectorIDX = detectorIDX
self.imageIDX = imageIDX
self.attrs = {}
self.doAppend = False
self.np = 0
self.valid = True
for key, val in attrs.items():
if not isinstance(val, NXAttribute):
val = NXAttribute(val, 'string')
self.attrs[key] = val
NexusElementBase.__init__(self)
def create(self, name, h5parent, sinkhandler):
self.testAppend(sinkhandler)
if len(sinkhandler.dataset.detectors) <= self.detectorIDX:
session.log.warning('Cannot find detector with ID %d',
self.detectorIDX)
self.valid = False
return
det = sinkhandler.dataset.detectors[self.detectorIDX]
arinfo = det.arrayInfo()
myDesc = arinfo[self.imageIDX]
rawshape = myDesc.shape
if self.doAppend:
shape = list(rawshape)
shape.insert(0, 1)
maxshape = list(rawshape)
maxshape.insert(0, None)
chonk = list(rawshape)
chonk.insert(0, 1)
dset = h5parent.create_dataset(name, shape, maxshape=maxshape,
chunks=tuple(chonk),
dtype=myDesc.dtype,
compression='gzip')
else:
dset = h5parent.create_dataset(name, rawshape,
chunks=tuple(rawshape),
dtype=myDesc.dtype,
compression='gzip')
self.createAttributes(dset, sinkhandler)
def resize_dataset(self, dset, sinkhandler):
det = sinkhandler.dataset.detectors[self.detectorIDX]
arinfo = det.arrayInfo()
myDesc = arinfo[self.imageIDX]
rawshape = myDesc.shape
idx = self.np + 1
shape = list(rawshape)
shape.insert(0, idx)
dset.resize(shape)
def results(self, name, h5parent, sinkhandler, results):
dset = h5parent[name]
det = sinkhandler.dataset.detectors[self.detectorIDX]
data = results.get(det.name)
if data is not None:
array = data[1][self.imageIDX]
if self.doAppend:
idx = self.np + 1
if len(dset) < idx:
self.resize_dataset(dset, sinkhandler)
dset[self.np] = array
else:
h5parent[name][...] = array
class NamedImageDataset(ImageDataset):
"""Placeholder for a detector image identified by name."""
def __init__(self, image_name, **attrs):
self._image_name = image_name
ImageDataset.__init__(self, -1, -1, **attrs)
def create(self, name, h5parent, sinkhandler):
detID = 0
imageID = 0
for det in sinkhandler.dataset.detectors:
arList = det.arrayInfo()
for ar in arList:
if ar.name == self._image_name:
self.detectorIDX = detID
self.imageIDX = imageID
break
imageID += 1
detID += 1
if self.detectorIDX == -1 or self.imageIDX == -1:
            session.log.warning('Cannot find named image %s', self._image_name)
self.valid = False
return
ImageDataset.create(self, name, h5parent, sinkhandler)
class NXLink(NexusElementBase):
"""Placeholder for a NeXus link.
I can only create it on update because the order of tree traversal is
undefined and in create() the object to link against may not have been
created yet.
"""
def __init__(self, target):
self.target = target
self.linkCreated = False
NexusElementBase.__init__(self)
def create(self, name, h5parent, sinkhandler):
        # linkCreated is only initialised in __init__() at template initialisation
        # time, so reset it here for every new file.
self.linkCreated = False
def update(self, name, h5parent, sinkhandler, values):
if not self.linkCreated:
try:
other = sinkhandler.h5file[self.target]
except KeyError:
session.log.warning(
'Cannot link %s to %s, target does not exist',
name, self.target)
return
h5parent[name] = other
other.attrs['target'] = np.string_(self.target)
self.linkCreated = True
class NXScanLink(NexusElementBase):
"""Placeholder to identify where the scan devices ought to be linked to."""
def __init__(self):
NexusElementBase.__init__(self)
def create(self, name, h5parent, sinkhandler):
pass
class NXTime(NexusElementBase):
"""Placeholder for a NeXus compatible time entry."""
def formatTime(self):
return datetime.now().isoformat(sep=' ', timespec='seconds')
def create(self, name, h5parent, sinkhandler):
time_str = self.formatTime()
dtype = 'S%d' % (len(time_str) + 5)
dset = h5parent.create_dataset(name, (1,), dtype=dtype)
dset[0] = np.string_(time_str)
def update(self, name, h5parent, sinkhandler, values):
if name.find('end') >= 0:
dset = h5parent[name]
dset[0] = np.string_(self.formatTime())
class StartTime(NXTime):
"""Place holder for the start time of a measurement."""
def __init__(self):
NXTime.__init__(self)
self.time = 0
def formatTime(self):
return datetime.fromtimestamp(self.time).isoformat(timespec='seconds')
def create(self, name, h5parent, sinkhandler):
self.time = sinkhandler.dataset.started
NXTime.create(self, name, h5parent, sinkhandler)
class EndTime(StartTime):
"""Place holder for the end time of a measurement."""
def create(self, name, h5parent, sinkhandler):
self.time = sinkhandler.dataset.finished or time.time()
NXTime.create(self, name, h5parent, sinkhandler)
def update(self, name, h5parent, sinkhandler, values):
self.time = sinkhandler.dataset.finished or time.time()
dset = h5parent[name]
dset[0] = np.string_(self.formatTime())
class NexusSampleEnv(NexusElementBase):
"""Placeholder for storing sample environment data.
It looks at the dataset.environment field and creates a NXlog structure
with the sample environment devices name. To this NXlog structure, incoming
data is appended whenever data can be found.
"""
def __init__(self, update_interval=10):
self._update_interval = update_interval
self._last_update = {}
NexusElementBase.__init__(self)
def createNXlog(self, h5parent, dev):
loggroup = h5parent.create_group(dev.name)
loggroup.attrs['NX_class'] = np.string_('NXlog')
dset = loggroup.create_dataset('time', (1,), maxshape=(None,),
dtype='float32')
dset[0] = .0
dset.attrs['start'] = time.strftime('%Y-%m-%d %H:%M:%S',
time.localtime(self.starttime))
dset = loggroup.create_dataset('value', (1,), maxshape=(None,),
dtype='float32')
dset[0] = dev.read()
self._last_update[dev.name] = time.time()
def create(self, name, h5parent, sinkhandler):
self.starttime = time.time()
for dev in sinkhandler.dataset.environment:
# There can be DeviceStatistics in the environment.
# We do not know how to write those
if isinstance(dev, Readable):
self.createNXlog(h5parent, dev)
def updatelog(self, h5parent, dataset):
current_time = time.time()
for devidx, dev in enumerate(dataset.environment):
if not isinstance(dev, Readable):
continue
loggroup = h5parent[dev.name]
dset = loggroup['value']
val = dataset.envvaluelist[devidx]
if val is None:
return
idx = len(dset)
# We need to control the amount of data written as update
# gets called frequently. This tests:
# - The value has changed at all
# - Against a maximum update interval
if val != dset[idx - 1] and \
current_time > self._last_update[dev.name] + self._update_interval:
dset.resize((idx + 1,))
dset[idx] = val
dset = loggroup['time']
dset.resize((idx + 1,))
dset[idx] = current_time - self.starttime
self._last_update[dev.name] = current_time
def update(self, name, h5parent, sinkhandler, values):
self.updatelog(h5parent, sinkhandler.dataset)
def results(self, name, h5parent, sinkhandler, results):
self.updatelog(h5parent, sinkhandler.dataset)
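# A hedged sketch of how these element classes are typically composed: the NeXus
# sink consumes a nested template dict whose leaves are the placeholder objects
# defined above. Group, device and attribute names below are illustrative only:
#
#   template = {
#       'NXentry:entry': {
#           'start_time': StartTime(),
#           'end_time': EndTime(),
#           'title': DeviceDataset('Exp', 'title', dtype='string'),
#           'sample:NXsample': {
#               'temperature': DeviceDataset('Ts', units=NXAttribute('K', 'string')),
#           },
#       },
#   }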
class CalcData(NexusElementBase):
""" Place holder base class for all classes which calculate data for the
NeXus file. Derived classes have to implement two methods:
- _shape(dataset) which returns the shape of the calculate data as a tuple
- _calcData(dataset) which actually calculates the data value. The return
value must be a numpy array.
Derived classes also must make sure that self.dtype points to a sensible
value. The default is float32.
"""
def __init__(self, **attrs):
self.attrs = {}
self.doAppend = False
self.np = 0
self.valid = True
for key, val in attrs.items():
if not isinstance(val, NXAttribute):
val = NXAttribute(val, 'string')
self.attrs[key] = val
self.dtype = "float32"
NexusElementBase.__init__(self)
def create(self, name, h5parent, sinkhandler):
self.testAppend(sinkhandler)
if not self.valid:
return
rawshape = self._shape(sinkhandler.dataset)
if self.doAppend:
shape = list(rawshape)
shape.insert(0, 1)
maxshape = list(rawshape)
maxshape.insert(0, None)
chonk = list(rawshape)
chonk.insert(0, 1)
dset = h5parent.create_dataset(name, shape, maxshape=maxshape,
chunks=tuple(chonk),
dtype=self.dtype,
compression='gzip')
else:
dset = h5parent.create_dataset(name, rawshape,
chunks=tuple(rawshape),
dtype=self.dtype,
compression='gzip')
self.createAttributes(dset, sinkhandler)
def results(self, name, h5parent, sinkhandler, _results):
if not self.valid:
return
data = self._calcData(sinkhandler.dataset)
if data is not None:
dset = h5parent[name]
if self.doAppend:
if len(dset) < self.np + 1:
self.resize_dataset(dset, sinkhandler)
dset[self.np] = data
else:
h5parent[name][...] = data
def resize_dataset(self, dset, sinkhandler):
rawshape = self._shape(dset)
idx = self.np + 1
shape = list(rawshape)
shape.insert(0, idx)
dset.resize(shape)
def _shape(self, dataset):
raise NotImplementedError("Derived class must implement _shape(dset)")
def _calcData(self, dataset):
raise NotImplementedError("Derived class must implement "
"_calcData(dset)")
|
py | b4049bedbdc55bb2084cc99ef592609d04f3861b | def split_by_position(linked_promotions, context):
"""
Split the list of promotions into separate lists, grouping
by position, and write these lists to the passed context.
"""
for linked_promotion in linked_promotions:
promotion = linked_promotion.content_object
if not promotion:
continue
key = 'promotions_%s' % linked_promotion.position.lower()
if key not in context:
context[key] = []
context[key].append(promotion)
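# A minimal, self-contained sketch of the grouping behaviour; the SimpleNamespace
# stand-ins below are illustrative, not the real promotion models.
if __name__ == "__main__":
    from types import SimpleNamespace
    banner = SimpleNamespace(content_object="banner-promo", position="Header")
    sidebar = SimpleNamespace(content_object="sidebar-promo", position="Sidebar")
    context = {}
    split_by_position([banner, sidebar], context)
    # context == {'promotions_header': ['banner-promo'],
    #             'promotions_sidebar': ['sidebar-promo']}
    print(context)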
|
py | b4049c38307da862b6b1f1c89734c377ce7efac2 | # An __init__.py file indicates that the files in this directory are part of a Python package.
# Without an __init__.py file, you cannot import files from another directory in a Python project.
# __init__.py can be empty but can also be used to set up imports. There are 3 kinds of imports:
# 1. example_package/__init__.py and explicit imports:
#    from .example import sample_function
#    where sample_function is the name of the function and .example is the name of the module/file.
#    Now in main.py I can directly import this function.
# 2. main_package/__init__.py and standard import:
#    import example_package
#    This imports the entire package.
# 3. main_package/__init__.py and wildcard import:
#    In __init__.py, set an __all__ variable to a list of the modules/files in the package.
#    __all__ = ["file1", "file2", "file3"]
|
py | b4049d2a637dd4532963cd00dc88b3aa8b11af49 | import yaml
def get_gsize_from_igenomes(igenomes, build):
if build:
with open(igenomes) as f:
igenomes = yaml.load(f, Loader=yaml.FullLoader)
if igenomes:
if igenomes["params"]["genomes"][build]:
if "macs_gsize" in igenomes["params"]["genomes"][build]:
return igenomes["params"]["genomes"][build]["macs_gsize"]
return ""
igenomes = snakemake.input[0]
gsize_out = snakemake.output[0]
config_gsize = snakemake.params.get("extra", "")
build = snakemake.params.get("build", "")
if config_gsize:
with open(gsize_out, 'w') as f:
f.write("-g {}".format(config_gsize))
else:
with open(gsize_out, 'w') as f:
macs_gsize = get_gsize_from_igenomes(igenomes, build)
if macs_gsize:
f.write("-g {}".format(macs_gsize))
else:
f.write("")
|
py | b4049d2b59afe7aac53f17441e447e2a7d3e796b | # @Time : 2020/6/26
# @Author : Shanlei Mu
# @Email : [email protected]
# UPDATE:
# @Time : 2020/8/7
# @Author : Shanlei Mu
# @Email : [email protected]
"""
recbole.model.loss
#######################
Common Loss in recommender system
"""
import torch
import torch.nn as nn
class BPRLoss(nn.Module):
""" BPRLoss, based on Bayesian Personalized Ranking
Args:
        - gamma(float): Small value added inside the log to avoid log(0)
Shape:
- Pos_score: (N)
- Neg_score: (N), same shape as the Pos_score
- Output: scalar.
Examples::
>>> loss = BPRLoss()
>>> pos_score = torch.randn(3, requires_grad=True)
>>> neg_score = torch.randn(3, requires_grad=True)
>>> output = loss(pos_score, neg_score)
>>> output.backward()
"""
def __init__(self, gamma=1e-10):
super(BPRLoss, self).__init__()
self.gamma = gamma
    def forward(self, pos_score, neg_score):  # pos_score and neg_score are tensors of scores, not single numbers
loss = -torch.log(self.gamma + torch.sigmoid(pos_score - neg_score)).mean()
return loss
class RegLoss(nn.Module):
""" RegLoss, L2 regularization on model parameters
"""
def __init__(self):
super(RegLoss, self).__init__()
def forward(self, parameters):
reg_loss = None
for W in parameters:
if reg_loss is None:
reg_loss = W.norm(2)
else:
reg_loss = reg_loss + W.norm(2)
return reg_loss
class EmbLoss(nn.Module):
""" EmbLoss, regularization on embeddings
"""
def __init__(self, norm=2):
super(EmbLoss, self).__init__()
self.norm = norm
def forward(self, *embeddings):
emb_loss = torch.zeros(1).to(embeddings[-1].device)
for embedding in embeddings:
emb_loss += torch.norm(embedding, p=self.norm)
emb_loss /= embeddings[-1].shape[0]
return emb_loss
class EmbMarginLoss(nn.Module):
""" EmbMarginLoss, regularization on embeddings
"""
def __init__(self, power=2):
super(EmbMarginLoss, self).__init__()
self.power = power
def forward(self, *embeddings):
dev = embeddings[-1].device
cache_one = torch.tensor(1.0).to(dev)
cache_zero = torch.tensor(0.0).to(dev)
emb_loss = torch.tensor(0.).to(dev)
for embedding in embeddings:
norm_e = torch.sum(embedding ** self.power, dim=1, keepdim=True)
emb_loss += torch.sum(torch.max(norm_e - cache_one, cache_zero))
return emb_loss
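# A small, hedged usage sketch of the three regularizers defined above; the
# tensors are random and purely illustrative.
if __name__ == "__main__":
    user_emb = torch.randn(4, 8, requires_grad=True)
    item_emb = torch.randn(4, 8, requires_grad=True)
    print(EmbLoss()(user_emb, item_emb))        # L2 norms averaged over batch size
    print(RegLoss()([user_emb, item_emb]))      # sum of parameter L2 norms
    print(EmbMarginLoss()(user_emb, item_emb))  # hinge on squared norms above 1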
|
py | b4049d482697de3fb25219c4a80199a40fbca76b | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.cloud.aiplatform_v1beta1.types import index_endpoint as gca_index_endpoint
from google.cloud.aiplatform_v1beta1.types import operation
from google.protobuf import field_mask_pb2 # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1",
manifest={
"CreateIndexEndpointRequest",
"CreateIndexEndpointOperationMetadata",
"GetIndexEndpointRequest",
"ListIndexEndpointsRequest",
"ListIndexEndpointsResponse",
"UpdateIndexEndpointRequest",
"DeleteIndexEndpointRequest",
"DeployIndexRequest",
"DeployIndexResponse",
"DeployIndexOperationMetadata",
"UndeployIndexRequest",
"UndeployIndexResponse",
"UndeployIndexOperationMetadata",
},
)
class CreateIndexEndpointRequest(proto.Message):
r"""Request message for
[IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint].
Attributes:
parent (str):
Required. The resource name of the Location to create the
IndexEndpoint in. Format:
``projects/{project}/locations/{location}``
index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint):
Required. The IndexEndpoint to create.
"""
parent = proto.Field(proto.STRING, number=1,)
index_endpoint = proto.Field(
proto.MESSAGE, number=2, message=gca_index_endpoint.IndexEndpoint,
)
class CreateIndexEndpointOperationMetadata(proto.Message):
r"""Runtime operation information for
[IndexEndpointService.CreateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.CreateIndexEndpoint].
Attributes:
generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
The operation generic information.
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
class GetIndexEndpointRequest(proto.Message):
r"""Request message for
[IndexEndpointService.GetIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.GetIndexEndpoint]
Attributes:
name (str):
Required. The name of the IndexEndpoint resource. Format:
``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}``
"""
name = proto.Field(proto.STRING, number=1,)
class ListIndexEndpointsRequest(proto.Message):
r"""Request message for
[IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints].
Attributes:
parent (str):
Required. The resource name of the Location from which to
list the IndexEndpoints. Format:
``projects/{project}/locations/{location}``
filter (str):
Optional. An expression for filtering the results of the
request. For field names both snake_case and camelCase are
supported.
            - ``index_endpoint`` supports = and !=. ``index_endpoint``
              represents the IndexEndpoint ID, i.e. the last segment of
              the IndexEndpoint's
              [resource name][google.cloud.aiplatform.v1beta1.IndexEndpoint.name].
            - ``display_name`` supports =, != and regex() (uses
              `re2 <https://github.com/google/re2/wiki/Syntax>`__
              syntax)
            - ``labels`` supports general map functions, that is:
              ``labels.key=value`` - key:value equality
              ``labels.key:*`` or ``labels:key`` - key existence
              A key including a space must be quoted: ``labels."a key"``.
            Some examples:
            - ``index_endpoint="1"``
            - ``display_name="myDisplayName"``
            - ``regex(display_name, "^A")`` -> The display name starts
              with an A.
            - ``labels.myKey="myValue"``
page_size (int):
Optional. The standard list page size.
page_token (str):
Optional. The standard list page token. Typically obtained
via
[ListIndexEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsResponse.next_page_token]
of the previous
[IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints]
call.
read_mask (google.protobuf.field_mask_pb2.FieldMask):
Optional. Mask specifying which fields to
read.
"""
parent = proto.Field(proto.STRING, number=1,)
filter = proto.Field(proto.STRING, number=2,)
page_size = proto.Field(proto.INT32, number=3,)
page_token = proto.Field(proto.STRING, number=4,)
read_mask = proto.Field(proto.MESSAGE, number=5, message=field_mask_pb2.FieldMask,)
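# Illustrative only: a request using the filter syntax documented above might be
# built as below; the project, location and display name are placeholders, not
# real resources.
#
#   request = ListIndexEndpointsRequest(
#       parent="projects/my-project/locations/us-central1",
#       filter='display_name="myDisplayName"',
#       page_size=10,
#   )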
class ListIndexEndpointsResponse(proto.Message):
r"""Response message for
[IndexEndpointService.ListIndexEndpoints][google.cloud.aiplatform.v1beta1.IndexEndpointService.ListIndexEndpoints].
Attributes:
index_endpoints (Sequence[google.cloud.aiplatform_v1beta1.types.IndexEndpoint]):
List of IndexEndpoints in the requested page.
next_page_token (str):
A token to retrieve next page of results. Pass to
[ListIndexEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListIndexEndpointsRequest.page_token]
to obtain that page.
"""
@property
def raw_page(self):
return self
index_endpoints = proto.RepeatedField(
proto.MESSAGE, number=1, message=gca_index_endpoint.IndexEndpoint,
)
next_page_token = proto.Field(proto.STRING, number=2,)
class UpdateIndexEndpointRequest(proto.Message):
r"""Request message for
[IndexEndpointService.UpdateIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.UpdateIndexEndpoint].
Attributes:
index_endpoint (google.cloud.aiplatform_v1beta1.types.IndexEndpoint):
Required. The IndexEndpoint which replaces
the resource on the server.
update_mask (google.protobuf.field_mask_pb2.FieldMask):
Required. The update mask applies to the resource. See
[google.protobuf.FieldMask][google.protobuf.FieldMask].
"""
index_endpoint = proto.Field(
proto.MESSAGE, number=1, message=gca_index_endpoint.IndexEndpoint,
)
update_mask = proto.Field(
proto.MESSAGE, number=2, message=field_mask_pb2.FieldMask,
)
class DeleteIndexEndpointRequest(proto.Message):
r"""Request message for
[IndexEndpointService.DeleteIndexEndpoint][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeleteIndexEndpoint].
Attributes:
name (str):
Required. The name of the IndexEndpoint resource to be
deleted. Format:
``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}``
"""
name = proto.Field(proto.STRING, number=1,)
class DeployIndexRequest(proto.Message):
r"""Request message for
[IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex].
Attributes:
index_endpoint (str):
Required. The name of the IndexEndpoint resource into which
to deploy an Index. Format:
``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}``
deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex):
Required. The DeployedIndex to be created
within the IndexEndpoint.
"""
index_endpoint = proto.Field(proto.STRING, number=1,)
deployed_index = proto.Field(
proto.MESSAGE, number=2, message=gca_index_endpoint.DeployedIndex,
)
class DeployIndexResponse(proto.Message):
r"""Response message for
[IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex].
Attributes:
deployed_index (google.cloud.aiplatform_v1beta1.types.DeployedIndex):
The DeployedIndex that had been deployed in
the IndexEndpoint.
"""
deployed_index = proto.Field(
proto.MESSAGE, number=1, message=gca_index_endpoint.DeployedIndex,
)
class DeployIndexOperationMetadata(proto.Message):
r"""Runtime operation information for
[IndexEndpointService.DeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.DeployIndex].
Attributes:
generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
The operation generic information.
deployed_index_id (str):
The unique index id specified by user
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
deployed_index_id = proto.Field(proto.STRING, number=2,)
class UndeployIndexRequest(proto.Message):
r"""Request message for
[IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex].
Attributes:
index_endpoint (str):
Required. The name of the IndexEndpoint resource from which
to undeploy an Index. Format:
``projects/{project}/locations/{location}/indexEndpoints/{index_endpoint}``
deployed_index_id (str):
Required. The ID of the DeployedIndex to be
undeployed from the IndexEndpoint.
"""
index_endpoint = proto.Field(proto.STRING, number=1,)
deployed_index_id = proto.Field(proto.STRING, number=2,)
class UndeployIndexResponse(proto.Message):
r"""Response message for
[IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex].
"""
class UndeployIndexOperationMetadata(proto.Message):
r"""Runtime operation information for
[IndexEndpointService.UndeployIndex][google.cloud.aiplatform.v1beta1.IndexEndpointService.UndeployIndex].
Attributes:
generic_metadata (google.cloud.aiplatform_v1beta1.types.GenericOperationMetadata):
The operation generic information.
"""
generic_metadata = proto.Field(
proto.MESSAGE, number=1, message=operation.GenericOperationMetadata,
)
__all__ = tuple(sorted(__protobuf__.manifest))
|
py | b4049ebd41bba8075869ec9d02972a69eb9d05dd | # container-service-extension
# Copyright (c) 2017 VMware, Inc. All Rights Reserved.
# SPDX-License-Identifier: BSD-2-Clause
import click
from container_service_extension.cluster import load_from_metadata
from container_service_extension.cluster import TYPE_MASTER
from container_service_extension.cluster import TYPE_NODE
import logging
from pyvcloud.vcd.client import _WellKnownEndpoint
from pyvcloud.vcd.client import BasicLoginCredentials
from pyvcloud.vcd.client import Client
from pyvcloud.vcd.client import TaskStatus
from pyvcloud.vcd.org import Org
from pyvcloud.vcd.task import Task
from pyvcloud.vcd.vapp import VApp
from pyvcloud.vcd.vdc import VDC
from pyvcloud.vcd.vsphere import VSphere
import re
import requests
import threading
import time
import traceback
import uuid
LOGGER = logging.getLogger(__name__)
OK = 200
CREATED = 201
ACCEPTED = 202
INTERNAL_SERVER_ERROR = 500
OP_CREATE_CLUSTER = 'create_cluster'
OP_DELETE_CLUSTER = 'delete_cluster'
MAX_HOST_NAME_LENGTH = 25 - 4
def get_new_broker(config):
if config['broker']['type'] == 'default':
return DefaultBroker(config)
else:
return None
def spinning_cursor():
while True:
for cursor in '|/-\\':
yield cursor
spinner = spinning_cursor()
def task_callback(task):
message = '\x1b[2K\r{}: {}, status: {}'.format(
task.get('operationName'), task.get('operation'), task.get('status')
)
if hasattr(task, 'Progress'):
message += ', progress: %s%%' % task.Progress
if task.get('status').lower() in [TaskStatus.QUEUED.value,
TaskStatus.PENDING.value,
TaskStatus.PRE_RUNNING.value,
TaskStatus.RUNNING.value]:
        message += ' %s ' % next(spinner)
click.secho(message)
class DefaultBroker(threading.Thread):
def __init__(self, config):
threading.Thread.__init__(self)
self.config = config
self.host = config['vcd']['host']
self.username = config['vcd']['username']
self.password = config['vcd']['password']
self.version = config['vcd']['api_version']
self.verify = config['vcd']['verify']
self.log = config['vcd']['log']
def _connect_sysadmin(self):
if not self.verify:
LOGGER.warning('InsecureRequestWarning: '
'Unverified HTTPS request is being made. '
'Adding certificate verification is strongly '
'advised.')
requests.packages.urllib3.disable_warnings()
self.client_sysadmin = Client(uri=self.host,
api_version=self.version,
verify_ssl_certs=self.verify,
log_file='sysadmin.log',
log_headers=True,
log_bodies=True
)
self.client_sysadmin.set_credentials(
BasicLoginCredentials(self.username,
'System',
self.password))
def _connect_tenant(self, headers):
token = headers.get('x-vcloud-authorization')
accept_header = headers.get('Accept')
version = accept_header.split('version=')[1]
self.client_tenant = Client(uri=self.host,
api_version=version,
verify_ssl_certs=self.verify,
log_file='tenant.log',
log_headers=True,
log_bodies=True
)
session = self.client_tenant.rehydrate_from_token(token)
return {'user_name': session.get('user'),
'user_id': session.get('userId'),
'org_name': session.get('org'),
'org_href': self.client_tenant._get_wk_endpoint(
_WellKnownEndpoint.LOGGED_IN_ORG)
}
def is_valid_name(self, name):
"""Validates that the cluster name against the pattern.
"""
if len(name) > MAX_HOST_NAME_LENGTH:
return False
if name[-1] == '.':
name = name[:-1]
allowed = re.compile("(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in name.split("."))
def run(self):
LOGGER.debug('thread started op=%s' % self.op)
if self.op == OP_CREATE_CLUSTER:
self.create_cluster_thread()
elif self.op == OP_DELETE_CLUSTER:
self.delete_cluster_thread()
def list_clusters(self, headers, body):
result = {}
try:
result['body'] = []
result['status_code'] = OK
self._connect_tenant(headers)
clusters = load_from_metadata(self.client_tenant,
get_leader_ip=True)
for cluster in clusters:
names = []
for node in cluster['master_nodes']:
names.append(node['name'])
cluster['master_nodes'] = names
names = []
for node in cluster['nodes']:
names.append(node['name'])
cluster['nodes'] = names
result['body'] = clusters
except Exception:
LOGGER.error(traceback.format_exc())
result['body'] = []
result['status_code'] = INTERNAL_SERVER_ERROR
result['message'] = traceback.format_exc()
return result
def create_cluster(self, headers, body):
result = {}
result['body'] = {}
cluster_name = body['name']
vdc_name = body['vdc']
node_count = body['node_count']
LOGGER.debug('about to create cluster %s on %s with %s nodes',
cluster_name,
vdc_name,
node_count)
result['body'] = {'message': 'can\'t create cluster'}
result['status_code'] = INTERNAL_SERVER_ERROR
try:
if not self.is_valid_name(cluster_name):
raise Exception('Invalid cluster name')
self.tenant_info = self._connect_tenant(headers)
self.headers = headers
self.body = body
self.cluster_id = str(uuid.uuid4())
self.op = OP_CREATE_CLUSTER
self._connect_sysadmin()
task = Task(self.client_sysadmin)
self.t = task.update(
TaskStatus.RUNNING.value,
'vcloud.cse',
'Creating cluster %s(%s)' % (cluster_name, self.cluster_id),
self.op,
'',
None,
'urn:cse:cluster:%s' % self.cluster_id,
cluster_name,
'application/vcloud.cse.cluster+xml',
self.tenant_info['user_id'],
self.tenant_info['user_name'],
org_href=self.tenant_info['org_href']
)
self.daemon = True
self.start()
response_body = {}
response_body['name'] = cluster_name
response_body['cluster_id'] = self.cluster_id
response_body['task_href'] = self.t.get('href')
result['body'] = response_body
result['status_code'] = ACCEPTED
except Exception as e:
            result['body'] = {'message': str(e)}
LOGGER.error(traceback.format_exc())
return result
def create_cluster_thread(self):
cluster_name = self.body['name']
task = Task(self.client_sysadmin)
try:
clusters = load_from_metadata(self.client_tenant,
name=cluster_name)
LOGGER.debug(clusters)
if len(clusters) != 0:
raise Exception('Cluster already exists.')
self.t = task.update(
TaskStatus.RUNNING.value,
'vcloud.cse',
'Creating nodes %s(%s)' % (cluster_name, self.cluster_id),
self.op,
'',
None,
'urn:cse:cluster:%s' % self.cluster_id,
cluster_name,
'application/vcloud.cse.cluster+xml',
self.tenant_info['user_id'],
self.tenant_info['user_name'],
org_href=self.tenant_info['org_href'],
task_href=self.t.get('href'))
org_resource = self.client_tenant.get_org()
org = Org(self.client_tenant, org_resource=org_resource)
vdc_resource = org.get_vdc(self.body['vdc'])
master_count = 1
node_count = int(self.body['node_count'])
catalog = self.config['broker']['catalog']
master_template = self.config['broker']['master_template']
node_template = self.config['broker']['node_template']
master_cpu = self.config['broker']['master_cpu']
master_mem = self.config['broker']['master_mem']
node_cpu = self.config['broker']['node_cpu']
node_mem = self.config['broker']['node_mem']
vdc = VDC(self.client_tenant, vdc_resource=vdc_resource)
masters = []
for n in range(master_count):
time.sleep(1)
name = cluster_name + '-m%s' % str(n+1)
masters.append(vdc.instantiate_vapp(name,
catalog,
master_template,
memory=master_mem,
cpu=master_cpu))
nodes = []
for n in range(node_count):
time.sleep(1)
name = cluster_name + '-n%s' % str(n+1)
nodes.append(vdc.instantiate_vapp(name,
catalog,
node_template,
memory=node_mem,
cpu=node_cpu))
tagged = set([])
while len(tagged) < (master_count + node_count):
node = None
node_type = None
for n in masters:
if n.get('name') not in tagged:
node = n
node_type = TYPE_MASTER
break
if node is None:
for n in nodes:
if n.get('name') not in tagged:
node = n
node_type = TYPE_NODE
break
time.sleep(15)
if node is not None:
LOGGER.debug('about to tag %s, href=%s',
node.get('name'),
node.get('href'))
try:
tags = {}
tags['cse.cluster.id'] = self.cluster_id
tags['cse.node.type'] = node_type
tags['cse.cluster.name'] = cluster_name
vapp = VApp(self.client_tenant,
vapp_href=node.get('href'))
for k, v in tags.items():
task = vapp.set_metadata('GENERAL',
'READWRITE',
k,
v)
self.client_tenant.get_task_monitor().\
wait_for_status(
task=task,
timeout=600,
poll_frequency=5,
fail_on_status=None,
expected_target_statuses=[TaskStatus.SUCCESS], # NOQA
callback=None)
tagged.update([node.get('name')])
LOGGER.debug('tagged %s', node.get('name'))
except Exception:
LOGGER.error(
                            "can't tag %s at this moment, will retry later",
node.get('name'))
LOGGER.error(traceback.format_exc())
time.sleep(1)
time.sleep(4)
self.customize_nodes()
except Exception as e:
LOGGER.error(traceback.format_exc())
self.t = task.update(
TaskStatus.ERROR.value,
'vcloud.cse',
self.op,
'create cluster',
'',
None,
'urn:cse:cluster:%s' % self.cluster_id,
cluster_name,
'application/vcloud.cse.cluster+xml',
self.tenant_info['user_id'],
self.tenant_info['user_name'],
org_href=self.tenant_info['org_href'],
task_href=self.t.get('href'),
error_message=str(e))
def delete_cluster(self, cluster_name, headers, body):
result = {}
result['body'] = {}
LOGGER.debug('about to delete cluster with name: %s', cluster_name)
result['status_code'] = INTERNAL_SERVER_ERROR
try:
self.cluster_name = cluster_name
self.tenant_info = self._connect_tenant(headers)
self.headers = headers
self.body = body
self.op = OP_DELETE_CLUSTER
self.cluster_id = ''
self._connect_sysadmin()
task = Task(self.client_sysadmin)
self.t = task.update(
TaskStatus.RUNNING.value,
'vcloud.cse',
'Deleting cluster %s(%s)' % (self.cluster_name,
self.cluster_id),
self.op,
'',
None,
'urn:cse:cluster:%s' % self.cluster_id,
self.cluster_name,
'application/vcloud.cse.cluster+xml',
self.tenant_info['user_id'],
self.tenant_info['user_name'],
org_href=self.tenant_info['org_href']
)
self.daemon = True
self.start()
response_body = {}
response_body['cluster_name'] = self.cluster_name
response_body['task_href'] = self.t.get('href')
result['body'] = response_body
result['status_code'] = ACCEPTED
except Exception as e:
            result['body'] = {'message': str(e)}
LOGGER.error(traceback.format_exc())
return result
def delete_cluster_thread(self):
LOGGER.debug('about to delete cluster with name: %s',
self.cluster_name)
task = Task(self.client_sysadmin)
try:
clusters = load_from_metadata(self.client_tenant,
name=self.cluster_name)
LOGGER.debug(clusters)
if len(clusters) != 1:
raise Exception('Cluster not found.')
cluster = clusters[0]
# self.cluster_id = cluster['cluster_id']
vdc = None
tasks = []
for node in cluster['master_nodes']+cluster['nodes']:
if vdc is None:
vdc = VDC(self.client_tenant, vdc_href=cluster['vdc_href'])
LOGGER.debug('about to delete vapp %s', node['name'])
try:
tasks.append(vdc.delete_vapp(node['name'], force=True))
except Exception:
pass
time.sleep(1)
# TODO(wait until all nodes are deleted)
self.t = task.update(
TaskStatus.SUCCESS.value,
'vcloud.cse',
self.op,
'delete cluster',
'',
None,
'urn:cse:cluster:%s' % self.cluster_id,
self.cluster_name,
'application/vcloud.cse.cluster+xml',
self.tenant_info['user_id'],
self.tenant_info['user_name'],
org_href=self.tenant_info['org_href'],
task_href=self.t.get('href'))
except Exception as e:
LOGGER.error(traceback.format_exc())
self.t = task.update(
TaskStatus.ERROR.value,
'vcloud.cse',
self.op,
'delete cluster',
'',
None,
'urn:cse:cluster:%s' % self.cluster_id,
self.cluster_name,
'application/vcloud.cse.cluster+xml',
self.tenant_info['user_id'],
self.tenant_info['user_name'],
org_href=self.tenant_info['org_href'],
task_href=self.t.get('href'),
error_message=str(e))
def customize_nodes(self, max_retries=60):
cluster_name = self.body['name']
node_count = int(self.body['node_count'])
task = Task(self.client_sysadmin)
self.t = task.update(
TaskStatus.RUNNING.value,
'vcloud.cse',
'Customizing nodes %s(%s)' % (cluster_name, self.cluster_id),
self.op,
'',
None,
'urn:cse:cluster:%s' % self.cluster_id,
cluster_name,
'application/vcloud.cse.cluster+xml',
self.tenant_info['user_id'],
self.tenant_info['user_name'],
org_href=self.tenant_info['org_href'],
task_href=self.t.get('href'))
nodes = []
n = 0
all_nodes_configured = False
while n < max_retries:
try:
nodes = []
clusters = load_from_metadata(self.client_tenant,
cluster_id=self.cluster_id)
LOGGER.debug(clusters)
assert len(clusters) == 1
cluster = clusters[0]
for cluster_node in cluster['master_nodes'] + cluster['nodes']:
node = {'name': cluster_node['name']}
vapp = VApp(self.client_tenant,
vapp_href=cluster_node['href'])
node['ip'] = vapp.get_primary_ip(cluster_node['name'])
node['moid'] = vapp.get_vm_moid(cluster_node['name'])
if cluster_node['name'].endswith('-m1'):
node['node_type'] = TYPE_MASTER
else:
node['node_type'] = TYPE_NODE
nodes.append(node)
for node in nodes:
if 'ip' in node.keys() and len(node['ip']) > 0:
pass
else:
n += 1
                        raise Exception('missing ip, retry %s' % n)
all_nodes_configured = True
break
except Exception as e:
                LOGGER.error(str(e))
time.sleep(5)
if not all_nodes_configured:
LOGGER.error('ip not configured in at least one node')
return
LOGGER.debug('ip configured in all nodes')
vs = VSphere(self.config['vcs']['host'],
self.config['vcs']['username'],
self.config['vcs']['password'],
port=int(self.config['vcs']['port']))
vs.connect()
master_node = None
for node in nodes:
vm = vs.get_vm_by_moid(node['moid'])
commands = [
['/bin/echo', '\'127.0.0.1 localhost\' | sudo tee /etc/hosts'], # NOQA
['/bin/echo', '\'127.0.1.1 %s\' | sudo tee -a /etc/hosts' % node['name']], # NOQA
['/bin/echo', '\'::1 localhost ip6-localhost ip6-loopback\' | sudo tee -a /etc/hosts'], # NOQA
['/bin/echo', '\'ff02::1 ip6-allnodes\' | sudo tee -a /etc/hosts'], # NOQA
['/bin/echo', '\'ff02::2 ip6-allrouters\' | sudo tee -a /etc/hosts'], # NOQA
['/usr/bin/sudo', 'hostnamectl set-hostname %s' % node['name']], # NOQA
['/bin/mkdir', '$HOME/.ssh'],
['/bin/chmod', 'go-rwx $HOME/.ssh'],
['/bin/echo', '\'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDFS5HL4CBlWrZscohhqdVwUa815Pi3NaCijfdvs0xCNF2oP458Xb3qYdEmuFWgtl3kEM4hR60/Tzk7qr3dmAfY7GPqdGhQsZEnvUJq0bfDAh0KqhdrqiIqx9zlKWnR65gl/u7Qkck2jiKkqjfxZwmJcuVCu+zQZCRC80XKwpyOudLKd/zJz9tzJxJ7+yltu9rNdshCEfP+OR1QoY2hFRH1qaDHTIbDdlF/m0FavapH7+ScufOY/HNSSYH7/SchsxK3zywOwGV1e1z//HHYaj19A3UiNdOqLkitKxFQrtSyDfClZ/0SwaVxh4jqrKuJ5NT1fbN2bpDWMgffzD9WWWZbDvtYQnl+dBjDnzBZGo8miJ87lYiYH9N9kQfxXkkyPziAjWj8KZ8bYQWJrEQennFzsbbreE8NtjsM059RXz0kRGeKs82rHf0mTZltokAHjoO5GmBZb8sZTdZyjfo0PTgaNCENe0bRDTrAomM99LhW2sJ5ZjK7SIqpWFaU+P+qgj4s88btCPGSqnh0Fea1foSo5G57l5YvfYpJalW0IeiynrO7TRuxEVV58DJNbYyMCvcZutuyvNq0OpEQYXRM2vMLQX3ZX3YhHMTlSXXcriqvhOJ7aoNae5aiPSlXvgFi/wP1x1aGYMEsiqrjNnrflGk9pIqniXsJ/9TFwRh9m4GktQ== [email protected]\' > $HOME/.ssh/authorized_keys'], # NOQA
['/bin/chmod', 'go-rwx $HOME/.ssh/authorized_keys']
]
if node['node_type'] == TYPE_MASTER:
master_node = node
commands.append(['/bin/rm', '-f /tmp/kubeadm-init.out'])
commands.append(['/usr/bin/sudo', '/usr/bin/kubeadm init --pod-network-cidr=10.244.0.0/16 --apiserver-advertise-address={ip} > /tmp/kubeadm-init.out'.format(ip=node['ip'])]) # NOQA
for command in commands:
LOGGER.debug('executing %s %s on %s',
command[0],
command[1],
vm)
result = vs.execute_program_in_guest(
vm,
self.config['broker']['username'],
self.config['broker']['password'],
command[0],
command[1],
wait_for_completion=True)
LOGGER.debug('executed %s %s on %s: %s',
command[0],
command[1],
vm,
result)
if master_node is not None:
vm = vs.get_vm_by_moid(master_node['moid'])
response = vs.download_file_from_guest(
vm,
self.config['broker']['username'],
self.config['broker']['password'],
'/tmp/kubeadm-init.out'
)
token = [x for x in response.content.splitlines() if x.strip().startswith('[token] Using token: ')][0].split()[-1] # NOQA
cmd = '/usr/bin/sudo'
args = '/usr/bin/kubeadm join --token %s %s:6443' % (token, master_node['ip']) # NOQA
for node in nodes:
vm = vs.get_vm_by_moid(node['moid'])
if node['node_type'] == TYPE_NODE:
LOGGER.debug('executing %s %s on %s', cmd, args, vm)
cmd = '/usr/bin/sudo'
result = vs.execute_program_in_guest(
vm,
self.config['broker']['username'],
self.config['broker']['password'],
cmd,
args,
wait_for_completion=True
)
LOGGER.debug('executed %s %s on %s: %s',
cmd,
args,
vm,
result)
elif node['node_type'] == TYPE_MASTER:
commands = [
# ['/usr/bin/sudo', 'kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-'], # NOQA
['/usr/bin/sudo', 'kubectl --kubeconfig=/etc/kubernetes/admin.conf apply -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel-rbac.yml'], # NOQA
['/usr/bin/sudo', 'kubectl --kubeconfig=/etc/kubernetes/admin.conf create -f https://raw.githubusercontent.com/coreos/flannel/master/Documentation/kube-flannel.yml'] # NOQA
]
if node_count == 0:
commands.append(['/usr/bin/sudo', 'kubectl --kubeconfig=/etc/kubernetes/admin.conf taint nodes --all node-role.kubernetes.io/master-']) # NOQA
for command in commands:
LOGGER.debug('executing %s %s on %s',
command[0],
command[1],
vm)
result = vs.execute_program_in_guest(
vm,
self.config['broker']['username'],
self.config['broker']['password'],
command[0],
command[1],
wait_for_completion=True)
LOGGER.debug('executed %s %s on %s: %s',
command[0],
command[1],
vm,
result)
task = Task(self.client_sysadmin)
self.t = task.update(
TaskStatus.SUCCESS.value,
'vcloud.cse',
self.op,
'create cluster',
'',
None,
'urn:cse:cluster:%s' % self.cluster_id,
cluster_name,
'application/vcloud.cse.cluster+xml',
self.tenant_info['user_id'],
self.tenant_info['user_name'],
org_href=self.tenant_info['org_href'],
task_href=self.t.get('href'))
def get_cluster_config(self, cluster_name, headers, body):
result = {}
result['body'] = {}
result['status_code'] = INTERNAL_SERVER_ERROR
try:
self._connect_tenant(headers)
self.headers = headers
self.cluster_name = cluster_name
clusters = load_from_metadata(self.client_tenant,
name=self.cluster_name,
get_leader_ip=True)
LOGGER.debug(clusters)
assert len(clusters) == 1
cluster = clusters[0]
assert len(cluster['master_nodes']) == 1
result['body'] = {'cluster_config': '\'%s\'' % cluster_name}
vs = VSphere(self.config['vcs']['host'],
self.config['vcs']['username'],
self.config['vcs']['password'],
port=int(self.config['vcs']['port']))
vs.connect()
vm = vs.get_vm_by_moid(cluster['leader_moid'])
commands = [
['/usr/bin/sudo', 'chmod a+r /etc/kubernetes/admin.conf'] # NOQA
]
for command in commands:
LOGGER.debug('executing %s on %s', command[0], vm)
r = vs.execute_program_in_guest(
vm,
self.config['broker']['username'],
self.config['broker']['password'],
command[0],
command[1]
)
time.sleep(1)
LOGGER.debug('executed %s on %s: %s',
command[0],
vm,
r)
response = vs.download_file_from_guest(
vm,
self.config['broker']['username'],
self.config['broker']['password'],
'/etc/kubernetes/admin.conf')
result['body'] = response.content
result['status_code'] = response.status_code
except Exception as e:
LOGGER.error(traceback.format_exc())
            result['body'] = {'message': str(e)}
return result
|
py | b4049f10e4d1dca0d8c3abc233db6f79b2523661 | __author__ = "Qianli Wang und Nazar Sopiha"
__copyright__ = "Copyright (c) 2019 qiaw99"
# https://github.com/qiaw99/WS2019-20/blob/master/LICENSE
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
import numpy as np
import matplotlib.pyplot as plt
# Task a)
def bubblesort(ls):
temp = 0
swap = True
stop = len(ls) - 1
while swap:
swap = False
for i in range(stop):
if(ls[i] > ls[i + 1]):
temp += 1
ls[i], ls[i + 1] = ls[i + 1], ls[i]
swap = True
stop = stop - 1
return (ls, temp)
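# A small worked example: the second element of the returned tuple is the number
# of swaps performed while sorting.
# >>> bubblesort([3, 1, 2])
# ([1, 2, 3], 2)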
# Task b)
# Implemented with a helper list so that we can avoid recursion
def hilfeArray_mergesort(A):
    # reset the global comparison counter once per sort so that it accumulates
    # over all merge passes instead of reflecting only the last merge
    global counter
    counter = 0
    N = len(A)
    partsLength = 1
while partsLength < N:
for i in range(0, N // partsLength, 2):
hilfeArray_merge(A, i * partsLength, (i + 1) * partsLength)
partsLength <<= 1
return (A, counter)
def hilfeArray_merge(A, firstPartlowerIndex, secondPartlowerIndex):
    global counter
firstPartIndex = firstPartlowerIndex
secondPartIndex = secondPartlowerIndex
rightBoundary = min(2 * secondPartlowerIndex - firstPartlowerIndex - 1, len(A) - 1)
mergedList = []
while secondPartlowerIndex - 1 - firstPartIndex >= 0 and rightBoundary - secondPartIndex >= 0:
counter += 2
if A[firstPartIndex] <= A[secondPartIndex]:
counter += 1
mergedList.append(A[firstPartIndex])
firstPartIndex += 1
else:
counter += 1
mergedList.append(A[secondPartIndex])
secondPartIndex += 1
while secondPartlowerIndex - 1 - firstPartIndex >= 0:
counter += 1
mergedList.append(A[firstPartIndex])
firstPartIndex += 1
while rightBoundary - secondPartIndex >= 0:
counter += 1
mergedList.append(A[secondPartIndex])
secondPartIndex += 1
for i in range(firstPartlowerIndex, rightBoundary + 1):
A[i] = mergedList[i - firstPartlowerIndex]
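# An illustrative call: the bottom-up mergesort returns the sorted list together
# with the accumulated comparison counter, e.g.
# hilfeArray_mergesort([4, 1, 3, 2]) -> ([1, 2, 3, 4], <comparison count>).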
def main():
for k in range(1, 5):
ls = []
        # lists which store the counters for each sorting algorithm
helper1 = []
helper2 = []
maximum1 = []
minimum1 = []
durchschnitt1 = []
maximum2 = []
minimum2 = []
durchschnitt2 = []
for i in range(100):
temp = [random.random() for j in range(10**k)]
ls.append(temp)
######
            a, b = bubblesort(temp.copy())  # sort a copy so the mergesort below still sees the unsorted data
helper1.append(b / 10**k)
maximum1.append(max(helper1))
minimum1.append(min(helper1))
durchschnitt1.append(sum(helper1) / len(helper1))
######
x, y = hilfeArray_mergesort(temp)
helper2.append(y / 10**k)
maximum2.append(max(helper2))
minimum2.append(min(helper2))
durchschnitt2.append(sum(helper2) / len(helper2))
print("Waiting for the result... with length ", 10**k)
print(len(maximum1))
x1 = np.arange(0, 100)
y1 = np.array(maximum1)
plt.title("Bubblesort")
#plt.subplot(1, 1, 1)
plt.plot(x1, y1, 'r', label = "maximum")
y2 = np.array(minimum1)
plt.plot(x1, y2, 'b', label = "minimum")
y3 = np.array(durchschnitt1)
plt.plot(x1, y3, 'g', label = "durchschnitt")
plt.legend(loc = 'upper right')
plt.show()
x2 = np.arange(0, 100)
y4 = np.array(maximum2)
plt.title("Mergsort")
#plt.subplot(1, 1, 1)
plt.plot(x2, y4, 'r', label = "maximum")
y5 = np.array(minimum2)
plt.plot(x1, y5, 'b', label = "minimum")
y6 = np.array(durchschnitt2)
plt.plot(x1, y6, 'g', label = "durchschnitt")
plt.legend(loc = 'upper right')
plt.show()
if __name__ == "__main__":
main()
|
py | b4049f8d24dfa12c5a3a4993e95788e1e69491de | from .model_spec import ModelSpec
__all__ = ["ModelSpec"] |
py | b404a22f0c55d3133bb42f242448a7a37cf17b6e | # BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/main/LICENSE
import pytest # noqa: F401
import numpy as np # noqa: F401
import awkward as ak # noqa: F401
def test_UnmaskedArray():
content_float64 = ak.layout.NumpyArray(
np.array([0.25, 0.5, 3.5, 4.5, 5.5], dtype=np.float64)
)
array_float64 = ak.layout.UnmaskedArray(content_float64)
assert ak.to_list(array_float64) == [0.25, 0.5, 3.5, 4.5, 5.5]
assert str(ak.type(content_float64)) == "float64"
assert str(ak.type(ak.Array(content_float64))) == "5 * float64"
assert str(ak.type(array_float64)) == "?float64"
assert str(ak.type(ak.Array(array_float64))) == "5 * ?float64"
assert np.can_cast(np.float32, np.float64) is True
assert np.can_cast(np.float64, np.float32, "unsafe") is True
assert np.can_cast(np.float64, np.int8, "unsafe") is True
content_float32 = ak.values_astype(content_float64, "float32", highlevel=False)
array_float32 = ak.layout.UnmaskedArray(content_float32)
assert ak.to_list(array_float32) == [0.25, 0.5, 3.5, 4.5, 5.5]
assert str(ak.type(content_float32)) == "float32"
assert str(ak.type(ak.Array(content_float32))) == "5 * float32"
assert str(ak.type(array_float32)) == "?float32"
assert str(ak.type(ak.Array(array_float32))) == "5 * ?float32"
content_int8 = ak.values_astype(content_float64, "int8", highlevel=False)
array_int8 = ak.layout.UnmaskedArray(content_int8)
assert ak.to_list(array_int8) == [0, 0, 3, 4, 5]
assert str(ak.type(content_int8)) == "int8"
assert str(ak.type(ak.Array(content_int8))) == "5 * int8"
assert str(ak.type(array_int8)) == "?int8"
assert str(ak.type(ak.Array(array_int8))) == "5 * ?int8"
content_from_int8 = ak.values_astype(content_int8, "float64", highlevel=False)
array_from_int8 = ak.layout.UnmaskedArray(content_from_int8)
assert ak.to_list(array_from_int8) == [0, 0, 3, 4, 5]
assert str(ak.type(content_from_int8)) == "float64"
assert str(ak.type(ak.Array(content_from_int8))) == "5 * float64"
assert str(ak.type(array_from_int8)) == "?float64"
assert str(ak.type(ak.Array(array_from_int8))) == "5 * ?float64"
def test_RegularArray_and_ListArray():
content = ak.layout.NumpyArray(
np.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
)
offsets = ak.layout.Index64(np.array([0, 3, 3, 5, 6, 10, 10]))
listoffsetarray = ak.layout.ListOffsetArray64(offsets, content)
regulararray = ak.layout.RegularArray(listoffsetarray, 2, zeros_length=0)
starts = ak.layout.Index64(np.array([0, 1]))
stops = ak.layout.Index64(np.array([2, 3]))
listarray = ak.layout.ListArray64(starts, stops, regulararray)
assert str(ak.type(content)) == "float64"
assert str(ak.type(regulararray)) == "2 * var * float64"
assert str(ak.type(listarray)) == "var * 2 * var * float64"
regulararray_int8 = ak.values_astype(regulararray, "int8", highlevel=False)
assert str(ak.type(regulararray_int8)) == "2 * var * int8"
listarray_bool = ak.values_astype(listarray, "bool", highlevel=False)
assert str(ak.type(listarray_bool)) == "var * 2 * var * bool"
def test_ufunc_afterward():
assert (
ak.values_astype(ak.Array([{"x": 1.1}, {"x": 3.3}]), np.float32)["x"] + 1
).tolist() == [2.0999999046325684, 4.300000190734863]
def test_string():
assert ak.values_astype(
ak.Array([{"x": 1.1, "y": "hello"}]), np.float32
).tolist() == [{"x": 1.100000023841858, "y": "hello"}]
|
py | b404a2cc273173f7f5864ebce63ad4e2fb82ef80 | from common_fixtures import * # NOQA
def test_zone_list(admin_client, client):
zones = admin_client.list_zone()
assert len(zones) > 0
zones = client.list_zone()
assert len(zones) >= 0
|
py | b404a38e79108f034eeacd7a85b929da2f79f94d | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Useful tools for running commands."""
import contextlib
import os
import shlex
import shutil
import stat
import subprocess
import sys
from functools import lru_cache
from pathlib import Path
from typing import Dict, List, Mapping, Optional
from airflow_breeze.utils.console import console
from airflow_breeze.utils.path_utils import AIRFLOW_SOURCES_ROOT
def run_command(
cmd: List[str],
*,
check: bool = True,
verbose: bool = False,
dry_run: bool = False,
no_output_dump_on_exception: bool = False,
env: Optional[Mapping[str, str]] = None,
cwd: Optional[Path] = None,
input: Optional[str] = None,
**kwargs,
) -> Optional[subprocess.CompletedProcess]:
"""
    Runs a command passed as a list of strings, with some extra functionality over Popen (kwargs accepted
    by Popen can be used in this command even if not explicitly specified).
    It prints diagnostics when requested and also allows a "dry run" of the commands rather than actually
    executing them.
    An important reason for having this command-running tool is to be able (in verbose mode) to directly
    copy & paste the verbose output and run the command manually, including all the environment variables
    needed to run the command.
    :param cmd: command to run
    :param check: whether to check the return status and raise an exception on failure (same as Popen)
    :param verbose: print commands when running
    :param dry_run: do not execute the command - just print what would happen
    :param no_output_dump_on_exception: whether to suppress printing logs from output when the command fails
    :param env: mapping of environment variables to set for the run command
    :param cwd: working directory to set for the command
    :param input: input string to pass to stdin of the process
    :param kwargs: kwargs passed to Popen
"""
workdir: str = str(cwd) if cwd else os.getcwd()
if verbose or dry_run:
command_to_print = ' '.join(shlex.quote(c) for c in cmd)
        # if we pass environment variables to execute, include them in the printed command
env_to_print = ' '.join(f'{key}="{val}"' for (key, val) in env.items()) if env else ''
if env_to_print:
env_to_print += ' '
console.print(f"\n[bright_blue]Working directory {workdir} [/]\n")
        # Soft wrap allows copy & paste and running of the resulting output, as it has no hard EOL
console.print(f"\n[bright_blue]{env_to_print}{command_to_print}[/]\n", soft_wrap=True)
if dry_run:
return None
try:
cmd_env = os.environ.copy()
if env:
cmd_env.update(env)
return subprocess.run(cmd, input=input, check=check, env=cmd_env, cwd=workdir, **kwargs)
except subprocess.CalledProcessError as ex:
if not no_output_dump_on_exception:
if ex.stdout:
console.print("[blue]========================= OUTPUT start ============================[/]")
console.print(ex.stdout)
console.print("[blue]========================= OUTPUT end ==============================[/]")
if ex.stderr:
console.print("[red]========================= STDERR start ============================[/]")
console.print(ex.stderr)
console.print("[red]========================= STDERR end ==============================[/]")
if not check:
raise
return None
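# An illustrative call, kept as a comment so nothing runs on import; the command
# and env values are examples only:
#
#   result = run_command(
#       ["git", "status", "--short"],
#       verbose=True,
#       env={"GIT_PAGER": "cat"},
#       capture_output=True,
#       text=True,
#   )
#   if result is not None:
#       print(result.stdout)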
def check_pre_commit_installed(verbose: bool) -> bool:
"""
Check if pre-commit is installed in the right version.
:param verbose: print commands when running
    :return: True if pre-commit is installed in the right version.
"""
# Local import to make autocomplete work
import yaml
from pkg_resources import parse_version
pre_commit_config = yaml.safe_load((AIRFLOW_SOURCES_ROOT / ".pre-commit-config.yaml").read_text())
min_pre_commit_version = pre_commit_config["minimum_pre_commit_version"]
pre_commit_name = "pre-commit"
is_installed = False
if shutil.which(pre_commit_name) is not None:
process = run_command(
[pre_commit_name, "--version"], verbose=verbose, check=True, capture_output=True, text=True
)
if process and process.stdout:
pre_commit_version = process.stdout.split(" ")[-1].strip()
if parse_version(pre_commit_version) >= parse_version(min_pre_commit_version):
console.print(
f"\n[green]Package {pre_commit_name} is installed. "
f"Good version {pre_commit_version} (>= {min_pre_commit_version})[/]\n"
)
is_installed = True
else:
console.print(
f"\n[red]Package name {pre_commit_name} version is wrong. It should be"
f"aat least {min_pre_commit_version} and is {pre_commit_version}.[/]\n\n"
)
else:
console.print(
"\n[bright_yellow]Could not determine version of pre-commit. "
"You might need to update it![/]\n"
)
is_installed = True
else:
console.print(f"\n[red]Error: Package name {pre_commit_name} is not installed.[/]")
if not is_installed:
console.print("\nPlease install using https://pre-commit.com/#install to continue\n")
return is_installed
def get_filesystem_type(filepath):
"""
Determine the type of filesystem used - we might want to use different parameters if tmpfs is used.
:param filepath: path to check
:return: type of filesystem
"""
# We import it locally so that click autocomplete works
import psutil
root_type = "unknown"
for part in psutil.disk_partitions():
if part.mountpoint == '/':
root_type = part.fstype
continue
if filepath.startswith(part.mountpoint):
return part.fstype
return root_type
def instruct_build_image(python: str):
"""Print instructions to the user that they should build the image"""
console.print(f'[bright_yellow]\nThe CI image for ' f'python version {python} may be outdated[/]\n')
console.print('Please run this command at earliest convenience:\n')
console.print(f' `./breeze build-image --python {python}`\n')
@contextlib.contextmanager
def working_directory(source_path: Path):
"""
# Equivalent of pushd and popd in bash script.
# https://stackoverflow.com/a/42441759/3101838
:param source_path:
:return:
"""
prev_cwd = Path.cwd()
os.chdir(source_path)
try:
yield
finally:
os.chdir(prev_cwd)
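# An illustrative use of the context manager (the sub-path is an arbitrary example):
#
#   with working_directory(AIRFLOW_SOURCES_ROOT / "scripts"):
#       run_command(["ls"], verbose=True)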
def change_file_permission(file_to_fix: Path):
"""Update file permissions to not be group-writeable. Needed to solve cache invalidation problems."""
if file_to_fix.exists():
current = stat.S_IMODE(os.stat(file_to_fix).st_mode)
new = current & ~stat.S_IWGRP & ~stat.S_IWOTH # Removes group/other write permission
os.chmod(file_to_fix, new)
def change_directory_permission(directory_to_fix: Path):
"""Update directory permissions to not be group-writeable. Needed to solve cache invalidation problems."""
if directory_to_fix.exists():
current = stat.S_IMODE(os.stat(directory_to_fix).st_mode)
new = current & ~stat.S_IWGRP & ~stat.S_IWOTH # Removes group/other write permission
new = (
new | stat.S_IXGRP | stat.S_IXOTH
) # Add group/other execute permission (to be able to list directories)
os.chmod(directory_to_fix, new)
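# The permission arithmetic above, with example values: a file with mode 0o664
# (rw-rw-r--) becomes 0o644 (rw-r--r--) after clearing S_IWGRP (0o020) and
# S_IWOTH (0o002); a directory with mode 0o775 becomes 0o755 and stays 0o755
# after OR-ing in S_IXGRP | S_IXOTH (0o011), so it remains listable by group/others.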
@working_directory(AIRFLOW_SOURCES_ROOT)
def fix_group_permissions():
"""Fixes permissions of all the files and directories that have group-write access."""
console.print("[bright_blue]Fixing group permissions[/]")
files_to_fix_result = run_command(['git', 'ls-files', './'], capture_output=True, text=True)
if files_to_fix_result.returncode == 0:
files_to_fix = files_to_fix_result.stdout.strip().split('\n')
for file_to_fix in files_to_fix:
change_file_permission(Path(file_to_fix))
directories_to_fix_result = run_command(
['git', 'ls-tree', '-r', '-d', '--name-only', 'HEAD'], capture_output=True, text=True
)
if directories_to_fix_result.returncode == 0:
directories_to_fix = directories_to_fix_result.stdout.strip().split('\n')
for directory_to_fix in directories_to_fix:
change_directory_permission(Path(directory_to_fix))
def is_repo_rebased(repo: str, branch: str):
"""Returns True if the local branch contains latest remote SHA (i.e. if it is rebased)"""
# We import it locally so that click autocomplete works
import requests
gh_url = f"https://api.github.com/repos/{repo}/commits/{branch}"
headers_dict = {"Accept": "application/vnd.github.VERSION.sha"}
latest_sha = requests.get(gh_url, headers=headers_dict).text.strip()
rebased = False
process = run_command(['git', 'log', '--format=format:%H'], capture_output=True, text=True)
output = process.stdout.strip().splitlines() if process is not None else "missing"
if latest_sha in output:
rebased = True
return rebased
def check_if_buildx_plugin_installed(verbose: bool) -> bool:
"""
Checks if buildx plugin is locally available.
:param verbose: print commands when running
    :return: True if the buildx plugin is installed.
"""
is_buildx_available = False
check_buildx = ['docker', 'buildx', 'version']
docker_buildx_version_process = run_command(
check_buildx,
verbose=verbose,
no_output_dump_on_exception=True,
capture_output=True,
text=True,
)
if (
docker_buildx_version_process
and docker_buildx_version_process.returncode == 0
and docker_buildx_version_process.stdout != ''
):
is_buildx_available = True
return is_buildx_available
def prepare_build_command(prepare_buildx_cache: bool, verbose: bool) -> List[str]:
"""
Prepare build command for docker build. Depending on whether we have buildx plugin installed or not,
and whether we run cache preparation, there might be different results:
* if buildx plugin is installed - `docker buildx` command is returned - using regular or cache builder
depending on whether we build regular image or cache
* if no buildx plugin is installed, and we do not prepare cache, regular docker `build` command is used.
* if no buildx plugin is installed, and we prepare cache - we fail. Cache can only be done with buildx
:param prepare_buildx_cache: whether we are preparing buildx cache.
:param verbose: print commands when running
:return: command to use as docker build command
"""
build_command_param = []
is_buildx_available = check_if_buildx_plugin_installed(verbose=verbose)
if is_buildx_available:
if prepare_buildx_cache:
build_command_param.extend(["buildx", "build", "--builder", "airflow_cache", "--progress=tty"])
cmd = ['docker', 'buildx', 'inspect', 'airflow_cache']
process = run_command(cmd, verbose=True, text=True)
if process and process.returncode != 0:
next_cmd = ['docker', 'buildx', 'create', '--name', 'airflow_cache']
run_command(next_cmd, verbose=True, text=True, check=False)
else:
build_command_param.extend(["buildx", "build", "--builder", "default", "--progress=tty"])
else:
if prepare_buildx_cache:
console.print(
'\n[red] Buildx cli plugin is not available and you need it to prepare buildx cache. \n'
)
console.print(
'[red] Please install it following https://docs.docker.com/buildx/working-with-buildx/ \n'
)
sys.exit(1)
build_command_param.append("build")
return build_command_param
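# Illustrative: with the buildx plugin available and no cache preparation requested,
# the list returned above is expected to be
# ["buildx", "build", "--builder", "default", "--progress=tty"]; the caller then
# prepends "docker" and appends the remaining build arguments.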
@lru_cache(maxsize=None)
def commit_sha():
"""Returns commit SHA of current repo. Cached for various usages."""
return run_command(
['git', 'rev-parse', 'HEAD'], capture_output=True, text=True, check=False
).stdout.strip()
def filter_out_none(**kwargs) -> Dict[str, str]:
"""Filters out all None values from parameters passed."""
for key in list(kwargs):
if kwargs[key] is None:
kwargs.pop(key)
return kwargs
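# A tiny, guarded illustration of filter_out_none (the keyword arguments below
# are arbitrary examples).
if __name__ == "__main__":
    print(filter_out_none(image="airflow", tag=None, push=False))
    # expected output: {'image': 'airflow', 'push': False}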
|
py | b404a3cad84e894653dc3348d7113593610fd6a9 | # Generated by Django 3.1.1 on 2020-10-01 04:07
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('pub_date', models.DateTimeField(verbose_name='date published')),
('body', models.TextField()),
],
),
]
|
py | b404a4dff92897d85f7f47dba49704049826fe9a | from .mocks import MockWeatherClient
__all__ = ["MockWeatherClient"]
|
py | b404a565c0e8824e7d019d65e5b5cb76e40f91eb | # Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Testing concatenate op
"""
import numpy as np
import pytest
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as data_trans
def test_concatenate_op_all():
def gen():
yield (np.array([5., 6., 7., 8.], dtype=np.float),)
prepend_tensor = np.array([1.4, 2., 3., 4., 4.5], dtype=np.float)
append_tensor = np.array([9., 10.3, 11., 12.], dtype=np.float)
data = ds.GeneratorDataset(gen, column_names=["col"])
concatenate_op = data_trans.Concatenate(0, prepend_tensor, append_tensor)
data = data.map(input_columns=["col"], operations=concatenate_op)
expected = np.array([1.4, 2., 3., 4., 4.5, 5., 6., 7., 8., 9., 10.3,
11., 12.])
for data_row in data:
np.testing.assert_array_equal(data_row[0], expected)
def test_concatenate_op_none():
def gen():
yield (np.array([5., 6., 7., 8.], dtype=np.float),)
data = ds.GeneratorDataset(gen, column_names=["col"])
concatenate_op = data_trans.Concatenate()
data = data.map(input_columns=["col"], operations=concatenate_op)
for data_row in data:
np.testing.assert_array_equal(data_row[0], np.array([5., 6., 7., 8.], dtype=np.float))
def test_concatenate_op_string():
def gen():
yield (np.array(["ss", "ad"], dtype='S'),)
prepend_tensor = np.array(["dw", "df"], dtype='S')
append_tensor = np.array(["dwsdf", "df"], dtype='S')
data = ds.GeneratorDataset(gen, column_names=["col"])
concatenate_op = data_trans.Concatenate(0, prepend_tensor, append_tensor)
data = data.map(input_columns=["col"], operations=concatenate_op)
expected = np.array(["dw", "df", "ss", "ad", "dwsdf", "df"], dtype='S')
for data_row in data:
np.testing.assert_array_equal(data_row[0], expected)
def test_concatenate_op_multi_input_string():
prepend_tensor = np.array(["dw", "df"], dtype='S')
append_tensor = np.array(["dwsdf", "df"], dtype='S')
data = ([["1", "2", "d"]], [["3", "4", "e"]])
data = ds.NumpySlicesDataset(data, column_names=["col1", "col2"])
concatenate_op = data_trans.Concatenate(0, prepend=prepend_tensor, append=append_tensor)
data = data.map(input_columns=["col1", "col2"], column_order=["out1"], output_columns=["out1"],
operations=concatenate_op)
expected = np.array(["dw", "df", "1", "2", "d", "3", "4", "e", "dwsdf", "df"], dtype='S')
for data_row in data:
np.testing.assert_array_equal(data_row[0], expected)
def test_concatenate_op_multi_input_numeric():
prepend_tensor = np.array([3, 5])
data = ([[1, 2]], [[3, 4]])
data = ds.NumpySlicesDataset(data, column_names=["col1", "col2"])
concatenate_op = data_trans.Concatenate(0, prepend=prepend_tensor)
data = data.map(input_columns=["col1", "col2"], column_order=["out1"], output_columns=["out1"],
operations=concatenate_op)
expected = np.array([3, 5, 1, 2, 3, 4])
for data_row in data:
np.testing.assert_array_equal(data_row[0], expected)
def test_concatenate_op_type_mismatch():
def gen():
yield (np.array([3, 4], dtype=np.float),)
prepend_tensor = np.array(["ss", "ad"], dtype='S')
data = ds.GeneratorDataset(gen, column_names=["col"])
concatenate_op = data_trans.Concatenate(0, prepend_tensor)
data = data.map(input_columns=["col"], operations=concatenate_op)
with pytest.raises(RuntimeError) as error_info:
for _ in data:
pass
assert "Tensor types do not match" in str(error_info.value)
def test_concatenate_op_type_mismatch2():
def gen():
yield (np.array(["ss", "ad"], dtype='S'),)
prepend_tensor = np.array([3, 5], dtype=np.float)
data = ds.GeneratorDataset(gen, column_names=["col"])
concatenate_op = data_trans.Concatenate(0, prepend_tensor)
data = data.map(input_columns=["col"], operations=concatenate_op)
with pytest.raises(RuntimeError) as error_info:
for _ in data:
pass
assert "Tensor types do not match" in str(error_info.value)
def test_concatenate_op_incorrect_dim():
def gen():
yield (np.array([["ss", "ad"], ["ss", "ad"]], dtype='S'),)
prepend_tensor = np.array(["ss", "ss"], dtype='S')
concatenate_op = data_trans.Concatenate(0, prepend_tensor)
data = ds.GeneratorDataset(gen, column_names=["col"])
data = data.map(input_columns=["col"], operations=concatenate_op)
with pytest.raises(RuntimeError) as error_info:
for _ in data:
pass
assert "Only 1D tensors supported" in str(error_info.value)
def test_concatenate_op_wrong_axis():
with pytest.raises(ValueError) as error_info:
data_trans.Concatenate(2)
assert "only 1D concatenation supported." in str(error_info.value)
def test_concatenate_op_negative_axis():
def gen():
yield (np.array([5., 6., 7., 8.], dtype=np.float),)
prepend_tensor = np.array([1.4, 2., 3., 4., 4.5], dtype=np.float)
append_tensor = np.array([9., 10.3, 11., 12.], dtype=np.float)
data = ds.GeneratorDataset(gen, column_names=["col"])
concatenate_op = data_trans.Concatenate(-1, prepend_tensor, append_tensor)
data = data.map(input_columns=["col"], operations=concatenate_op)
expected = np.array([1.4, 2., 3., 4., 4.5, 5., 6., 7., 8., 9., 10.3,
11., 12.])
for data_row in data:
np.testing.assert_array_equal(data_row[0], expected)
def test_concatenate_op_incorrect_input_dim():
prepend_tensor = np.array([["ss", "ad"], ["ss", "ad"]], dtype='S')
with pytest.raises(ValueError) as error_info:
data_trans.Concatenate(0, prepend_tensor)
assert "can only prepend 1D arrays." in str(error_info.value)
if __name__ == "__main__":
test_concatenate_op_all()
test_concatenate_op_none()
test_concatenate_op_string()
test_concatenate_op_multi_input_string()
test_concatenate_op_multi_input_numeric()
test_concatenate_op_type_mismatch()
test_concatenate_op_type_mismatch2()
test_concatenate_op_incorrect_dim()
test_concatenate_op_negative_axis()
test_concatenate_op_wrong_axis()
test_concatenate_op_incorrect_input_dim()
|
py | b404a6453015efdafb08f7e926a18d518e04dcd6 | from datetime import datetime, timezone
import pandas as pd
import numpy as np
FILE = 'https://raw.githubusercontent.com/AstroMatt/book-python/master/numerical-analysis/data/iris-dirty.csv'
iris = pd.read_csv(FILE, encoding='utf-8')
iris.columns = [
'Sepal length',
'Sepal width',
'Petal length',
'Petal width',
'Species',
]
iris.where(iris['Petal length'] > 2.0, inplace=True)
iris.dropna(how='all', inplace=True)
iris['datetime'] = datetime.now(tz=timezone.utc)
iris['big_enough'] = np.where(iris['Petal width'] > 1.0, True, False)
columns = ['Sepal length', 'Sepal width', 'Species']
iris = iris[columns]
iris.describe()
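# Illustrative: np.where evaluates element-wise and returns an array, unlike
# DataFrame.where above, which keeps matching rows and masks the rest with NaN
# before they are dropped.
print(np.where(pd.Series([0.5, 1.5]) > 1.0, True, False))  # -> [False  True]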
|
py | b404a70e7dc272f2ee71a5c99f4226879351975c | #!/usr/bin/env python3
# Copyright (c) 2017-2019 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the -uacomment option."""
import re
from test_framework.test_framework import BitcoinTestFramework
from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
class UacommentTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self.log.info("test multiple -uacomment")
test_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-12:-1]
assert_equal(test_uacomment, "(testnode0)")
self.restart_node(0, ["-uacomment=foo"])
foo_uacomment = self.nodes[0].getnetworkinfo()["subversion"][-17:-1]
assert_equal(foo_uacomment, "(testnode0; foo)")
self.log.info("test -uacomment max length")
self.stop_node(0)
expected = "Error: Total length of network version string \([0-9]+\) exceeds maximum length \(256\). Reduce the number or size of uacomments."
self.nodes[0].assert_start_raises_init_error(["-uacomment=" + 'a' * 256], expected, match=ErrorMatch.FULL_REGEX)
self.log.info("test -uacomment unsafe characters")
for unsafe_char in ['/', ':', '(', ')']:
expected = "Error: User Agent comment \(" + re.escape(unsafe_char) + "\) contains unsafe characters."
self.nodes[0].assert_start_raises_init_error(["-uacomment=" + unsafe_char], expected, match=ErrorMatch.FULL_REGEX)
if __name__ == '__main__':
UacommentTest().main()
|
py | b404a8072afa9aa467f50ee312455bfe05cff665 | # coding: utf-8
import pprint
import re
import six
class SessionPersistence:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'cookie_name': 'str',
'type': 'str',
'persistence_timeout': 'int'
}
attribute_map = {
'cookie_name': 'cookie_name',
'type': 'type',
'persistence_timeout': 'persistence_timeout'
}
def __init__(self, cookie_name=None, type=None, persistence_timeout=None):
"""SessionPersistence - a model defined in huaweicloud sdk"""
self._cookie_name = None
self._type = None
self._persistence_timeout = None
self.discriminator = None
if cookie_name is not None:
self.cookie_name = cookie_name
self.type = type
if persistence_timeout is not None:
self.persistence_timeout = persistence_timeout
@property
def cookie_name(self):
"""Gets the cookie_name of this SessionPersistence.
        Cookie name. Only supported when type is APP_COOKIE. Format: only letters, digits, hyphens (-) and underscores (_) are allowed.
:return: The cookie_name of this SessionPersistence.
:rtype: str
"""
return self._cookie_name
@cookie_name.setter
def cookie_name(self, cookie_name):
"""Sets the cookie_name of this SessionPersistence.
        Cookie name. Only supported when type is APP_COOKIE. Format: only letters, digits, hyphens (-) and underscores (_) are allowed.
:param cookie_name: The cookie_name of this SessionPersistence.
:type: str
"""
self._cookie_name = cookie_name
@property
def type(self):
"""Gets the type of this SessionPersistence.
        Description: the persistence type, which can be SOURCE_IP, HTTP_COOKIE or APP_COOKIE. Constraints: 1. When the pool protocol is TCP, UDP or QUIC, only SOURCE_IP takes effect; 2. When the pool protocol is HTTP or HTTPS, only HTTP_COOKIE and APP_COOKIE take effect.
:return: The type of this SessionPersistence.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this SessionPersistence.
        Description: the persistence type, which can be SOURCE_IP, HTTP_COOKIE or APP_COOKIE. Constraints: 1. When the pool protocol is TCP, UDP or QUIC, only SOURCE_IP takes effect; 2. When the pool protocol is HTTP or HTTPS, only HTTP_COOKIE and APP_COOKIE take effect.
:param type: The type of this SessionPersistence.
:type: str
"""
self._type = type
@property
def persistence_timeout(self):
"""Gets the persistence_timeout of this SessionPersistence.
        Session persistence timeout. Not effective when type is APP_COOKIE. Valid range: [1, 60] minutes (default 1) if the pool protocol is TCP, UDP or QUIC; [1, 1440] minutes (default 1440) if the pool protocol is HTTP or HTTPS.
:return: The persistence_timeout of this SessionPersistence.
:rtype: int
"""
return self._persistence_timeout
@persistence_timeout.setter
def persistence_timeout(self, persistence_timeout):
"""Sets the persistence_timeout of this SessionPersistence.
        Session persistence timeout. Not effective when type is APP_COOKIE. Valid range: [1, 60] minutes (default 1) if the pool protocol is TCP, UDP or QUIC; [1, 1440] minutes (default 1440) if the pool protocol is HTTP or HTTPS.
:param persistence_timeout: The persistence_timeout of this SessionPersistence.
:type: int
"""
self._persistence_timeout = persistence_timeout
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, SessionPersistence):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
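# --- Illustrative usage sketch (an addition, not part of the generated SDK model) ---
# A minimal example of populating this model for an HTTP_COOKIE persistence policy;
# the values follow the constraints described in the docstrings above.
if __name__ == "__main__":
    example = SessionPersistence(
        type="HTTP_COOKIE",        # one of SOURCE_IP / HTTP_COOKIE / APP_COOKIE
        persistence_timeout=1440,  # minutes; [1, 1440] for HTTP/HTTPS pools
    )
    # cookie_name is only meaningful for APP_COOKIE persistence.
    print(example.to_dict())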
|
py | b404a830988e1fc84bec40cb81e4d64080326716 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
import sys
import unittest
from django import template
from django.contrib.auth.models import Group
from django.core import urlresolvers
from django.template import (base as template_base, loader,
Context, RequestContext, Template, TemplateSyntaxError)
from django.template.engine import Engine
from django.template.loaders import app_directories, filesystem
from django.test import RequestFactory, SimpleTestCase
from django.test.utils import extend_sys_path, ignore_warnings, override_settings
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils._os import upath
TESTS_DIR = os.path.dirname(os.path.dirname(os.path.abspath(upath(__file__))))
TEMPLATES_DIR = os.path.join(TESTS_DIR, 'templates')
class TemplateLoaderTests(SimpleTestCase):
def test_loaders_security(self):
ad_loader = app_directories.Loader(Engine.get_default())
fs_loader = filesystem.Loader(Engine.get_default())
def test_template_sources(path, template_dirs, expected_sources):
if isinstance(expected_sources, list):
# Fix expected sources so they are abspathed
expected_sources = [os.path.abspath(s) for s in expected_sources]
# Test the two loaders (app_directores and filesystem).
func1 = lambda p, t: list(ad_loader.get_template_sources(p, t))
func2 = lambda p, t: list(fs_loader.get_template_sources(p, t))
for func in (func1, func2):
if isinstance(expected_sources, list):
self.assertEqual(func(path, template_dirs), expected_sources)
else:
self.assertRaises(expected_sources, func, path, template_dirs)
template_dirs = ['/dir1', '/dir2']
test_template_sources('index.html', template_dirs,
['/dir1/index.html', '/dir2/index.html'])
test_template_sources('/etc/passwd', template_dirs, [])
test_template_sources('etc/passwd', template_dirs,
['/dir1/etc/passwd', '/dir2/etc/passwd'])
test_template_sources('../etc/passwd', template_dirs, [])
test_template_sources('../../../etc/passwd', template_dirs, [])
test_template_sources('/dir1/index.html', template_dirs,
['/dir1/index.html'])
test_template_sources('../dir2/index.html', template_dirs,
['/dir2/index.html'])
test_template_sources('/dir1blah', template_dirs, [])
test_template_sources('../dir1blah', template_dirs, [])
# UTF-8 bytestrings are permitted.
test_template_sources(b'\xc3\x85ngstr\xc3\xb6m', template_dirs,
['/dir1/Ångström', '/dir2/Ångström'])
# Unicode strings are permitted.
test_template_sources('Ångström', template_dirs,
['/dir1/Ångström', '/dir2/Ångström'])
test_template_sources('Ångström', [b'/Stra\xc3\x9fe'], ['/Straße/Ångström'])
test_template_sources(b'\xc3\x85ngstr\xc3\xb6m', [b'/Stra\xc3\x9fe'],
['/Straße/Ångström'])
# Invalid UTF-8 encoding in bytestrings is not. Should raise a
# semi-useful error message.
test_template_sources(b'\xc3\xc3', template_dirs, UnicodeDecodeError)
# Case insensitive tests (for win32). Not run unless we're on
# a case insensitive operating system.
if os.path.normcase('/TEST') == os.path.normpath('/test'):
template_dirs = ['/dir1', '/DIR2']
test_template_sources('index.html', template_dirs,
['/dir1/index.html', '/DIR2/index.html'])
test_template_sources('/DIR1/index.HTML', template_dirs,
['/DIR1/index.HTML'])
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
}])
# Turn TEMPLATE_DEBUG on, so that the origin file name will be kept with
# the compiled templates.
@override_settings(TEMPLATE_DEBUG=True)
def test_loader_debug_origin(self):
load_name = 'login.html'
# We rely on the fact the file system and app directories loaders both
# inherit the load_template method from the base Loader class, so we
# only need to test one of them.
template = loader.get_template(load_name).template
template_name = template.nodelist[0].source[0].name
self.assertTrue(template_name.endswith(load_name),
'Template loaded by filesystem loader has incorrect name for debug page: %s' % template_name)
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [TEMPLATES_DIR],
'OPTIONS': {
'loaders': [
('django.template.loaders.cached.Loader', [
'django.template.loaders.filesystem.Loader',
]),
],
},
}])
@override_settings(TEMPLATE_DEBUG=True)
def test_cached_loader_debug_origin(self):
load_name = 'login.html'
# Test the cached loader separately since it overrides load_template.
template = loader.get_template(load_name).template
template_name = template.nodelist[0].source[0].name
self.assertTrue(template_name.endswith(load_name),
'Template loaded through cached loader has incorrect name for debug page: %s' % template_name)
template = loader.get_template(load_name).template
template_name = template.nodelist[0].source[0].name
self.assertTrue(template_name.endswith(load_name),
'Cached template loaded through cached loader has incorrect name for debug page: %s' % template_name)
@override_settings(TEMPLATE_DEBUG=True)
def test_loader_origin(self):
template = loader.get_template('login.html')
self.assertEqual(template.origin.loadname, 'login.html')
@override_settings(TEMPLATE_DEBUG=True)
def test_string_origin(self):
template = Template('string template')
self.assertEqual(template.origin.source, 'string template')
def test_debug_false_origin(self):
template = loader.get_template('login.html')
self.assertEqual(template.origin, None)
# TEMPLATE_DEBUG must be true, otherwise the exception raised
# during {% include %} processing will be suppressed.
@override_settings(TEMPLATE_DEBUG=True)
# Test the base loader class via the app loader. load_template
# from base is used by all shipped loaders excepting cached,
# which has its own test.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}])
def test_include_missing_template(self):
"""
Tests that the correct template is identified as not existing
when {% include %} specifies a template that does not exist.
"""
load_name = 'test_include_error.html'
r = None
try:
tmpl = loader.select_template([load_name])
r = tmpl.render(template.Context({}))
except template.TemplateDoesNotExist as e:
self.assertEqual(e.args[0], 'missing.html')
self.assertEqual(r, None, 'Template rendering unexpectedly succeeded, produced: ->%r<-' % r)
# TEMPLATE_DEBUG must be true, otherwise the exception raised
# during {% include %} processing will be suppressed.
@override_settings(TEMPLATE_DEBUG=True)
# Test the base loader class via the app loader. load_template
# from base is used by all shipped loaders excepting cached,
# which has its own test.
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
}])
def test_extends_include_missing_baseloader(self):
"""
Tests that the correct template is identified as not existing
when {% extends %} specifies a template that does exist, but
that template has an {% include %} of something that does not
exist. See #12787.
"""
load_name = 'test_extends_error.html'
tmpl = loader.get_template(load_name)
r = None
try:
r = tmpl.render(template.Context({}))
except template.TemplateDoesNotExist as e:
self.assertEqual(e.args[0], 'missing.html')
self.assertEqual(r, None, 'Template rendering unexpectedly succeeded, produced: ->%r<-' % r)
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.cached.Loader', [
'django.template.loaders.app_directories.Loader',
]),
],
},
}])
@override_settings(TEMPLATE_DEBUG=True)
def test_extends_include_missing_cachedloader(self):
"""
Same as test_extends_include_missing_baseloader, only tests
behavior of the cached loader instead of base loader.
"""
load_name = 'test_extends_error.html'
tmpl = loader.get_template(load_name)
r = None
try:
r = tmpl.render(template.Context({}))
except template.TemplateDoesNotExist as e:
self.assertEqual(e.args[0], 'missing.html')
self.assertEqual(r, None, 'Template rendering unexpectedly succeeded, produced: ->%r<-' % r)
# For the cached loader, repeat the test, to ensure the first attempt did not cache a
# result that behaves incorrectly on subsequent attempts.
tmpl = loader.get_template(load_name)
try:
tmpl.render(template.Context({}))
except template.TemplateDoesNotExist as e:
self.assertEqual(e.args[0], 'missing.html')
self.assertEqual(r, None, 'Template rendering unexpectedly succeeded, produced: ->%r<-' % r)
def test_include_template_argument(self):
"""
Support any render() supporting object
"""
ctx = Context({
'tmpl': Template('This worked!'),
})
outer_tmpl = Template('{% include tmpl %}')
output = outer_tmpl.render(ctx)
self.assertEqual(output, 'This worked!')
@override_settings(TEMPLATE_DEBUG=True)
def test_include_immediate_missing(self):
"""
Regression test for #16417 -- {% include %} tag raises TemplateDoesNotExist at compile time if TEMPLATE_DEBUG is True
Test that an {% include %} tag with a literal string referencing a
template that does not exist does not raise an exception at parse
time.
"""
tmpl = Template('{% include "this_does_not_exist.html" %}')
self.assertIsInstance(tmpl, Template)
@override_settings(TEMPLATE_DEBUG=True)
def test_include_recursive(self):
comments = [
{
'comment': 'A1',
'children': [
{'comment': 'B1', 'children': []},
{'comment': 'B2', 'children': []},
{'comment': 'B3', 'children': [
{'comment': 'C1', 'children': []}
]},
]
}
]
t = loader.get_template('recursive_include.html')
self.assertEqual(
"Recursion! A1 Recursion! B1 B2 B3 Recursion! C1",
t.render(Context({'comments': comments})).replace(' ', '').replace('\n', ' ').strip(),
)
class TemplateRegressionTests(SimpleTestCase):
def test_token_smart_split(self):
# Regression test for #7027
token = template_base.Token(template_base.TOKEN_BLOCK, 'sometag _("Page not found") value|yesno:_("yes,no")')
split = token.split_contents()
self.assertEqual(split, ["sometag", '_("Page not found")', 'value|yesno:_("yes,no")'])
@override_settings(SETTINGS_MODULE=None, TEMPLATE_DEBUG=True)
def test_url_reverse_no_settings_module(self):
# Regression test for #9005
t = Template('{% url will_not_match %}')
c = Context()
with self.assertRaises(urlresolvers.NoReverseMatch):
t.render(c)
@override_settings(
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {'string_if_invalid': '%s is invalid'},
}],
SETTINGS_MODULE='also_something',
)
def test_url_reverse_view_name(self):
# Regression test for #19827
t = Template('{% url will_not_match %}')
c = Context()
try:
t.render(c)
except urlresolvers.NoReverseMatch:
tb = sys.exc_info()[2]
depth = 0
while tb.tb_next is not None:
tb = tb.tb_next
depth += 1
self.assertGreater(depth, 5,
"The traceback context was lost when reraising the traceback. See #19827")
@override_settings(DEBUG=True, TEMPLATE_DEBUG=True)
def test_no_wrapped_exception(self):
"""
The template system doesn't wrap exceptions, but annotates them.
Refs #16770
"""
c = Context({"coconuts": lambda: 42 / 0})
t = Template("{{ coconuts }}")
with self.assertRaises(ZeroDivisionError) as cm:
t.render(c)
self.assertEqual(cm.exception.django_template_source[1], (0, 14))
def test_invalid_block_suggestion(self):
# See #7876
try:
Template("{% if 1 %}lala{% endblock %}{% endif %}")
except TemplateSyntaxError as e:
self.assertEqual(e.args[0], "Invalid block tag: 'endblock', expected 'elif', 'else' or 'endif'")
def test_ifchanged_concurrency(self):
# Tests for #15849
template = Template('[0{% for x in foo %},{% with var=get_value %}{% ifchanged %}{{ var }}{% endifchanged %}{% endwith %}{% endfor %}]')
# Using generator to mimic concurrency.
# The generator is not passed into the 'for' loop, because the loop would consume it via list(values);
# instead, gen.next() is called from the template to control the generator.
def gen():
yield 1
yield 2
# Simulate that another thread is now rendering.
# When the IfChangedNode stores state on 'self' it stays at '3' and skips the last yielded value below.
iter2 = iter([1, 2, 3])
output2 = template.render(Context({'foo': range(3), 'get_value': lambda: next(iter2)}))
self.assertEqual(output2, '[0,1,2,3]', 'Expected [0,1,2,3] in second parallel template, got {}'.format(output2))
yield 3
gen1 = gen()
output1 = template.render(Context({'foo': range(3), 'get_value': lambda: next(gen1)}))
self.assertEqual(output1, '[0,1,2,3]', 'Expected [0,1,2,3] in first template, got {}'.format(output1))
def test_cache_regression_20130(self):
t = Template('{% load cache %}{% cache 1 regression_20130 %}foo{% endcache %}')
cachenode = t.nodelist[1]
self.assertEqual(cachenode.fragment_name, 'regression_20130')
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'default',
},
'template_fragments': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'fragments',
},
})
def test_cache_fragment_cache(self):
"""
When a cache called "template_fragments" is present, the cache tag
will use it in preference to 'default'
"""
t1 = Template('{% load cache %}{% cache 1 fragment %}foo{% endcache %}')
t2 = Template('{% load cache %}{% cache 1 fragment using="default" %}bar{% endcache %}')
ctx = Context()
o1 = t1.render(ctx)
o2 = t2.render(ctx)
self.assertEqual(o1, 'foo')
self.assertEqual(o2, 'bar')
def test_cache_missing_backend(self):
"""
When a cache that doesn't exist is specified, the cache tag will
raise a TemplateSyntaxError
'"""
t = Template('{% load cache %}{% cache 1 backend using="unknown" %}bar{% endcache %}')
ctx = Context()
with self.assertRaises(TemplateSyntaxError):
t.render(ctx)
def test_ifchanged_render_once(self):
""" Test for ticket #19890. The content of ifchanged template tag was
rendered twice."""
template = Template('{% ifchanged %}{% cycle "1st time" "2nd time" %}{% endifchanged %}')
output = template.render(Context({}))
self.assertEqual(output, '1st time')
def test_super_errors(self):
"""
Test the behavior of errors raised inside included blocks.
See #18169
"""
t = loader.get_template('included_content.html')
with self.assertRaises(urlresolvers.NoReverseMatch):
t.render(Context({}))
def test_debug_tag_non_ascii(self):
"""
Test non-ASCII model representation in debug output (#23060).
"""
Group.objects.create(name="清風")
c1 = Context({"objs": Group.objects.all()})
t1 = Template('{% debug %}')
self.assertIn("清風", t1.render(c1))
class TemplateTagLoading(SimpleTestCase):
def setUp(self):
self.egg_dir = '%s/eggs' % os.path.dirname(upath(__file__))
def test_load_error(self):
ttext = "{% load broken_tag %}"
self.assertRaises(template.TemplateSyntaxError, template.Template, ttext)
try:
template.Template(ttext)
except template.TemplateSyntaxError as e:
self.assertIn('ImportError', e.args[0])
self.assertIn('Xtemplate', e.args[0])
def test_load_error_egg(self):
ttext = "{% load broken_egg %}"
egg_name = '%s/tagsegg.egg' % self.egg_dir
with extend_sys_path(egg_name):
with self.assertRaises(template.TemplateSyntaxError):
with self.settings(INSTALLED_APPS=['tagsegg']):
template.Template(ttext)
try:
with self.settings(INSTALLED_APPS=['tagsegg']):
template.Template(ttext)
except template.TemplateSyntaxError as e:
self.assertIn('ImportError', e.args[0])
self.assertIn('Xtemplate', e.args[0])
def test_load_working_egg(self):
ttext = "{% load working_egg %}"
egg_name = '%s/tagsegg.egg' % self.egg_dir
with extend_sys_path(egg_name):
with self.settings(INSTALLED_APPS=['tagsegg']):
template.Template(ttext)
class RequestContextTests(unittest.TestCase):
def setUp(self):
self.fake_request = RequestFactory().get('/')
@override_settings(TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'OPTIONS': {
'loaders': [
('django.template.loaders.locmem.Loader', {
'child': '{{ var|default:"none" }}',
}),
],
},
}])
def test_include_only(self):
"""
Regression test for #15721, ``{% include %}`` and ``RequestContext``
not playing together nicely.
"""
ctx = RequestContext(self.fake_request, {'var': 'parent'})
self.assertEqual(
template.Template('{% include "child" %}').render(ctx),
'parent'
)
self.assertEqual(
template.Template('{% include "child" only %}').render(ctx),
'none'
)
def test_stack_size(self):
"""
Regression test for #7116, Optimize RequestContext construction
"""
ctx = RequestContext(self.fake_request, {})
# The stack should now contain 3 items:
# [builtins, supplied context, context processor]
self.assertEqual(len(ctx.dicts), 3)
def test_context_comparable(self):
# Create an engine without any context processors.
engine = Engine()
test_data = {'x': 'y', 'v': 'z', 'd': {'o': object, 'a': 'b'}}
# test comparing RequestContext to prevent problems if somebody
# adds __eq__ in the future
request = RequestFactory().get('/')
self.assertEqual(
RequestContext(request, dict_=test_data, engine=engine),
RequestContext(request, dict_=test_data, engine=engine))
@ignore_warnings(category=RemovedInDjango20Warning)
class SSITests(SimpleTestCase):
def setUp(self):
self.this_dir = os.path.dirname(os.path.abspath(upath(__file__)))
self.ssi_dir = os.path.join(self.this_dir, "templates", "first")
self.engine = Engine(allowed_include_roots=(self.ssi_dir,))
def render_ssi(self, path):
# the path must exist for the test to be reliable
self.assertTrue(os.path.exists(path))
return self.engine.from_string('{%% ssi "%s" %%}' % path).render(Context({}))
def test_allowed_paths(self):
acceptable_path = os.path.join(self.ssi_dir, "..", "first", "test.html")
self.assertEqual(self.render_ssi(acceptable_path), 'First template\n')
def test_relative_include_exploit(self):
"""
May not bypass allowed_include_roots with relative paths
e.g. if allowed_include_roots = ("/var/www",), it should not be
possible to do {% ssi "/var/www/../../etc/passwd" %}
"""
disallowed_paths = [
os.path.join(self.ssi_dir, "..", "ssi_include.html"),
os.path.join(self.ssi_dir, "..", "second", "test.html"),
]
for disallowed_path in disallowed_paths:
self.assertEqual(self.render_ssi(disallowed_path), '')
|
py | b404a89bc295eddde0450aac967fc382475b570a | # qubit number=4
# total number=14
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit() -> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += Z(3) # number=7
prog += Z(1) # number=8
prog += H(2) # number=3
prog += H(3) # number=4
prog += SWAP(3,0) # number=5
prog += SWAP(3,0) # number=6
prog += CNOT(0,3) # number=11
prog += X(3) # number=12
prog += CNOT(0,3) # number=13
prog += X(3) # number=10
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
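# Illustrative helper (an addition, not part of the original experiment script):
# summrise_results simply builds a histogram of measured bitstrings, e.g.
# summrise_results(['01', '01', '11']) -> {'01': 2, '11': 1}.
def _check_summrise_results():
    assert summrise_results(['01', '01', '11']) == {'01': 2, '11': 1}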
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('4q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil700.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
py | b404a945077db61cd742ea6219081d47c502fd7a | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
def print_color(code: int, message: str) -> None:
print(f"\033[{code}m{message}\033[00m")
def print_green(message: str) -> None:
print_color(92, message)
def print_yellow(message: str) -> None:
print_color(93, message)
def print_cyan(message: str) -> None:
print_color(96, message)
def print_red(message: str) -> None:
print_color(91, message)
def snake_to_camelcase(name: str) -> str:
"""Convert snake-case string to camel-case string."""
return "".join(n.capitalize() for n in name.split("_"))
|
py | b404a9703288f073e3a64f4e6f8d13e3a4716992 | # -*- coding:utf-8 -*-
# @Date: "2021-07-14"
# @Description: ranking data dict (skip-list backed)
import sys
import random
from typing import Hashable
SKIPLIST_P = 0.25
MAX_LEVEL = 32
def skRandomLevel():
lvl = 1
while(random.random() < SKIPLIST_P and lvl < MAX_LEVEL):
lvl += 1
return lvl
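# Illustrative helper (an addition, not part of the original module): the level returned
# by skRandomLevel follows a geometric distribution, so under the constants above
# P(level == k) is roughly SKIPLIST_P**(k - 1) * (1 - SKIPLIST_P) for k < MAX_LEVEL.
# This sketch only tabulates those theoretical probabilities for comparison with the
# empirical counts printed in the __main__ block below.
def skTheoreticalLevelDist(p=SKIPLIST_P, max_level=MAX_LEVEL):
    dist = {lvl: (p ** (lvl - 1)) * (1 - p) for lvl in range(1, max_level)}
    dist[max_level] = p ** (max_level - 1)  # all remaining probability collapses into the cap
    return dist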
class SkipListLevel(object):
def __init__(self):
super(SkipListLevel, self).__init__()
self.forward = None
self.span = 0
class SkipListNode(object):
def __init__(self, level, value):
super(SkipListNode, self).__init__()
self.value = value
self.backward = None
self.levels = [SkipListLevel() for _ in range(level)]  # level is an int, so iterate over range(level)
class SkipList(object):
def __init__(self):
super(SkipList, self).__init__()
self._head = SkipListNode(MAX_LEVEL, None)
self._tail = None
self._length = 0
self._level = 0
class RankDict(object):
def __init__(self):
super(RankDict, self).__init__()
def insert(self, searchKey, value):
'''
@param searchKey : duplicates allowed
@param value : duplicates not allowed
'''
if not isinstance(searchKey, Hashable):
raise KeyError('@key must be hashable.')
pass
def update(self, k, v):
pass
def rank(self, k):
pass
def rankByVal(self, val):
pass
def range(self, start, stop):
pass
def rangeByVal(self, start, stop):
pass
if __name__ == '__main__':
cnt = 1000000
sd = {}
for _ in range(cnt):
r = skRandomLevel()
if r not in sd:
sd[r] = 1
else:
sd[r] += 1
print(sd)
for k, v in sd.items():
sd[k] = v*1.0/cnt
print(sd) |
py | b404aaf863681a70005ef32f2c7f62dc4e5d1f7a | import socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((socket.gethostname(), 1234))
#s.connect(('10.84.14.54', 1234))
msg = s.recv(1024)
print(msg.decode("utf-8"))
|
py | b404ad4e2a492f334947798388c0cef0b6e46b80 | import json
import logging
from io import SEEK_END, BytesIO
import attr
from zope.interface import implementer
from twisted.internet import address, threads, udp
from twisted.internet._resolver import SimpleResolverComplexifier
from twisted.internet.defer import Deferred, fail, succeed
from twisted.internet.error import DNSLookupError
from twisted.internet.interfaces import (
IReactorPluggableNameResolver,
IReactorTCP,
IResolverSimple,
)
from twisted.python.failure import Failure
from twisted.test.proto_helpers import AccumulatingProtocol, MemoryReactorClock
from twisted.web.http import unquote
from twisted.web.http_headers import Headers
from twisted.web.server import Site
from synapse.http.site import SynapseRequest
from synapse.util import Clock
from tests.utils import setup_test_homeserver as _sth
logger = logging.getLogger(__name__)
class TimedOutException(Exception):
"""
A web query timed out.
"""
@attr.s
class FakeChannel:
"""
A fake Twisted Web Channel (the part that interfaces with the
wire).
"""
site = attr.ib(type=Site)
_reactor = attr.ib()
result = attr.ib(default=attr.Factory(dict))
_producer = None
@property
def json_body(self):
if not self.result:
raise Exception("No result yet.")
return json.loads(self.result["body"].decode("utf8"))
@property
def code(self):
if not self.result:
raise Exception("No result yet.")
return int(self.result["code"])
@property
def headers(self):
if not self.result:
raise Exception("No result yet.")
h = Headers()
for i in self.result["headers"]:
h.addRawHeader(*i)
return h
def writeHeaders(self, version, code, reason, headers):
self.result["version"] = version
self.result["code"] = code
self.result["reason"] = reason
self.result["headers"] = headers
def write(self, content):
assert isinstance(content, bytes), "Should be bytes! " + repr(content)
if "body" not in self.result:
self.result["body"] = b""
self.result["body"] += content
def registerProducer(self, producer, streaming):
self._producer = producer
self.producerStreaming = streaming
def _produce():
if self._producer:
self._producer.resumeProducing()
self._reactor.callLater(0.1, _produce)
if not streaming:
self._reactor.callLater(0.0, _produce)
def unregisterProducer(self):
if self._producer is None:
return
self._producer = None
def requestDone(self, _self):
self.result["done"] = True
def getPeer(self):
# We give an address so that getClientIP returns a non null entry,
# causing us to record the MAU
return address.IPv4Address("TCP", "127.0.0.1", 3423)
def getHost(self):
return None
@property
def transport(self):
return self
class FakeSite:
"""
A fake Twisted Web Site, with mocks of the extra things that
Synapse adds.
"""
server_version_string = b"1"
site_tag = "test"
access_logger = logging.getLogger("synapse.access.http.fake")
def make_request(
reactor,
method,
path,
content=b"",
access_token=None,
request=SynapseRequest,
shorthand=True,
federation_auth_origin=None,
content_is_form=False,
):
"""
Make a web request using the given method and path, feed it the
content, and return the Request and the Channel underneath.
Args:
method (bytes/unicode): The HTTP request method ("verb").
path (bytes/unicode): The HTTP path, suitably URL encoded (e.g.
escaped UTF-8 & spaces and such).
content (bytes or dict): The body of the request. JSON-encoded, if
a dict.
shorthand: Whether to try and be helpful and prefix the given URL
with the usual REST API path, if it doesn't contain it.
federation_auth_origin (bytes|None): if set to not-None, we will add a fake
Authorization header pretending to be the given server name.
content_is_form: Whether the content is URL encoded form data. Adds the
'Content-Type': 'application/x-www-form-urlencoded' header.
Returns:
Tuple[synapse.http.site.SynapseRequest, channel]
"""
if not isinstance(method, bytes):
method = method.encode("ascii")
if not isinstance(path, bytes):
path = path.encode("ascii")
# Decorate it to be the full path, if we're using shorthand
if (
shorthand
and not path.startswith(b"/_matrix")
and not path.startswith(b"/_synapse")
):
path = b"/_matrix/client/r0/" + path
path = path.replace(b"//", b"/")
if not path.startswith(b"/"):
path = b"/" + path
if isinstance(content, str):
content = content.encode("utf8")
site = FakeSite()
channel = FakeChannel(site, reactor)
req = request(channel)
req.process = lambda: b""
req.content = BytesIO(content)
# Twisted expects to be at the end of the content when parsing the request.
req.content.seek(SEEK_END)
req.postpath = list(map(unquote, path[1:].split(b"/")))
if access_token:
req.requestHeaders.addRawHeader(
b"Authorization", b"Bearer " + access_token.encode("ascii")
)
if federation_auth_origin is not None:
req.requestHeaders.addRawHeader(
b"Authorization",
b"X-Matrix origin=%s,key=,sig=" % (federation_auth_origin,),
)
if content:
if content_is_form:
req.requestHeaders.addRawHeader(
b"Content-Type", b"application/x-www-form-urlencoded"
)
else:
# Assume the body is JSON
req.requestHeaders.addRawHeader(b"Content-Type", b"application/json")
req.requestReceived(method, path, b"1.1")
return req, channel
def wait_until_result(clock, request, timeout=100):
"""
Wait until the request is finished.
"""
clock.run()
x = 0
while not request.finished:
# If there's a producer, tell it to resume producing so we get content
if request._channel._producer:
request._channel._producer.resumeProducing()
x += 1
if x > timeout:
raise TimedOutException("Timed out waiting for request to finish.")
clock.advance(0.1)
def render(request, resource, clock):
request.render(resource)
wait_until_result(clock, request)
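# Illustrative sketch (an addition, not part of the original helpers): the typical
# flow is make_request() -> render() -> inspect the FakeChannel. `resource` is assumed
# to be whatever Twisted Resource is under test, and its response is assumed to be JSON.
def example_request_flow(resource):
    reactor, hs_clock = get_clock()
    request, channel = make_request(reactor, "GET", "/_matrix/client/r0/account/whoami")
    render(request, resource, reactor)
    return channel.code, channel.json_body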
@implementer(IReactorPluggableNameResolver)
class ThreadedMemoryReactorClock(MemoryReactorClock):
"""
A MemoryReactorClock that supports callFromThread.
"""
def __init__(self):
self.threadpool = ThreadPool(self)
self._tcp_callbacks = {}
self._udp = []
lookups = self.lookups = {}
@implementer(IResolverSimple)
class FakeResolver:
def getHostByName(self, name, timeout=None):
if name not in lookups:
return fail(DNSLookupError("OH NO: unknown %s" % (name,)))
return succeed(lookups[name])
self.nameResolver = SimpleResolverComplexifier(FakeResolver())
super().__init__()
def listenUDP(self, port, protocol, interface="", maxPacketSize=8196):
p = udp.Port(port, protocol, interface, maxPacketSize, self)
p.startListening()
self._udp.append(p)
return p
def callFromThread(self, callback, *args, **kwargs):
"""
Make the callback fire in the next reactor iteration.
"""
d = Deferred()
d.addCallback(lambda x: callback(*args, **kwargs))
self.callLater(0, d.callback, True)
return d
def getThreadPool(self):
return self.threadpool
def add_tcp_client_callback(self, host, port, callback):
"""Add a callback that will be invoked when we receive a connection
attempt to the given IP/port using `connectTCP`.
Note that the callback gets run before we return the connection to the
client, which means callbacks cannot block while waiting for writes.
"""
self._tcp_callbacks[(host, port)] = callback
def connectTCP(self, host, port, factory, timeout=30, bindAddress=None):
"""Fake L{IReactorTCP.connectTCP}.
"""
conn = super().connectTCP(
host, port, factory, timeout=timeout, bindAddress=None
)
callback = self._tcp_callbacks.get((host, port))
if callback:
callback()
return conn
class ThreadPool:
"""
Threadless thread pool.
"""
def __init__(self, reactor):
self._reactor = reactor
def start(self):
pass
def stop(self):
pass
def callInThreadWithCallback(self, onResult, function, *args, **kwargs):
def _(res):
if isinstance(res, Failure):
onResult(False, res)
else:
onResult(True, res)
d = Deferred()
d.addCallback(lambda x: function(*args, **kwargs))
d.addBoth(_)
self._reactor.callLater(0, d.callback, True)
return d
def setup_test_homeserver(cleanup_func, *args, **kwargs):
"""
Set up a synchronous test server, driven by the reactor used by
the homeserver.
"""
server = _sth(cleanup_func, *args, **kwargs)
database = server.config.database.get_single_database()
# Make the thread pool synchronous.
clock = server.get_clock()
for database in server.get_datastores().databases:
pool = database._db_pool
def runWithConnection(func, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runWithConnection,
func,
*args,
**kwargs
)
def runInteraction(interaction, *args, **kwargs):
return threads.deferToThreadPool(
pool._reactor,
pool.threadpool,
pool._runInteraction,
interaction,
*args,
**kwargs
)
pool.runWithConnection = runWithConnection
pool.runInteraction = runInteraction
pool.threadpool = ThreadPool(clock._reactor)
pool.running = True
return server
def get_clock():
clock = ThreadedMemoryReactorClock()
hs_clock = Clock(clock)
return clock, hs_clock
@attr.s(cmp=False)
class FakeTransport:
"""
A twisted.internet.interfaces.ITransport implementation which sends all its data
straight into an IProtocol object: it exists to connect two IProtocols together.
To use it, instantiate it with the receiving IProtocol, and then pass it to the
sending IProtocol's makeConnection method:
server = HTTPChannel()
client.makeConnection(FakeTransport(server, self.reactor))
If you want bidirectional communication, you'll need two instances.
"""
other = attr.ib()
"""The Protocol object which will receive any data written to this transport.
:type: twisted.internet.interfaces.IProtocol
"""
_reactor = attr.ib()
"""Test reactor
:type: twisted.internet.interfaces.IReactorTime
"""
_protocol = attr.ib(default=None)
"""The Protocol which is producing data for this transport. Optional, but if set
will get called back for connectionLost() notifications etc.
"""
disconnecting = False
disconnected = False
connected = True
buffer = attr.ib(default=b"")
producer = attr.ib(default=None)
autoflush = attr.ib(default=True)
def getPeer(self):
return None
def getHost(self):
return None
def loseConnection(self, reason=None):
if not self.disconnecting:
logger.info("FakeTransport: loseConnection(%s)", reason)
self.disconnecting = True
if self._protocol:
self._protocol.connectionLost(reason)
# if we still have data to write, delay until that is done
if self.buffer:
logger.info(
"FakeTransport: Delaying disconnect until buffer is flushed"
)
else:
self.connected = False
self.disconnected = True
def abortConnection(self):
logger.info("FakeTransport: abortConnection()")
if not self.disconnecting:
self.disconnecting = True
if self._protocol:
self._protocol.connectionLost(None)
self.disconnected = True
def pauseProducing(self):
if not self.producer:
return
self.producer.pauseProducing()
def resumeProducing(self):
if not self.producer:
return
self.producer.resumeProducing()
def unregisterProducer(self):
if not self.producer:
return
self.producer = None
def registerProducer(self, producer, streaming):
self.producer = producer
self.producerStreaming = streaming
def _produce():
d = self.producer.resumeProducing()
d.addCallback(lambda x: self._reactor.callLater(0.1, _produce))
if not streaming:
self._reactor.callLater(0.0, _produce)
def write(self, byt):
if self.disconnecting:
raise Exception("Writing to disconnecting FakeTransport")
self.buffer = self.buffer + byt
# always actually do the write asynchronously. Some protocols (notably the
# TLSMemoryBIOProtocol) get very confused if a read comes back while they are
# still doing a write. Doing a callLater here breaks the cycle.
if self.autoflush:
self._reactor.callLater(0.0, self.flush)
def writeSequence(self, seq):
for x in seq:
self.write(x)
def flush(self, maxbytes=None):
if not self.buffer:
# nothing to do. Don't write empty buffers: it upsets the
# TLSMemoryBIOProtocol
return
if self.disconnected:
return
if getattr(self.other, "transport") is None:
# the other has no transport yet; reschedule
if self.autoflush:
self._reactor.callLater(0.0, self.flush)
return
if maxbytes is not None:
to_write = self.buffer[:maxbytes]
else:
to_write = self.buffer
logger.info("%s->%s: %s", self._protocol, self.other, to_write)
try:
self.other.dataReceived(to_write)
except Exception as e:
logger.exception("Exception writing to protocol: %s", e)
return
self.buffer = self.buffer[len(to_write) :]
if self.buffer and self.autoflush:
self._reactor.callLater(0.0, self.flush)
if not self.buffer and self.disconnecting:
logger.info("FakeTransport: Buffer now empty, completing disconnect")
self.disconnected = True
def connect_client(reactor: IReactorTCP, client_id: int) -> AccumulatingProtocol:
"""
Connect a client to a fake TCP transport.
Args:
reactor
factory: The connecting factory to build.
"""
factory = reactor.tcpClients[client_id][2]
client = factory.buildProtocol(None)
server = AccumulatingProtocol()
server.makeConnection(FakeTransport(client, reactor))
client.makeConnection(FakeTransport(server, reactor))
reactor.tcpClients.pop(client_id)
return client, server
|
py | b404add57bc4ba29ce7f0a16c4d69eb20ad03f75 | from __future__ import division, absolute_import
import copy
import numpy as np
import random
from collections import defaultdict
from torch.utils.data.sampler import Sampler, RandomSampler, SequentialSampler
AVAI_SAMPLERS = ['RandomIdentitySampler', 'SequentialSampler', 'RandomSampler']
class RandomIdentitySampler(Sampler):
"""Randomly samples N identities each with K instances.
Args:
data_source (list): contains tuples of (img_path(s), pid, camid).
batch_size (int): batch size.
num_instances (int): number of instances per identity in a batch.
"""
def __init__(self, data_source, batch_size, num_instances):
if batch_size < num_instances:
raise ValueError(
'batch_size={} must be no less '
'than num_instances={}'.format(batch_size, num_instances)
)
self.data_source = data_source
self.batch_size = batch_size
self.num_instances = num_instances
self.num_pids_per_batch = self.batch_size // self.num_instances
self.index_dic = defaultdict(list)
for index, (_, pid, _) in enumerate(self.data_source):
self.index_dic[pid].append(index)
self.pids = list(self.index_dic.keys())
# estimate number of examples in an epoch
# TODO: improve precision
self.length = 0
for pid in self.pids:
idxs = self.index_dic[pid]
num = len(idxs)
if num < self.num_instances:
num = self.num_instances
self.length += num - num % self.num_instances
def __iter__(self):
batch_idxs_dict = defaultdict(list)
for pid in self.pids:
idxs = copy.deepcopy(self.index_dic[pid])
if len(idxs) < self.num_instances:
idxs = np.random.choice(
idxs, size=self.num_instances, replace=True
)
random.shuffle(idxs)
batch_idxs = []
for idx in idxs:
batch_idxs.append(idx)
if len(batch_idxs) == self.num_instances:
batch_idxs_dict[pid].append(batch_idxs)
batch_idxs = []
avai_pids = copy.deepcopy(self.pids)
final_idxs = []
while len(avai_pids) >= self.num_pids_per_batch:
selected_pids = random.sample(avai_pids, self.num_pids_per_batch)
for pid in selected_pids:
batch_idxs = batch_idxs_dict[pid].pop(0)
final_idxs.extend(batch_idxs)
if len(batch_idxs_dict[pid]) == 0:
avai_pids.remove(pid)
return iter(final_idxs)
def __len__(self):
return self.length
def build_train_sampler(
data_source, train_sampler, batch_size=32, num_instances=4, **kwargs
):
"""Builds a training sampler.
Args:
data_source (list): contains tuples of (img_path(s), pid, camid).
train_sampler (str): sampler name (default: ``RandomSampler``).
batch_size (int, optional): batch size. Default is 32.
num_instances (int, optional): number of instances per identity in a
batch (when using ``RandomIdentitySampler``). Default is 4.
"""
assert train_sampler in AVAI_SAMPLERS, \
'train_sampler must be one of {}, but got {}'.format(AVAI_SAMPLERS, train_sampler)
if train_sampler == 'RandomIdentitySampler':
sampler = RandomIdentitySampler(data_source, batch_size, num_instances)
elif train_sampler == 'SequentialSampler':
sampler = SequentialSampler(data_source)
elif train_sampler == 'RandomSampler':
sampler = RandomSampler(data_source)
return sampler
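# --- Illustrative usage sketch (an addition, not part of the original module) ---
# data_source is assumed to be a list of (img_path, pid, camid) tuples, as the
# docstrings above describe; a tiny synthetic list stands in for real data here.
if __name__ == '__main__':
    fake_data = [('img_%d.jpg' % i, i % 8, 0) for i in range(64)]
    sampler = build_train_sampler(
        fake_data, 'RandomIdentitySampler', batch_size=32, num_instances=4
    )
    # Each consecutive chunk of batch_size indices covers 8 identities x 4 instances.
    print(len(sampler), len(list(iter(sampler))))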
|
py | b404addf0a98f590183f48a232fc59f1e28a7a6c | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from __future__ import absolute_import, division, print_function
INCLUDES = """
#include <openssl/obj_mac.h>
"""
TYPES = """
static const int NID_undef;
static const int NID_dsa;
static const int NID_dsaWithSHA;
static const int NID_dsaWithSHA1;
static const int NID_md2;
static const int NID_md4;
static const int NID_md5;
static const int NID_mdc2;
static const int NID_ripemd160;
static const int NID_sha;
static const int NID_sha1;
static const int NID_sha256;
static const int NID_sha384;
static const int NID_sha512;
static const int NID_sha224;
static const int NID_sha;
static const int NID_ecdsa_with_SHA1;
static const int NID_ecdsa_with_SHA224;
static const int NID_ecdsa_with_SHA256;
static const int NID_ecdsa_with_SHA384;
static const int NID_ecdsa_with_SHA512;
static const int NID_pbe_WithSHA1And3_Key_TripleDES_CBC;
static const int NID_X9_62_c2pnb163v1;
static const int NID_X9_62_c2pnb163v2;
static const int NID_X9_62_c2pnb163v3;
static const int NID_X9_62_c2pnb176v1;
static const int NID_X9_62_c2tnb191v1;
static const int NID_X9_62_c2tnb191v2;
static const int NID_X9_62_c2tnb191v3;
static const int NID_X9_62_c2onb191v4;
static const int NID_X9_62_c2onb191v5;
static const int NID_X9_62_c2pnb208w1;
static const int NID_X9_62_c2tnb239v1;
static const int NID_X9_62_c2tnb239v2;
static const int NID_X9_62_c2tnb239v3;
static const int NID_X9_62_c2onb239v4;
static const int NID_X9_62_c2onb239v5;
static const int NID_X9_62_c2pnb272w1;
static const int NID_X9_62_c2pnb304w1;
static const int NID_X9_62_c2tnb359v1;
static const int NID_X9_62_c2pnb368w1;
static const int NID_X9_62_c2tnb431r1;
static const int NID_X9_62_prime192v1;
static const int NID_X9_62_prime192v2;
static const int NID_X9_62_prime192v3;
static const int NID_X9_62_prime239v1;
static const int NID_X9_62_prime239v2;
static const int NID_X9_62_prime239v3;
static const int NID_X9_62_prime256v1;
static const int NID_secp112r1;
static const int NID_secp112r2;
static const int NID_secp128r1;
static const int NID_secp128r2;
static const int NID_secp160k1;
static const int NID_secp160r1;
static const int NID_secp160r2;
static const int NID_sect163k1;
static const int NID_sect163r1;
static const int NID_sect163r2;
static const int NID_secp192k1;
static const int NID_secp224k1;
static const int NID_secp224r1;
static const int NID_secp256k1;
static const int NID_secp384r1;
static const int NID_secp521r1;
static const int NID_sect113r1;
static const int NID_sect113r2;
static const int NID_sect131r1;
static const int NID_sect131r2;
static const int NID_sect193r1;
static const int NID_sect193r2;
static const int NID_sect233k1;
static const int NID_sect233r1;
static const int NID_sect239k1;
static const int NID_sect283k1;
static const int NID_sect283r1;
static const int NID_sect409k1;
static const int NID_sect409r1;
static const int NID_sect571k1;
static const int NID_sect571r1;
static const int NID_wap_wsg_idm_ecid_wtls1;
static const int NID_wap_wsg_idm_ecid_wtls3;
static const int NID_wap_wsg_idm_ecid_wtls4;
static const int NID_wap_wsg_idm_ecid_wtls5;
static const int NID_wap_wsg_idm_ecid_wtls6;
static const int NID_wap_wsg_idm_ecid_wtls7;
static const int NID_wap_wsg_idm_ecid_wtls8;
static const int NID_wap_wsg_idm_ecid_wtls9;
static const int NID_wap_wsg_idm_ecid_wtls10;
static const int NID_wap_wsg_idm_ecid_wtls11;
static const int NID_wap_wsg_idm_ecid_wtls12;
static const int NID_ipsec3;
static const int NID_ipsec4;
static const char *const SN_X9_62_c2pnb163v1;
static const char *const SN_X9_62_c2pnb163v2;
static const char *const SN_X9_62_c2pnb163v3;
static const char *const SN_X9_62_c2pnb176v1;
static const char *const SN_X9_62_c2tnb191v1;
static const char *const SN_X9_62_c2tnb191v2;
static const char *const SN_X9_62_c2tnb191v3;
static const char *const SN_X9_62_c2onb191v4;
static const char *const SN_X9_62_c2onb191v5;
static const char *const SN_X9_62_c2pnb208w1;
static const char *const SN_X9_62_c2tnb239v1;
static const char *const SN_X9_62_c2tnb239v2;
static const char *const SN_X9_62_c2tnb239v3;
static const char *const SN_X9_62_c2onb239v4;
static const char *const SN_X9_62_c2onb239v5;
static const char *const SN_X9_62_c2pnb272w1;
static const char *const SN_X9_62_c2pnb304w1;
static const char *const SN_X9_62_c2tnb359v1;
static const char *const SN_X9_62_c2pnb368w1;
static const char *const SN_X9_62_c2tnb431r1;
static const char *const SN_X9_62_prime192v1;
static const char *const SN_X9_62_prime192v2;
static const char *const SN_X9_62_prime192v3;
static const char *const SN_X9_62_prime239v1;
static const char *const SN_X9_62_prime239v2;
static const char *const SN_X9_62_prime239v3;
static const char *const SN_X9_62_prime256v1;
static const char *const SN_secp112r1;
static const char *const SN_secp112r2;
static const char *const SN_secp128r1;
static const char *const SN_secp128r2;
static const char *const SN_secp160k1;
static const char *const SN_secp160r1;
static const char *const SN_secp160r2;
static const char *const SN_sect163k1;
static const char *const SN_sect163r1;
static const char *const SN_sect163r2;
static const char *const SN_secp192k1;
static const char *const SN_secp224k1;
static const char *const SN_secp224r1;
static const char *const SN_secp256k1;
static const char *const SN_secp384r1;
static const char *const SN_secp521r1;
static const char *const SN_sect113r1;
static const char *const SN_sect113r2;
static const char *const SN_sect131r1;
static const char *const SN_sect131r2;
static const char *const SN_sect193r1;
static const char *const SN_sect193r2;
static const char *const SN_sect233k1;
static const char *const SN_sect233r1;
static const char *const SN_sect239k1;
static const char *const SN_sect283k1;
static const char *const SN_sect283r1;
static const char *const SN_sect409k1;
static const char *const SN_sect409r1;
static const char *const SN_sect571k1;
static const char *const SN_sect571r1;
static const char *const SN_wap_wsg_idm_ecid_wtls1;
static const char *const SN_wap_wsg_idm_ecid_wtls3;
static const char *const SN_wap_wsg_idm_ecid_wtls4;
static const char *const SN_wap_wsg_idm_ecid_wtls5;
static const char *const SN_wap_wsg_idm_ecid_wtls6;
static const char *const SN_wap_wsg_idm_ecid_wtls7;
static const char *const SN_wap_wsg_idm_ecid_wtls8;
static const char *const SN_wap_wsg_idm_ecid_wtls9;
static const char *const SN_wap_wsg_idm_ecid_wtls10;
static const char *const SN_wap_wsg_idm_ecid_wtls11;
static const char *const SN_wap_wsg_idm_ecid_wtls12;
static const char *const SN_ipsec3;
static const char *const SN_ipsec4;
static const int NID_subject_key_identifier;
static const int NID_authority_key_identifier;
static const int NID_policy_constraints;
static const int NID_ext_key_usage;
static const int NID_info_access;
static const int NID_key_usage;
static const int NID_subject_alt_name;
static const int NID_issuer_alt_name;
static const int NID_basic_constraints;
static const int NID_issuing_distribution_point;
static const int NID_certificate_issuer;
static const int NID_name_constraints;
static const int NID_crl_distribution_points;
static const int NID_certificate_policies;
static const int NID_inhibit_any_policy;
static const int NID_private_key_usage_period;
static const int NID_crl_number;
static const int NID_crl_reason;
static const int NID_invalidity_date;
static const int NID_delta_crl;
static const int NID_any_policy;
static const int NID_policy_mappings;
static const int NID_target_information;
static const int NID_no_rev_avail;
static const int NID_commonName;
static const int NID_countryName;
static const int NID_localityName;
static const int NID_stateOrProvinceName;
static const int NID_organizationName;
static const int NID_organizationalUnitName;
static const int NID_serialNumber;
static const int NID_surname;
static const int NID_givenName;
static const int NID_title;
static const int NID_generationQualifier;
static const int NID_dnQualifier;
static const int NID_pseudonym;
static const int NID_domainComponent;
static const int NID_pkcs9_emailAddress;
"""
FUNCTIONS = """
"""
MACROS = """
"""
CUSTOMIZATIONS = """
"""
|
py | b404ae746d685317c6e9ac4c453eb9366f6268f7 | import imutils
import cv2
from PIL import Image, ImageTk  # needed below for Image.fromarray / ImageTk.PhotoImage
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH,300)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT,300)
cap1 = cv2.VideoCapture(1)
cap1.set(cv2.CAP_PROP_FRAME_WIDTH,300)
cap1.set(cv2.CAP_PROP_FRAME_HEIGHT,300)
cap2 = cv2.VideoCapture(2)
cap2.set(cv2.CAP_PROP_FRAME_WIDTH,300)
cap2.set(cv2.CAP_PROP_FRAME_HEIGHT,300)
cap3 = cv2.VideoCapture(3)
cap3.set(cv2.CAP_PROP_FRAME_WIDTH,300)
cap3.set(cv2.CAP_PROP_FRAME_HEIGHT,300)
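# Note (added): `window` and the `display*` widgets used below are assumed to be Tk
# objects (a Tk() root and Label widgets) created elsewhere in the original application;
# only the capture/refresh loop is shown here.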
def read_frame():
webCameShow(cap.read(),display1)
webCameShow(cap1.read(),display2)
webCameShow(cap2.read(),display6)
webCameShow(cap3.read(),display7)
window.after(10, read_frame)
def webCameShow(N,Display):
_, frameXX = N
cv2imageXX = cv2.cvtColor(frameXX, cv2.COLOR_BGR2RGBA)
imgXX = Image.fromarray(cv2imageXX)
imgtkXX = ImageTk.PhotoImage(image=imgXX)  # uncommented: imgtkXX is used on the next lines
Display.imgtk = imgtkXX
Display.configure(image=imgtkXX)
def main():
read_frame()
if __name__ == '__main__':
main()
|
py | b404aea1dfc6512b6d9824bdbad38a6f97399f09 | import ipaddress
import os
import pathlib
from copy import copy
from typing import List, Optional, TypeVar, Union
import six
import yaml
from .domain import Domain
from .encrypted_string import EncryptedString
from .get_my_ip import get_my_ipv4, get_my_ipv6
from .record import Record, RecordType
T = TypeVar('T')
if hasattr(os, 'getenvb'):
def _getenvb(varname: str, value: T = None) -> Union[Optional[T], bytes]:
return os.getenvb(varname.encode(), value)
else:
def _getenvb(varname: str, value: T = None) -> Union[Optional[T], bytes]:
res = os.getenv(varname, value)
if res is value:
return value
return res.encode() if isinstance(res, str) else res
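# Note (added): _getenvb mirrors os.getenvb on platforms that lack it, always returning
# bytes (or the given default) so the FDU_KEY / FDU_IV values passed to EncryptedString
# below have a consistent type.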
class Config(dict):
def __init__(self, src="freenom.yml", **kwargs):
super().__init__(**kwargs)
if isinstance(src, pathlib.Path):
src = str(src)
self._records = None
self._password: EncryptedString = EncryptedString(b"")
self.reload(src)
self.file = src
def reload(self, src):
if isinstance(src, dict):
content = src
elif hasattr(src, 'read'):
content = yaml.safe_load(src)
else:
with open(src) as f:
content = yaml.safe_load(f)
self.clear()
self._password = EncryptedString(content.pop("password", ""),
key=_getenvb("FDU_KEY"),
iv=_getenvb("FDU_IV")).ensure_encrypted()
self.update(content)
self._records = None
def save(self, file=None):
file = file or self.file
if isinstance(file, six.string_types):
with open(file, 'w') as f:
d = dict(self)
d["password"] = self._password.str()
yaml.dump(d, f)
return True
return False
@property
def login(self) -> str:
return self['login']
@property
def password(self) -> str:
return self._password.str()
def __eq__(self, other):
if self.password != other.password:
return False
return super().__eq__(other)
def __ne__(self, other):
return not self == other
@property
def records(self) -> List[Record]:
if self._records is not None:
return self._records
records = self['record']
if isinstance(records, dict):
records = [records]
ret = []
ipv4 = None
ipv6 = None
if records:
ipv4 = get_my_ipv4()
try:
ipv6 = get_my_ipv6()
except Exception:
ipv6 = None
for rec in records:
ret += self._parse_record(rec, str(ipv4), str(ipv6) if ipv6 else None)
self._records = ret
return ret
def _parse_record(self, raw_record: dict, ipv4: Optional[str], ipv6: Optional[str]) -> List[Record]:
domain_name = raw_record['domain']
if not isinstance(domain_name, six.string_types):
raise TypeError("domain's name must be a string")
domain_name = domain_name.strip().lower()
if not domain_name:
raise ValueError("empty domain name")
domain = Domain()
domain.name = domain_name
record = Record(domain=domain)
optional_record = None
tmp = raw_record.get('name')
if tmp is not None:
record.name = str(tmp)
if not record.name:
raise ValueError("empty record name")
type_given = False
if 'type' in raw_record:
record.type = raw_record['type']
type_given = True
target_given = False
tmp = raw_record.get('target')
if tmp is not None:
record.target = str(tmp).strip()
target_given = True
if 'ttl' in raw_record:
record.ttl = raw_record['ttl']
if target_given and record.target != 'auto':
try:
addr = ipaddress.ip_address(six.u(record.target))
except ValueError:
pass
else:
if isinstance(addr, ipaddress.IPv4Address):
if type_given:
if record.type == RecordType.AAAA:
raise ValueError("cannot use ipv4 for AAAA record")
else:
record.type = RecordType.A
elif isinstance(addr, ipaddress.IPv6Address):
if type_given:
if record.type == RecordType.A:
raise ValueError("cannot use ipv6 for A record")
else:
record.type = RecordType.AAAA
else: # target not given or target is 'auto'
if type_given:
if record.type == RecordType.AAAA:
if not ipv6:
raise ValueError("empty ipv6")
record.target = ipv6
elif record.type == RecordType.A:
if not ipv4:
raise ValueError("empty ipv4")
record.target = ipv4
else: # type not given
if not ipv4:
raise ValueError("empty ipv4")
record.type = RecordType.A
record.target = ipv4
if ipv6 is not None:
optional_record = copy(record)
optional_record.type = RecordType.AAAA
optional_record.target = ipv6
return [record] if optional_record is None else [record, optional_record]
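# --- Illustrative sketch (an addition, not part of the original module) ---
# A minimal in-memory config; the domain and credentials are placeholders. With
# target 'auto' and no explicit type, _parse_record falls back to an A record for
# the detected IPv4 address (plus an AAAA record when an IPv6 address is found).
EXAMPLE_CONFIG = {
    'login': 'user@example.com',
    'password': 'not-a-real-password',
    'record': [
        {'domain': 'example.tk', 'name': 'www', 'target': 'auto'},
    ],
}
def example_records():
    # Copy the dict because reload() pops the password out of its source mapping.
    return Config(dict(EXAMPLE_CONFIG)).records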
|
py | b404b1a2d57439c85a253dd1c392078131caae34 | from datetime import datetime
from unittest import TestCase
from unittest.mock import MagicMock
from test.base import ClientBaseCase
from linode_api4 import LongviewSubscription, LinodeClient, ApiError
class LinodeClientGeneralTest(ClientBaseCase):
"""
Tests methods of the LinodeClient class that do not live inside of a group.
"""
def test_get_no_empty_body(self):
"""
Tests that a valid JSON body is passed for a GET call
"""
with self.mock_get('linode/instances') as m:
self.client.regions()
self.assertEqual(m.call_data_raw, None)
def test_get_account(self):
a = self.client.account()
self.assertEqual(a._populated, True)
self.assertEqual(a.first_name, 'Test')
self.assertEqual(a.last_name, 'Guy')
self.assertEqual(a.email, '[email protected]')
self.assertEqual(a.phone, '123-456-7890')
self.assertEqual(a.company, 'Linode')
self.assertEqual(a.address_1, '3rd & Arch St')
self.assertEqual(a.address_2, '')
self.assertEqual(a.city, 'Philadelphia')
self.assertEqual(a.state, 'PA')
self.assertEqual(a.country, 'US')
self.assertEqual(a.zip, '19106')
self.assertEqual(a.tax_id, '')
self.assertEqual(a.balance, 0)
self.assertEqual(a.capabilities, ["Linodes","NodeBalancers","Block Storage","Object Storage"])
def test_get_regions(self):
r = self.client.regions()
self.assertEqual(len(r), 11)
for region in r:
self.assertTrue(region._populated)
self.assertIsNotNone(region.id)
self.assertIsNotNone(region.country)
if region.id in ('us-east', 'eu-central', 'ap-south'):
self.assertEqual(region.capabilities, ["Linodes","NodeBalancers","Block Storage","Object Storage"])
else:
self.assertEqual(region.capabilities, ["Linodes","NodeBalancers","Block Storage"])
self.assertEqual(region.status, "ok")
self.assertIsNotNone(region.resolvers)
self.assertIsNotNone(region.resolvers.ipv4)
self.assertIsNotNone(region.resolvers.ipv6)
def test_get_images(self):
r = self.client.images()
self.assertEqual(len(r), 4)
for image in r:
self.assertTrue(image._populated)
self.assertIsNotNone(image.id)
def test_get_domains(self):
"""
Tests that domains can be retrieved and are marshalled properly
"""
r = self.client.domains()
self.assertEqual(len(r), 1)
domain = r.first()
self.assertEqual(domain.domain, 'example.org')
self.assertEqual(domain.type, 'master')
self.assertEqual(domain.id, 12345)
self.assertEqual(domain.axfr_ips, [])
self.assertEqual(domain.retry_sec, 0)
self.assertEqual(domain.ttl_sec, 300)
self.assertEqual(domain.status, 'active')
self.assertEqual(domain.master_ips, [],)
self.assertEqual(domain.description, "",)
self.assertEqual(domain.group, "",)
self.assertEqual(domain.expire_sec, 0,)
self.assertEqual(domain.soa_email, "[email protected]",)
self.assertEqual(domain.refresh_sec, 0)
def test_image_create(self):
"""
Tests that an Image can be created successfully
"""
with self.mock_post('images/private/123') as m:
i = self.client.image_create(654, 'Test-Image', 'This is a test')
self.assertIsNotNone(i)
self.assertEqual(i.id, 'private/123')
self.assertEqual(m.call_url, '/images')
self.assertEqual(m.call_data, {
"disk_id": 654,
"label": "Test-Image",
"description": "This is a test",
})
def test_get_volumes(self):
v = self.client.volumes()
self.assertEqual(len(v), 3)
self.assertEqual(v[0].label, 'block1')
self.assertEqual(v[0].region.id, 'us-east-1a')
self.assertEqual(v[1].label, 'block2')
self.assertEqual(v[1].size, 100)
self.assertEqual(v[2].size, 200)
self.assertEqual(v[2].label, 'block3')
assert v[0].tags == ["something"]
assert v[1].tags == []
assert v[2].tags == ["attached"]
def test_get_tags(self):
"""
Tests that a list of Tags can be retrieved as expected
"""
t = self.client.tags()
self.assertEqual(len(t), 2)
self.assertEqual(t[0].label, 'nothing')
self.assertEqual(t[1].label, 'something')
def test_tag_create(self):
"""
Tests that creating a tag works as expected
"""
# tags don't work like a normal RESTful collection, so we have to do this
with self.mock_post({'label':'nothing'}) as m:
t = self.client.tag_create('nothing')
self.assertIsNotNone(t)
self.assertEqual(t.label, 'nothing')
self.assertEqual(m.call_url, '/tags')
self.assertEqual(m.call_data, {
'label': 'nothing',
})
def test_tag_create_with_ids(self):
"""
Tests that creating a tag with IDs sends the correct request
"""
instance1, instance2 = self.client.linode.instances()[:2]
domain1 = self.client.domains().first()
nodebalancer1, nodebalancer2 = self.client.nodebalancers()[:2]
volume1, volume2 = self.client.volumes()[:2]
# tags don't work like a normal RESTful collection, so we have to do this
with self.mock_post({'label':'pytest'}) as m:
t = self.client.tag_create('pytest',
instances=[instance1.id, instance2],
nodebalancers=[nodebalancer1.id, nodebalancer2],
domains=[domain1.id],
volumes=[volume1.id, volume2])
self.assertIsNotNone(t)
self.assertEqual(t.label, 'pytest')
self.assertEqual(m.call_url, '/tags')
self.assertEqual(m.call_data, {
'label': 'pytest',
'linodes': [instance1.id, instance2.id],
'domains': [domain1.id],
'nodebalancers': [nodebalancer1.id, nodebalancer2.id],
'volumes': [volume1.id, volume2.id],
})
def test_tag_create_with_entities(self):
"""
Tests that creating a tag with entities sends the correct request
"""
instance1, instance2 = self.client.linode.instances()[:2]
domain = self.client.domains().first()
nodebalancer = self.client.nodebalancers().first()
volume = self.client.volumes().first()
# tags don't work like a normal RESTful collection, so we have to do this
with self.mock_post({'label':'pytest'}) as m:
t = self.client.tag_create('pytest',
entities=[instance1, domain, nodebalancer, volume, instance2])
self.assertIsNotNone(t)
self.assertEqual(t.label, 'pytest')
self.assertEqual(m.call_url, '/tags')
self.assertEqual(m.call_data, {
'label': 'pytest',
'linodes': [instance1.id, instance2.id],
'domains': [domain.id],
'nodebalancers': [nodebalancer.id],
'volumes': [volume.id],
})
class AccountGroupTest(ClientBaseCase):
"""
Tests methods of the AccountGroup
"""
def test_get_settings(self):
"""
Tests that account settings can be retrieved.
"""
s = self.client.account.settings()
self.assertEqual(s._populated, True)
self.assertEqual(s.network_helper, False)
self.assertEqual(s.managed, False)
self.assertEqual(type(s.longview_subscription), LongviewSubscription)
self.assertEqual(s.longview_subscription.id, 'longview-100')
self.assertEqual(s.object_storage, "active")
def test_get_invoices(self):
"""
Tests that invoices can be retrieved
"""
i = self.client.account.invoices()
self.assertEqual(len(i), 1)
invoice = i[0]
self.assertEqual(invoice.id, 123456)
self.assertEqual(invoice.date, datetime(2015, 1, 1, 5, 1, 2))
self.assertEqual(invoice.label, 'Invoice #123456')
self.assertEqual(invoice.total, 9.51)
class LinodeGroupTest(ClientBaseCase):
"""
Tests methods of the LinodeGroup
"""
def test_instance_create(self):
"""
Tests that a Linode Instance can be created successfully
"""
with self.mock_post('linode/instances/123') as m:
l = self.client.linode.instance_create('g5-standard-1', 'us-east-1a')
self.assertIsNotNone(l)
self.assertEqual(l.id, 123)
self.assertEqual(m.call_url, '/linode/instances')
self.assertEqual(m.call_data, {
"region": "us-east-1a",
"type": "g5-standard-1"
})
def test_instance_create_with_image(self):
"""
Tests that a Linode Instance can be created with an image, and a password generated
"""
with self.mock_post('linode/instances/123') as m:
l, pw = self.client.linode.instance_create(
'g5-standard-1', 'us-east-1a', image='linode/debian9')
self.assertIsNotNone(l)
self.assertEqual(l.id, 123)
self.assertEqual(m.call_url, '/linode/instances')
self.assertEqual(m.call_data, {
"region": "us-east-1a",
"type": "g5-standard-1",
"image": "linode/debian9",
"root_pass": pw,
})
class LongviewGroupTest(ClientBaseCase):
"""
Tests methods of the LongviewGroup
"""
def test_get_clients(self):
"""
Tests that a list of LongviewClients can be retrieved
"""
r = self.client.longview.clients()
self.assertEqual(len(r), 2)
self.assertEqual(r[0].label, "test_client_1")
self.assertEqual(r[0].id, 1234)
self.assertEqual(r[1].label, "longview5678")
self.assertEqual(r[1].id, 5678)
def test_client_create(self):
"""
Tests that creating a client calls the api correctly
"""
with self.mock_post('longview/clients/5678') as m:
client = self.client.longview.client_create()
self.assertIsNotNone(client)
self.assertEqual(client.id, 5678)
self.assertEqual(client.label, 'longview5678')
self.assertEqual(m.call_url, '/longview/clients')
self.assertEqual(m.call_data, {})
def test_client_create_with_label(self):
"""
Tests that creating a client with a label calls the api correctly
"""
with self.mock_post('longview/clients/1234') as m:
client = self.client.longview.client_create(label='test_client_1')
self.assertIsNotNone(client)
self.assertEqual(client.id, 1234)
self.assertEqual(client.label, 'test_client_1')
self.assertEqual(m.call_url, '/longview/clients')
self.assertEqual(m.call_data, {"label": "test_client_1"})
def test_get_subscriptions(self):
"""
Tests that Longview subscriptions can be retrieved
"""
r = self.client.longview.subscriptions()
self.assertEqual(len(r), 4)
expected_results = (
("longview-10", "Longview Pro 10 pack"),
("longview-100", "Longview Pro 100 pack"),
("longview-3", "Longview Pro 3 pack"),
("longview-40", "Longview Pro 40 pack"),
)
for result, (expected_id, expected_label) in zip(r, expected_results):
self.assertEqual(result.id, expected_id)
self.assertEqual(result.label, expected_label)
class LKEGroupTest(ClientBaseCase):
"""
    Tests methods of the LKEGroup
"""
def test_kube_version(self):
"""
Tests that KubeVersions can be retrieved
"""
versions = self.client.lke.versions()
self.assertEqual(len(versions), 3)
self.assertEqual(versions[0].id, "1.19")
self.assertEqual(versions[1].id, "1.18")
self.assertEqual(versions[2].id, "1.17")
def test_cluster_create_with_api_objects(self):
"""
Tests clusters can be created using api objects
"""
region = self.client.regions().first()
node_type = self.client.linode.types()[0]
version = self.client.lke.versions()[0]
node_pools = self.client.lke.node_pool(node_type, 3)
with self.mock_post("lke/clusters") as m:
cluster = self.client.lke.cluster_create(
region, "example-cluster", node_pools, version
)
self.assertEqual(m.call_data["region"], "ap-west")
self.assertEqual(m.call_data["node_pools"],
[{"type": "g5-nanode-1", "count": 3}])
self.assertEqual(m.call_data["k8s_version"], "1.19")
self.assertEqual(cluster.id, 18881)
self.assertEqual(cluster.region.id, "ap-west")
self.assertEqual(cluster.k8s_version.id, "1.19")
def test_cluster_create_with_string_repr(self):
"""
Tests clusters can be created using string representations
"""
with self.mock_post("lke/clusters") as m:
cluster = self.client.lke.cluster_create(
"ap-west", "example-cluster",
{"type": "g6-standard-1", "count": 3}, "1.19"
)
self.assertEqual(m.call_data["region"], "ap-west")
self.assertEqual(m.call_data["node_pools"],
[{"type": "g6-standard-1", "count": 3}])
self.assertEqual(m.call_data["k8s_version"], "1.19")
self.assertEqual(cluster.id, 18881)
self.assertEqual(cluster.region.id, "ap-west")
self.assertEqual(cluster.k8s_version.id, "1.19")
class ProfileGroupTest(ClientBaseCase):
"""
Tests methods of the ProfileGroup
"""
def test_get_sshkeys(self):
"""
Tests that a list of SSH Keys can be retrieved
"""
r = self.client.profile.ssh_keys()
self.assertEqual(len(r), 2)
key1, key2 = r
self.assertEqual(key1.label, 'Home Ubuntu PC')
self.assertEqual(key1.created, datetime(year=2018, month=9, day=14, hour=13,
minute=0, second=0))
self.assertEqual(key1.id, 22)
self.assertEqual(
key1.ssh_key, "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDe9NlKepJsI/S98"
"ISBJmG+cpEARtM0T1Qa5uTOUB/vQFlHmfQW07ZfA++ybPses0vRCD"
"eWyYPIuXcV5yFrf8YAW/Am0+/60MivT3jFY0tDfcrlvjdJAf1NpWO"
"TVlzv0gpsHFO+XIZcfEj3V0K5+pOMw9QGVf6Qbg8qzHVDPFdYKu3i"
"muc9KHY8F/b4DN/Wh17k3xAJpspCZEFkn0bdaYafJj0tPs0k78JRo"
"F2buc3e3M6dlvHaoON1votmrri9lut65OIpglOgPwE3QU8toGyyoC"
"MGaT4R7kIRjXy3WSyTMAi0KTAdxRK+IlDVMXWoE5TdLovd0a9L7qy"
"nZungKhKZUgFma7r9aTFVHXKh29Tzb42neDTpQnZ/Et735sDC1vfz"
"/YfgZNdgMUXFJ3+uA4M/36/Vy3Dpj2Larq3qY47RDFitmwSzwUlfz"
"tUoyiQ7e1WvXHT4N4Z8K2FPlTvNMg5CSjXHdlzcfiRFPwPn13w36v"
"TvAUxPvTa84P1eOLDp/JzykFbhHNh8Cb02yrU28zDeoTTyjwQs0eH"
"d1wtgIXJ8wuUgcaE4LgcgLYWwiKTq4/FnX/9lfvuAiPFl6KLnh23b"
"cKwnNA7YCWlb1NNLb2y+mCe91D8r88FGvbnhnOuVjd/SxQWDHtxCI"
"CmhW7erNJNVxYjtzseGpBLmRRUTsT038w== dorthu@dorthu-command")
def test_client_create(self):
"""
Tests that creating a client calls the api correctly
"""
with self.mock_post('longview/clients/5678') as m:
client = self.client.longview.client_create()
self.assertIsNotNone(client)
self.assertEqual(client.id, 5678)
self.assertEqual(client.label, 'longview5678')
self.assertEqual(m.call_url, '/longview/clients')
self.assertEqual(m.call_data, {})
def test_ssh_key_create(self):
"""
Tests that creating an ssh key works as expected
"""
with self.mock_post('profile/sshkeys/72') as m:
key = self.client.profile.ssh_key_upload(
"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDe9NlKepJsI/S98"
"ISBJmG+cpEARtM0T1Qa5uTOUB/vQFlHmfQW07ZfA++ybPses0vRCD"
"eWyYPIuXcV5yFrf8YAW/Am0+/60MivT3jFY0tDfcrlvjdJAf1NpWO"
"TVlzv0gpsHFO+XIZcfEj3V0K5+pOMw9QGVf6Qbg8qzHVDPFdYKu3i"
"muc9KHY8F/b4DN/Wh17k3xAJpspCZEFkn0bdaYafJj0tPs0k78JRo"
"F2buc3e3M6dlvHaoON1votmrri9lut65OIpglOgPwE3QU8toGyyoC"
"MGaT4R7kIRjXy3WSyTMAi0KTAdxRK+IlDVMXWoE5TdLovd0a9L7qy"
"nZungKhKZUgFma7r9aTFVHXKh29Tzb42neDTpQnZ/Et735sDC1vfz"
"/YfgZNdgMUXFJ3+uA4M/36/Vy3Dpj2Larq3qY47RDFitmwSzwUlfz"
"tUoyiQ7e1WvXHT4N4Z8K2FPlTvNMg5CSjXHdlzcfiRFPwPn13w36v"
"TvAUxPvTa84P1eOLDp/JzykFbhHNh8Cb02yrU28zDeoTTyjwQs0eH"
"d1wtgIXJ8wuUgcaE4LgcgLYWwiKTq4/FnX/9lfvuAiPFl6KLnh23b"
"cKwnNA7YCWlb1NNLb2y+mCe91D8r88FGvbnhnOuVjd/SxQWDHtxCI"
"CmhW7erNJNVxYjtzseGpBLmRRUTsT038w==dorthu@dorthu-command",
'Work Laptop')
self.assertIsNotNone(key)
self.assertEqual(key.id, 72)
self.assertEqual(key.label, 'Work Laptop')
self.assertEqual(m.call_url, '/profile/sshkeys')
self.assertEqual(m.call_data, {
"ssh_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDe9NlKepJsI/S98"
"ISBJmG+cpEARtM0T1Qa5uTOUB/vQFlHmfQW07ZfA++ybPses0vRCD"
"eWyYPIuXcV5yFrf8YAW/Am0+/60MivT3jFY0tDfcrlvjdJAf1NpWO"
"TVlzv0gpsHFO+XIZcfEj3V0K5+pOMw9QGVf6Qbg8qzHVDPFdYKu3i"
"muc9KHY8F/b4DN/Wh17k3xAJpspCZEFkn0bdaYafJj0tPs0k78JRo"
"F2buc3e3M6dlvHaoON1votmrri9lut65OIpglOgPwE3QU8toGyyoC"
"MGaT4R7kIRjXy3WSyTMAi0KTAdxRK+IlDVMXWoE5TdLovd0a9L7qy"
"nZungKhKZUgFma7r9aTFVHXKh29Tzb42neDTpQnZ/Et735sDC1vfz"
"/YfgZNdgMUXFJ3+uA4M/36/Vy3Dpj2Larq3qY47RDFitmwSzwUlfz"
"tUoyiQ7e1WvXHT4N4Z8K2FPlTvNMg5CSjXHdlzcfiRFPwPn13w36v"
"TvAUxPvTa84P1eOLDp/JzykFbhHNh8Cb02yrU28zDeoTTyjwQs0eH"
"d1wtgIXJ8wuUgcaE4LgcgLYWwiKTq4/FnX/9lfvuAiPFl6KLnh23b"
"cKwnNA7YCWlb1NNLb2y+mCe91D8r88FGvbnhnOuVjd/SxQWDHtxCI"
"CmhW7erNJNVxYjtzseGpBLmRRUTsT038w==dorthu@dorthu-command",
"label": "Work Laptop"
})
class ObjectStorageGroupTest(ClientBaseCase):
"""
Tests for the ObjectStorageGroup
"""
def test_get_clusters(self):
"""
Tests that Object Storage Clusters can be retrieved
"""
clusters = self.client.object_storage.clusters()
self.assertEqual(len(clusters), 1)
cluster = clusters[0]
self.assertEqual(cluster.id, 'us-east-1')
self.assertEqual(cluster.region.id, 'us-east')
self.assertEqual(cluster.domain, 'us-east-1.linodeobjects.com')
self.assertEqual(cluster.static_site_domain, 'website-us-east-1.linodeobjects.com')
def test_get_keys(self):
"""
Tests that you can retrieve Object Storage Keys
"""
keys = self.client.object_storage.keys()
self.assertEqual(len(keys), 2)
key1 = keys[0]
key2 = keys[1]
self.assertEqual(key1.id, 1)
self.assertEqual(key1.label, 'object-storage-key-1')
self.assertEqual(key1.access_key, 'testAccessKeyHere123')
self.assertEqual(key1.secret_key, '[REDACTED]')
self.assertEqual(key2.id, 2)
self.assertEqual(key2.label, 'object-storage-key-2')
self.assertEqual(key2.access_key, 'testAccessKeyHere456')
self.assertEqual(key2.secret_key, '[REDACTED]')
def test_keys_create(self):
"""
Tests that you can create Object Storage Keys
"""
with self.mock_post('object-storage/keys/1') as m:
keys = self.client.object_storage.keys_create('object-storage-key-1')
self.assertIsNotNone(keys)
self.assertEqual(keys.id, 1)
self.assertEqual(keys.label, 'object-storage-key-1')
self.assertEqual(m.call_url, '/object-storage/keys')
self.assertEqual(m.call_data, {"label":"object-storage-key-1"})
class NetworkingGroupTest(ClientBaseCase):
"""
Tests for the NetworkingGroup
"""
def test_get_vlans(self):
"""
        Tests that VLANs can be retrieved
"""
vlans = self.client.networking.vlans()
self.assertEqual(len(vlans), 1)
self.assertEqual(vlans[0].label, 'vlan-test')
self.assertEqual(vlans[0].region.id, 'us-southeast')
self.assertEqual(len(vlans[0].linodes), 2)
self.assertEqual(vlans[0].linodes[0], 111)
self.assertEqual(vlans[0].linodes[1], 222)
def test_firewall_create(self):
with self.mock_post('networking/firewalls/123') as m:
rules = {
'outbound': [],
'outbound_policy': 'DROP',
'inbound': [],
'inbound_policy': 'DROP'
}
f = self.client.networking.firewall_create('test-firewall-1', rules,
status='enabled')
self.assertIsNotNone(f)
self.assertEqual(m.call_url, '/networking/firewalls')
self.assertEqual(m.method, 'post')
self.assertEqual(f.id, 123)
self.assertEqual(m.call_data, {
'label': 'test-firewall-1',
'status': 'enabled',
'rules': rules
})
def test_get_firewalls(self):
"""
Tests that firewalls can be retrieved
"""
f = self.client.networking.firewalls()
self.assertEqual(len(f), 1)
firewall = f[0]
self.assertEqual(firewall.id, 123)
class LinodeClientRateLimitRetryTest(TestCase):
"""
Tests for rate limiting errors.
.. warning::
This test class _does not_ follow normal testing conventions for this project,
as requests are not automatically mocked. Only add tests to this class if they
pertain to the 429 retry logic, and make sure you mock the requests calls yourself
(or else they will make real requests and those won't work).
"""
def setUp(self):
self.client = LinodeClient("testing", base_url="/", retry_rate_limit_interval=1)
# sidestep the validation to do immediate retries so tests aren't slow
self.client.retry_rate_limit_interval = 0.1
def _get_mock_response(self, response_code):
"""
Helper function to return a mock response
"""
ret = MagicMock()
ret.status_code = response_code
ret.json.return_value = {}
return ret
def test_retry_429s(self):
"""
Tests that 429 responses are automatically retried
"""
called = 0
def test_method(*args, **kwargs):
nonlocal called
called += 1
if called < 2:
return self._get_mock_response(429)
return self._get_mock_response(200)
response = self.client._api_call('/test', method=test_method)
# it retried once, got the empty object
assert called == 2
assert response == {}, response
def test_retry_max_attempts(self):
"""
Tests that a request will fail after 5 429 responses in a row
"""
called = 0
def test_method(*args, **kwargs):
nonlocal called
called += 1
return self._get_mock_response(429)
try:
response = self.client._api_call('/test', method=test_method)
assert False, "Unexpectedly did not raise ApiError!"
except ApiError as e:
assert e.status == 429
# it tried 5 times
assert called == 5
def test_api_error_with_retry(self):
"""
Tests that a 300+ response still raises an ApiError even if retries are
enabled
"""
called = 0
def test_method(*args, **kwargs):
nonlocal called
called += 1
return self._get_mock_response(400)
try:
response = self.client._api_call('/test', method=test_method)
assert False, "Unexpectedly did not raise ApiError!"
except ApiError as e:
assert e.status == 400
            # it only tried once; non-429 errors are not retried
assert called == 1
def test_api_error_on_retry(self):
"""
Tests that we'll stop retrying and raise immediately if we get a 300+
response after a 429
"""
called = 0
def test_method(*args, **kwargs):
nonlocal called
called += 1
if called < 2:
return self._get_mock_response(429)
return self._get_mock_response(400)
try:
response = self.client._api_call('/test', method=test_method)
assert False, "Unexpectedly did not raise ApiError!"
except ApiError as e:
assert e.status == 400
            # it retried once, then raised on the 400
assert called == 2
def test_works_first_time(self):
"""
Tests that the response is handled correctly if we got a 200 on the first
try
"""
called = 0
def test_method(*args, **kwargs):
nonlocal called
called += 1
return self._get_mock_response(200)
response = self.client._api_call('/test', method=test_method)
        # it only tried once
assert called == 1
assert response == {}
|
py | b404b21dc8cd8cdbcb36aba6498e19f015a7b7b7 | SCRIPT_ID = "XXXXXXXXXXXXXXXXXXXXXXX"
DEPLOYMENT_ID = "YYYYYYYYYYYYYYYYYYYY"
CORE_APP_ID = SCRIPT_ID
devMode = True
|
py | b404b242f723722de7ec6e1656a3ef7fb4223d86 | # coding: utf-8
"""
FlashArray REST API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 2.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re
import six
import typing
from ....properties import Property
if typing.TYPE_CHECKING:
from pypureclient.flasharray.FA_2_6 import models
class AdminPatch(object):
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'api_token': 'ApiToken',
'is_local': 'bool',
'locked': 'bool',
'lockout_remaining': 'int',
'password': 'str',
'public_key': 'str',
'role': 'AdminRole',
'old_password': 'str'
}
attribute_map = {
'name': 'name',
'api_token': 'api_token',
'is_local': 'is_local',
'locked': 'locked',
'lockout_remaining': 'lockout_remaining',
'password': 'password',
'public_key': 'public_key',
'role': 'role',
'old_password': 'old_password'
}
required_args = {
}
def __init__(
self,
name=None, # type: str
api_token=None, # type: models.ApiToken
is_local=None, # type: bool
locked=None, # type: bool
lockout_remaining=None, # type: int
password=None, # type: str
public_key=None, # type: str
role=None, # type: models.AdminRole
old_password=None, # type: str
):
"""
Keyword args:
name (str): A user-specified name. The name must be locally unique and cannot be changed.
api_token (ApiToken)
is_local (bool): Returns a value of `true` if the user is local to the machine, otherwise `false`.
locked (bool): Returns a value of `true` if the user is currently locked out, otherwise `false`. Can be patched to `false` to unlock a user. This field is only visible to `array_admin` roles. For all other users, the value is always `null`.
lockout_remaining (int): The remaining lockout period, in milliseconds, if the user is locked out. This field is only visible to `array_admin` roles. For all other users, the value is always `null`.
password (str): Password associated with the account.
public_key (str): Public key for SSH access.
role (AdminRole)
old_password (str): The current password.
"""
if name is not None:
self.name = name
if api_token is not None:
self.api_token = api_token
if is_local is not None:
self.is_local = is_local
if locked is not None:
self.locked = locked
if lockout_remaining is not None:
self.lockout_remaining = lockout_remaining
if password is not None:
self.password = password
if public_key is not None:
self.public_key = public_key
if role is not None:
self.role = role
if old_password is not None:
self.old_password = old_password
def __setattr__(self, key, value):
if key not in self.attribute_map:
raise KeyError("Invalid key `{}` for `AdminPatch`".format(key))
self.__dict__[key] = value
def __getattribute__(self, item):
value = object.__getattribute__(self, item)
if isinstance(value, Property):
raise AttributeError
else:
return value
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
if hasattr(self, attr):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AdminPatch, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AdminPatch):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
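# A minimal usage sketch (the values below are illustrative assumptions, not defaults):
#
#   patch = AdminPatch(password="new-secret", old_password="current-secret")
#   patch.to_dict()  # -> {'password': 'new-secret', 'old_password': 'current-secret'}
#
# Setting an attribute that is not listed in attribute_map raises a KeyError via __setattr__.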
|
py | b404b33fb279a3aa85c21bc69f97181b4d18a55b | import cv2
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
# Load our image - this should be a new frame since last time!
binary_warped = mpimg.imread('warped_example.jpg')
# Polynomial fit values from the previous frame
# Make sure to grab the actual values from the previous step in your project!
left_fit = np.array([ 2.13935315e-04, -3.77507980e-01, 4.76902175e+02])
right_fit = np.array([4.17622148e-04, -4.93848953e-01, 1.11806170e+03])
def fit_poly(img_shape, leftx, lefty, rightx, righty):
### TO-DO: Fit a second order polynomial to each with np.polyfit() ###
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
### TO-DO: Calc both polynomials using ploty, left_fit and right_fit ###
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
return left_fitx, right_fitx, ploty
def search_around_poly(binary_warped):
# HYPERPARAMETER
# Choose the width of the margin around the previous polynomial to search
# The quiz grader expects 100 here, but feel free to tune on your own!
margin = 100
# Grab activated pixels
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
### TO-DO: Set the area of search based on activated x-values ###
### within the +/- margin of our polynomial function ###
### Hint: consider the window areas for the similarly named variables ###
### in the previous quiz, but change the windows to our new search area ###
left_lane_inds = ((nonzerox > (left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
left_fit[2] - margin)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
left_fit[1]*nonzeroy + left_fit[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
right_fit[2] - margin)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
right_fit[1]*nonzeroy + right_fit[2] + margin)))
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
# Fit new polynomials
left_fitx, right_fitx, ploty = fit_poly(binary_warped.shape, leftx, lefty, rightx, righty)
## Visualization ##
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((binary_warped, binary_warped, binary_warped))*255
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
# Plot the polynomial lines onto the image
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
## End visualization steps ##
return result
# Run image through the pipeline
# Note that in your project, you'll also want to feed in the previous fits
result = search_around_poly(binary_warped)
# View your output
plt.imshow(result) |
py | b404b396b84282068d0bb01eba273b6fad6f1563 | from warcio.utils import to_native_str
from six.moves.urllib.parse import unquote_plus, urlencode
from io import BytesIO
import base64
import cgi
import six
# ============================================================================
def append_post_query(req, resp):
len_ = req.http_headers.get_header('Content-Length')
content_type = req.http_headers.get_header('Content-Type')
stream = req.buffered_stream
stream.seek(0)
post_query = post_query_extract(content_type, len_, stream)
if not post_query:
return
url = req.rec_headers.get_header('WARC-Target-URI')
if '?' not in url:
url += '?'
else:
url += '&'
url += post_query
return url
# ============================================================================
def post_query_extract(mime, length, stream):
"""
Extract a url-encoded form POST/PUT from stream
content length, return None
Attempt to decode application/x-www-form-urlencoded or multipart/*,
otherwise read whole block and b64encode
"""
post_query = b''
try:
length = int(length)
except (ValueError, TypeError):
return
if length <= 0:
return
while length > 0:
buff = stream.read(length)
length -= len(buff)
if not buff:
break
post_query += buff
if not mime:
mime = ''
if mime.startswith('application/x-www-form-urlencoded'):
post_query = to_native_str(post_query)
post_query = unquote_plus(post_query)
elif mime.startswith('multipart/'):
env = {'REQUEST_METHOD': 'POST',
'CONTENT_TYPE': mime,
'CONTENT_LENGTH': len(post_query)}
args = dict(fp=BytesIO(post_query),
environ=env,
keep_blank_values=True)
if six.PY3:
args['encoding'] = 'utf-8'
data = cgi.FieldStorage(**args)
values = []
for item in data.list:
values.append((item.name, item.value))
post_query = urlencode(values, True)
else:
post_query = base64.b64encode(post_query)
post_query = to_native_str(post_query)
post_query = '__warc_post_data=' + post_query
return post_query
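if __name__ == "__main__":
    # A minimal, self-contained sketch of post_query_extract() on a form-encoded
    # body (the payload below is an illustrative assumption, not captured traffic).
    body = b'field=some+value&other=1'
    query = post_query_extract('application/x-www-form-urlencoded',
                               str(len(body)), BytesIO(body))
    print(query)  # field=some value&other=1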
|
py | b404b5684906ca762ba7a979264c1c1b83f56e4a | class Point4d(object,ISerializable,IEquatable[Point4d],IEpsilonComparable[Point4d]):
"""
Point4d(x: float,y: float,z: float,w: float)
Point4d(point: Point3d)
"""
@staticmethod
def Add(point1,point2):
""" Add(point1: Point4d,point2: Point4d) -> Point4d """
pass
def EpsilonEquals(self,other,epsilon):
""" EpsilonEquals(self: Point4d,other: Point4d,epsilon: float) -> bool """
pass
def Equals(self,*__args):
"""
Equals(self: Point4d,point: Point4d) -> bool
Equals(self: Point4d,obj: object) -> bool
"""
pass
def GetHashCode(self):
""" GetHashCode(self: Point4d) -> int """
pass
@staticmethod
def Multiply(point,d):
""" Multiply(point: Point4d,d: float) -> Point4d """
pass
@staticmethod
def Subtract(point1,point2):
""" Subtract(point1: Point4d,point2: Point4d) -> Point4d """
pass
def __add__(self,*args):
""" x.__add__(y) <==> x+y """
pass
def __eq__(self,*args):
""" x.__eq__(y) <==> x==y """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __mul__(self,*args):
""" x.__mul__(y) <==> x*yx.__mul__(y) <==> x*y """
pass
@staticmethod
def __new__(self,*__args):
"""
__new__[Point4d]() -> Point4d
__new__(cls: type,x: float,y: float,z: float,w: float)
__new__(cls: type,point: Point3d)
"""
pass
def __ne__(self,*args):
pass
def __radd__(self,*args):
""" __radd__(point1: Point4d,point2: Point4d) -> Point4d """
pass
def __reduce_ex__(self,*args):
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
def __rmul__(self,*args):
""" __rmul__(point1: Point4d,point2: Point4d) -> float """
pass
def __rsub__(self,*args):
""" __rsub__(point1: Point4d,point2: Point4d) -> Point4d """
pass
def __str__(self,*args):
pass
def __sub__(self,*args):
""" x.__sub__(y) <==> x-y """
pass
W=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: W(self: Point4d) -> float
Set: W(self: Point4d)=value
"""
X=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: X(self: Point4d) -> float
Set: X(self: Point4d)=value
"""
Y=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Y(self: Point4d) -> float
Set: Y(self: Point4d)=value
"""
Z=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Get: Z(self: Point4d) -> float
Set: Z(self: Point4d)=value
"""
Unset=None
|
py | b404b6c4252541fcf9085d67f08d4bbd306a6e08 | import torch
from torch.nn import functional
from audio_zen.acoustics.feature import drop_band
from audio_zen.model.base_model import BaseModel
from audio_zen.model.module.sequence_model import SequenceModel
class Model(BaseModel):
def __init__(self,
num_freqs,
look_ahead,
sequence_model,
fb_num_neighbors,
sb_num_neighbors,
fb_output_activate_function,
sb_output_activate_function,
fb_model_hidden_size,
sb_model_hidden_size,
norm_type="offline_laplace_norm",
num_groups_in_drop_band=2,
weight_init=True,
variation=None,
):
"""
FullSubNet model (cIRM mask)
Args:
num_freqs: Frequency dim of the input
            sb_num_neighbors: Number of neighbor frequencies on each side
            look_ahead: Number of future frames used for look-ahead
            sequence_model: Choose one sequence model as the base model (GRU, LSTM)
"""
super().__init__()
assert sequence_model in ("GRU", "LSTM"), f"{self.__class__.__name__} only support GRU and LSTM."
self.fb_model = SequenceModel(
input_size=num_freqs,
output_size=num_freqs,
hidden_size=fb_model_hidden_size,
num_layers=2,
bidirectional=False,
sequence_model=sequence_model,
output_activate_function=fb_output_activate_function
)
self.sb_model = SequenceModel(
input_size=(sb_num_neighbors * 2 + 1) + (fb_num_neighbors * 2 + 1),
output_size=2,
hidden_size=sb_model_hidden_size,
num_layers=2,
bidirectional=False,
sequence_model=sequence_model,
output_activate_function=sb_output_activate_function
)
self.sb_num_neighbors = sb_num_neighbors
self.fb_num_neighbors = fb_num_neighbors
self.look_ahead = look_ahead
self.norm = self.norm_wrapper(norm_type)
self.num_groups_in_drop_band = num_groups_in_drop_band
self.variation=variation
if weight_init:
self.apply(self.weight_init)
def forward(self, noisy_mag, bgm_mag):
"""
Args:
noisy_mag: noisy magnitude spectrogram
Returns:
The real part and imag part of the enhanced spectrogram
Shapes:
noisy_mag: [B, 1, F, T]
return: [B, 2, F, T]
"""
        assert noisy_mag.shape == bgm_mag.shape  # when this holds, the shape info below can be derived from noisy_mag alone
assert noisy_mag.dim() == 4
bgm_mag = functional.pad(bgm_mag, [0, self.look_ahead]) # Pad the look ahead
noisy_mag = functional.pad(noisy_mag, [0, self.look_ahead]) # Pad the look ahead
batch_size, num_channels, num_freqs, num_frames = noisy_mag.size()
assert num_channels == 1, f"{self.__class__.__name__} takes the mag feature as inputs."
        if self.variation is None:
            # Fullband model takes the bgm magnitude; num_channels * num_freqs == num_freqs since num_channels == 1
            fb_input = self.norm(bgm_mag).reshape(batch_size, num_channels * num_freqs, num_frames)
        elif self.variation == "summed_input":
            fb_input = self.norm(bgm_mag + noisy_mag).reshape(batch_size, num_channels * num_freqs, num_frames)
fb_output = self.fb_model(fb_input).reshape(batch_size, 1, num_freqs, num_frames)
# Unfold the output of the fullband model, [B, N=F, C, F_f, T]
fb_output_unfolded = self.unfold(fb_output, num_neighbor=self.fb_num_neighbors)
fb_output_unfolded = fb_output_unfolded.reshape(batch_size, num_freqs, self.fb_num_neighbors * 2 + 1, num_frames)
# Unfold noisy input, [B, N=F, C, F_s, T]
noisy_mag_unfolded = self.unfold(noisy_mag, num_neighbor=self.sb_num_neighbors)
noisy_mag_unfolded = noisy_mag_unfolded.reshape(batch_size, num_freqs, self.sb_num_neighbors * 2 + 1, num_frames)
# Concatenation, [B, F, (F_s + F_f), T]
sb_input = torch.cat([noisy_mag_unfolded, fb_output_unfolded], dim=2)
sb_input = self.norm(sb_input)
        # Dropping bands speeds up training without significant performance degradation; details to be added to the paper later.
if batch_size > 1:
sb_input = drop_band(sb_input.permute(0, 2, 1, 3), num_groups=self.num_groups_in_drop_band) # [B, (F_s + F_f), F//num_groups, T]
num_freqs = sb_input.shape[2]
sb_input = sb_input.permute(0, 2, 1, 3) # [B, F//num_groups, (F_s + F_f), T]
sb_input = sb_input.reshape(
batch_size * num_freqs,
(self.sb_num_neighbors * 2 + 1) + (self.fb_num_neighbors * 2 + 1),
num_frames
)
# [B * F, (F_s + F_f), T] => [B * F, 2, T] => [B, F, 2, T]
sb_mask = self.sb_model(sb_input)
sb_mask = sb_mask.reshape(batch_size, num_freqs, 2, num_frames).permute(0, 2, 1, 3).contiguous()
output = sb_mask[:, :, :, self.look_ahead:]
return output
if __name__ == "__main__":
import datetime
with torch.no_grad():
model = Model(
sb_num_neighbors=15,
fb_num_neighbors=0,
num_freqs=257,
look_ahead=2,
sequence_model="LSTM",
fb_output_activate_function="ReLU",
sb_output_activate_function=None,
fb_model_hidden_size=512,
sb_model_hidden_size=384,
weight_init=False,
norm_type="offline_laplace_norm",
num_groups_in_drop_band=2,
)
# ipt = torch.rand(3, 800) # 1.6s
# ipt_len = ipt.shape[-1]
        # # 1000 frames (16s) - 5.65s (35.31%, model only) - 5.78s
        # # 500 frames (8s) - 3.05s (38.12%, model only) - 3.04s
        # # 200 frames (3.2s) - 1.19s (37.19%, model only) - 1.20s
        # # 100 frames (1.6s) - 0.62s (38.75%, model only) - 0.65s
# start = datetime.datetime.now()
#
# complex_tensor = torch.stft(ipt, n_fft=512, hop_length=256)
# mag = (complex_tensor.pow(2.).sum(-1) + 1e-8).pow(0.5 * 1.0).unsqueeze(1)
# print(f"STFT: {datetime.datetime.now() - start}, {mag.shape}")
#
# enhanced_complex_tensor = model(mag).detach().permute(0, 2, 3, 1)
# print(enhanced_complex_tensor.shape)
# print(f"Model Inference: {datetime.datetime.now() - start}")
#
# enhanced = torch.istft(enhanced_complex_tensor, 512, 256, length=ipt_len)
# print(f"iSTFT: {datetime.datetime.now() - start}")
#
# print(f"{datetime.datetime.now() - start}")
ipt = torch.rand(3, 1, 257, 200)
        print(model(ipt, ipt).shape)  # forward() expects both noisy and bgm magnitudes; reuse ipt as a stand-in bgm input
|
py | b404b805779664fa5a3caacba1f70a3c8c75d10c | # testall.py - run all unittests except the ones in the test directory ...
import unittest
# ensure coverage sees even modules without tests
import hudjango.auth.backends
import hudjango.auth.decorators
import hudjango.fields.testdefaulting
import hudjango.fields.testscalingimagefield
import hudjango.middleware.clienttrack
import hudjango.middleware.testthreadlocals
# need to figure out huImages dependency
#import hudjango.storage
#import hudjango.templatetags.hudjango
tests = [hudjango.middleware.testthreadlocals.suite,
hudjango.fields.testdefaulting.suite,
hudjango.fields.testscalingimagefield.suite]
suite = unittest.TestSuite(tests)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=3).run(suite)
|
py | b404b83aa6eae4a541881e6aec6017f554489bc7 | """
Assorted utilities for working with neural networks in AllenNLP.
"""
from typing import List, Optional, Sequence, TypeVar
import torch
T = TypeVar('T')
def combine_tensors(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
"""
Combines a list of tensors using element-wise operations and concatenation, specified by a
``combination`` string. The string refers to (1-indexed) positions in the input tensor list,
and looks like ``"1,2,1+2,3-1"``.
We allow the following kinds of combinations: ``x``, ``x*y``, ``x+y``, ``x-y``, and ``x/y``,
where ``x`` and ``y`` are positive integers less than or equal to ``len(tensors)``. Each of
the binary operations is performed elementwise. You can give as many combinations as you want
in the ``combination`` string. For example, for the input string ``"1,2,1*2"``, the result
would be ``[1;2;1*2]``, as you would expect, where ``[;]`` is concatenation along the last
dimension.
If you have a fixed, known way to combine tensors that you use in a model, you should probably
just use something like ``torch.cat([x_tensor, y_tensor, x_tensor * y_tensor])``. This
function adds some complexity that is only necessary if you want the specific combination used
to be `configurable`.
If you want to do any element-wise operations, the tensors involved in each element-wise
operation must have the same shape.
This function also accepts ``x`` and ``y`` in place of ``1`` and ``2`` in the combination
string.
"""
combination = combination.replace('x', '1').replace('y', '2')
to_concatenate = [_get_combination(piece, tensors) for piece in combination.split(',')]
return torch.cat(to_concatenate, dim=-1)
def _rindex(sequence: Sequence[T], obj: T) -> int:
"""
Return zero-based index in the sequence of the last item whose value is equal to obj. Raises a
ValueError if there is no such item.
Parameters
----------
sequence : ``Sequence[T]``
obj : ``T``
Returns
-------
zero-based index associated to the position of the last item equal to obj
"""
for i in range(len(sequence) - 1, -1, -1):
if sequence[i] == obj:
return i
raise ValueError(f"Unable to find {obj} in sequence {sequence}.")
def _get_combination(combination: str, tensors: List[torch.Tensor]) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return tensors[index]
else:
first_tensor = _get_combination(combination[0], tensors)
second_tensor = _get_combination(combination[2], tensors)
operation = combination[1]
if operation == '*':
return first_tensor * second_tensor
elif operation == '/':
return first_tensor / second_tensor
elif operation == '+':
return first_tensor + second_tensor
elif operation == '-':
            return first_tensor - second_tensor
        else:
            raise ValueError(f"Invalid operation: {operation}")
def combine_tensors_and_multiply(combination: str,
tensors: List[torch.Tensor],
weights: torch.nn.Parameter) -> torch.Tensor:
"""
Like :func:`combine_tensors`, but does a weighted (linear) multiplication while combining.
This is a separate function from ``combine_tensors`` because we try to avoid instantiating
large intermediate tensors during the combination, which is possible because we know that we're
going to be multiplying by a weight vector in the end.
Parameters
----------
combination : ``str``
Same as in :func:`combine_tensors`
tensors : ``List[torch.Tensor]``
A list of tensors to combine, where the integers in the ``combination`` are (1-indexed)
positions in this list of tensors. These tensors are all expected to have either three or
four dimensions, with the final dimension being an embedding. If there are four
dimensions, one of them must have length 1.
weights : ``torch.nn.Parameter``
A vector of weights to use for the combinations. This should have shape (combined_dim,),
as calculated by :func:`get_combined_dim`.
"""
combination = combination.replace('x', '1').replace('y', '2')
pieces = combination.split(',')
tensor_dims = [tensor.size(-1) for tensor in tensors]
combination_dims = [_get_combination_dim(piece, tensor_dims) for piece in pieces]
dims_so_far = 0
to_sum = []
for piece, combination_dim in zip(pieces, combination_dims):
weight = weights[dims_so_far:(dims_so_far + combination_dim)]
dims_so_far += combination_dim
to_sum.append(_get_combination_and_multiply(piece, tensors, weight))
result = to_sum[0]
for result_piece in to_sum[1:]:
result = result + result_piece
return result
def _get_combination_and_multiply(combination: str,
tensors: List[torch.Tensor],
weight: torch.nn.Parameter) -> torch.Tensor:
if combination.isdigit():
index = int(combination) - 1
return torch.matmul(tensors[index], weight)
else:
first_tensor = _get_combination(combination[0], tensors)
second_tensor = _get_combination(combination[2], tensors)
operation = combination[1]
if operation == '*':
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError("Tensors with dim > 4 not currently supported")
desired_dim = max(first_tensor.dim(), second_tensor.dim()) - 1
if first_tensor.dim() == 4:
expanded_dim = _rindex(first_tensor.size(), 1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = _rindex(second_tensor.size(), 1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
result = torch.matmul(intermediate, second_tensor.transpose(-1, -2))
if result.dim() == desired_dim + 1:
result = result.squeeze(-1)
return result
elif operation == '/':
if first_tensor.dim() > 4 or second_tensor.dim() > 4:
raise ValueError("Tensors with dim > 4 not currently supported")
desired_dim = max(first_tensor.dim(), second_tensor.dim()) - 1
if first_tensor.dim() == 4:
expanded_dim = _rindex(first_tensor.size(), 1)
first_tensor = first_tensor.squeeze(expanded_dim)
if second_tensor.dim() == 4:
expanded_dim = _rindex(second_tensor.size(), 1)
second_tensor = second_tensor.squeeze(expanded_dim)
intermediate = first_tensor * weight
result = torch.matmul(intermediate, second_tensor.pow(-1).transpose(-1, -2))
if result.dim() == desired_dim + 1:
result = result.squeeze(-1)
return result
elif operation == '+':
return torch.matmul(first_tensor, weight) + torch.matmul(second_tensor, weight)
elif operation == '-':
            return torch.matmul(first_tensor, weight) - torch.matmul(second_tensor, weight)
        else:
            raise ValueError(f"Invalid operation: {operation}")
def get_combined_dim(combination: str, tensor_dims: List[int]) -> int:
"""
For use with :func:`combine_tensors`. This function computes the resultant dimension when
calling ``combine_tensors(combination, tensors)``, when the tensor dimension is known. This is
necessary for knowing the sizes of weight matrices when building models that use
``combine_tensors``.
Parameters
----------
combination : ``str``
A comma-separated list of combination pieces, like ``"1,2,1*2"``, specified identically to
``combination`` in :func:`combine_tensors`.
tensor_dims : ``List[int]``
A list of tensor dimensions, where each dimension is from the `last axis` of the tensors
that will be input to :func:`combine_tensors`.
"""
combination = combination.replace('x', '1').replace('y', '2')
return sum([_get_combination_dim(piece, tensor_dims) for piece in combination.split(',')])
def _get_combination_dim(combination: str, tensor_dims: List[int]) -> int:
if combination.isdigit():
index = int(combination) - 1
return tensor_dims[index]
else:
first_tensor_dim = _get_combination_dim(combination[0], tensor_dims)
second_tensor_dim = _get_combination_dim(combination[2], tensor_dims)
operation = combination[1]
return first_tensor_dim
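if __name__ == "__main__":
    # A minimal, self-contained sketch of how these helpers compose. The shapes and
    # combination strings below are illustrative assumptions, not part of the module's API.
    batch, length, dim = 2, 3, 4
    x = torch.randn(batch, length, dim)
    y = torch.randn(batch, length, dim)
    # "x,y,x*y" concatenates x, y and their element-wise product along the last axis.
    combined = combine_tensors("x,y,x*y", [x, y])
    assert combined.shape == (batch, length, 3 * dim)
    assert get_combined_dim("x,y,x*y", [dim, dim]) == 3 * dim
    # The weighted variant multiplies each piece by a slice of a single weight vector
    # and sums the projections instead of materializing the concatenated tensor.
    weight = torch.nn.Parameter(torch.randn(get_combined_dim("x,y", [dim, dim])))
    projected = combine_tensors_and_multiply("x,y", [x, y], weight)
    assert projected.shape == (batch, length)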
|
py | b404b971de24123d3f889a372de2b59a70174ef9 | from .calibrate import calibrate_data
from .truncate import truncate_outliers
|
py | b404ba4a0b2a695956039b183823ee6fe6ed92b1 | ##########################################################################
#
# pgAdmin 4 - PostgreSQL Tools
#
# Copyright (C) 2013 - 2020, The pgAdmin Development Team
# This software is released under the PostgreSQL Licence
#
##########################################################################
from __future__ import print_function
import json
import uuid
from pgadmin.browser.server_groups.servers.databases.schemas.tests import \
utils as schema_utils
from pgadmin.browser.server_groups.servers.databases.tests import \
utils as database_utils
from pgadmin.utils.route import BaseTestGenerator
from regression import parent_node_dict
from regression.python_test_utils import test_utils as utils
from . import utils as fts_temp_utils
class FtsTemplatePutTestCase(BaseTestGenerator):
""" This class will update new FTS template under schema node. """
scenarios = [
# Fetching default URL for FTS template node.
('Fetch FTS template Node URL', dict(url='/browser/fts_template/obj/'))
]
def setUp(self):
self.schema_data = parent_node_dict['schema'][-1]
self.schema_name = self.schema_data['schema_name']
self.schema_id = self.schema_data['schema_id']
self.server_id = self.schema_data['server_id']
self.db_id = self.schema_data['db_id']
self.db_name = parent_node_dict["database"][-1]["db_name"]
self.fts_temp_name = "fts_temp_%s" % str(uuid.uuid4())[1:8]
self.fts_temp_id = fts_temp_utils.create_fts_template(
self.server,
self.db_name,
self.schema_name,
self.fts_temp_name)
def runTest(self):
""" This function will update FTS template present under
test schema. """
db_con = database_utils.connect_database(self,
utils.SERVER_GROUP,
self.server_id,
self.db_id)
if not db_con["info"] == "Database connected.":
raise Exception("Could not connect to database.")
schema_response = schema_utils.verify_schemas(self.server,
self.db_name,
self.schema_name)
if not schema_response:
raise Exception("Could not find the schema.")
fts_response = fts_temp_utils.verify_fts_template(self.server,
self.db_name,
self.fts_temp_name)
if not fts_response:
raise Exception("Could not find the FTS template.")
data = \
{
"description": "This is FTS template update comment",
"id": self.fts_temp_id
}
put_response = self.tester.put(
self.url + str(utils.SERVER_GROUP) + '/' +
str(self.server_id) + '/' +
str(self.db_id) + '/' +
str(self.schema_id) + '/' +
str(self.fts_temp_id),
data=json.dumps(data),
follow_redirects=True)
self.assertEquals(put_response.status_code, 200)
def tearDown(self):
"""This function delete the fts_template and disconnect the test
database."""
fts_temp_utils.delete_fts_template(self.server, self.db_name,
self.schema_name,
self.fts_temp_name)
database_utils.disconnect_database(self, self.server_id,
self.db_id)
|
py | b404bb3baea480e5cd07aae5008f1ebe8b184627 | #!/usr/bin/env python
"""The setup script."""
from __future__ import annotations
from setuptools import find_packages, setup
with open("README.rst", encoding="utf-8") as readme_file:
readme = readme_file.read()
with open("HISTORY.rst", encoding="utf-8") as history_file:
history = history_file.read()
requirements: list[str] = []
requirements_dev = [
"actions-toolkit>=0.0.5",
"black>=21.5b0",
"bump2version>=1.0.1",
"codecov>=2.1.11",
"coverage>=5.5",
"dunamai>=1.5.5",
"flake8>=3.9.1",
"isort>=5.8.0",
"mypy>=0.812",
"pip-tools>=6.1.0",
"pre-commit>=2.12.1",
"pylint>=2.8.2",
"pytest>=6.2.4",
"pytest-cov>=2.11.1",
"pytest-xdist>=2.2.1",
]
requirements_docs = [
"Sphinx>=3.5.4",
"sphinx-autoapi>=1.8.1",
]
requirements_dev += requirements_docs
setup(
author="Sheypex",
author_email="[email protected]",
python_requires=">=3.8",
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
],
description="Higher level controlls for searching (a collection of) sklearn models for best parameters.", # noqa: E501
setup_requires=[
"setuptools-git",
],
install_requires=requirements,
extras_require={
"dev": requirements_dev,
"docs": requirements_docs,
},
license="BSD-3-Clause",
long_description=readme + "\n\n" + history,
name="sklearn_tournament",
packages=find_packages(include=["sklearn_tournament", "sklearn_tournament.*"]),
include_package_data=True,
test_suite="tests",
url="https://github.com/sheypex/sklearn-tournament",
version="0.1.0",
zip_safe=False,
)
|
py | b404bb97abe7818bf5e2f175d6e001973bf7d546 | from types import StringTypes
from sfa.util.faults import *
from sfa.util.xrn import urn_to_hrn
from sfa.util.method import Method
from sfa.util.parameter import Parameter, Mixed
from sfa.trust.auth import Auth
from sfa.server.registry import Registries
class get_registries(Method):
"""
Get a list of connection information for all known registries.
@param cred credential string specifying the rights of the caller
    @param xrn Human readable name (hrn or urn), or list of names or None
    @return list of dictionaries with registry information.
"""
interfaces = ['registry', 'aggregate', 'slicemgr']
accepts = [
Parameter(str, "Credential string"),
Mixed(Parameter(str, "Human readable name (hrn or urn)"),
Parameter(None, "hrn not specified"))
]
returns = [Parameter(dict, "Registry interface information")]
def call(self, cred, xrn = None):
hrn, type = urn_to_hrn(xrn)
self.api.auth.check(cred, 'list')
registries = Registries(self.api).interfaces.values()
if hrn:
registries = [reg for reg in registries if reg['hrn'] == hrn]
return registries
|
py | b404bcd22bfe6f1fbabf89833cd830cff36ff72e | # Generated by Django 2.2.5 on 2019-10-04 03:50
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('database', '0031_auto_20190920_0427'),
]
operations = [
migrations.AlterModelOptions(
name='tag',
options={'ordering': ('order',)},
),
migrations.AddField(
model_name='goal',
name='attained_carbon_footprint_reduction',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='goal',
name='attained_number_of_actions',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='goal',
name='attained_number_of_households',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='goal',
name='target_carbon_footprint_reduction',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='goal',
name='target_number_of_actions',
field=models.PositiveIntegerField(default=0),
),
migrations.AddField(
model_name='goal',
name='target_number_of_households',
field=models.PositiveIntegerField(default=0),
),
]
|
py | b404bcf138e269bcfd2f1168c24472664bf755bb | # Copyright (c) 2021 Mira Geoscience Ltd.
#
# This file is part of geoapps.
#
# geoapps is distributed under the terms and conditions of the MIT License
# (see LICENSE file at the root of this source code package).
import numpy as np
from geoh5py.io import H5Writer
from geoh5py.objects import Curve, Grid2D
from geoh5py.workspace import Workspace
from ipywidgets import (
FloatSlider,
HBox,
IntSlider,
Layout,
Text,
ToggleButton,
VBox,
interactive_output,
)
from matplotlib import collections
from skimage.feature import canny
from skimage.transform import probabilistic_hough_line
from geoapps.plotting import PlotSelection2D
from geoapps.utils import filter_xy
class EdgeDetectionApp(PlotSelection2D):
"""
Widget for Grid2D objects for the automated detection of line features.
    The application relies on the Canny and Hough transforms from the
Scikit-Image library.
:param grid: Grid2D object
:param data: Children data object for the provided grid
Optional
--------
:param sigma [Canny]: standard deviation of the Gaussian filter
:param threshold [Hough]: Value threshold
:param line_length [Hough]: Minimum accepted pixel length of detected lines
:param line_gap [Hough]: Maximum gap between pixels to still form a line.
"""
defaults = {
"h5file": "../../assets/FlinFlon.geoh5",
"objects": "Gravity_Magnetics_drape60m",
"data": "Airborne_Gxx",
"resolution": 50,
"sigma": 0.5,
"compute": True,
"window": {
"azimuth": -20,
},
"ga_group_name": "Edges",
}
object_types = (Grid2D,)
def __init__(self, **kwargs):
self._compute = ToggleButton(
value=False,
description="Compute",
button_style="warning",
tooltip="Description",
icon="check",
)
self._export_as = Text(
value="Edges",
description="Save as:",
disabled=False,
)
self._line_length = IntSlider(
min=1,
max=100,
step=1,
value=1,
continuous_update=False,
description="Line Length",
)
self._line_gap = IntSlider(
min=1,
max=100,
step=1,
value=1,
continuous_update=False,
description="Line Gap",
)
self._sigma = FloatSlider(
min=0.0,
max=10,
step=0.1,
value=1.0,
continuous_update=False,
description="Sigma",
)
self._threshold = IntSlider(
min=1,
max=100,
step=1,
value=1,
continuous_update=False,
description="Threshold",
)
self._window_size = IntSlider(
min=16,
max=512,
value=64,
continuous_update=False,
description="Window size",
)
super().__init__(**kwargs)
out = interactive_output(
self.compute_trigger,
{"compute": self.compute},
)
def save_trigger(_):
self.save_trigger()
# Make changes to trigger warning color
self.trigger.description = "Save to GA"
self.trigger.on_click(save_trigger)
self.trigger.button_style = "success"
def update_name(_):
self.update_name()
self.data.observe(update_name, names="value")
self.update_name()
self._main = VBox(
[
self.project_panel,
HBox(
[
self.main,
VBox(
[
self.sigma,
self.threshold,
self.line_length,
self.line_gap,
self.window_size,
self.compute,
self.export_as,
self.output_panel,
],
layout=Layout(width="50%"),
),
out,
]
),
]
)
@property
def compute(self):
"""ToggleButton"""
return self._compute
@property
def export_as(self):
"""Text"""
return self._export_as
@property
def line_length(self):
"""IntSlider"""
return self._line_length
@property
def line_gap(self):
"""IntSlider"""
return self._line_gap
@property
def sigma(self):
"""FloatSlider"""
return self._sigma
@property
def threshold(self):
"""IntSlider"""
return self._threshold
@property
def window_size(self):
"""IntSlider"""
return self._window_size
def save_trigger(self):
entity, _ = self.get_selected_entities()
if getattr(self.trigger, "vertices", None) is not None:
curves = [
child
for child in self.ga_group.children
if child.name == self.export_as.value
]
if any(curves):
curve = curves[0]
curve._children = []
curve.vertices = self.trigger.vertices
curve.cells = np.vstack(self.trigger.cells).astype("uint32")
# Remove directly on geoh5
project_handle = H5Writer.fetch_h5_handle(self.h5file, entity)
base = list(project_handle.keys())[0]
obj_handle = project_handle[base]["Objects"]
for key in obj_handle[H5Writer.uuid_str(curve.uid)]["Data"].keys():
del project_handle[base]["Data"][key]
del obj_handle[H5Writer.uuid_str(curve.uid)]
H5Writer.save_entity(curve)
else:
curve = Curve.create(
self.workspace,
name=self.export_as.value,
vertices=self.trigger.vertices,
cells=self.trigger.cells,
parent=self.ga_group,
)
if self.live_link.value:
self.live_link_output(self.ga_group)
self.workspace.finalize()
def update_name(self):
if self.data.value is not None:
self.export_as.value = self.data.value
else:
self.export_as.value = "Edges"
def compute_trigger(self, compute):
if compute:
grid, data = self.get_selected_entities()
x = grid.centroids[:, 0].reshape(grid.shape, order="F")
y = grid.centroids[:, 1].reshape(grid.shape, order="F")
z = grid.centroids[:, 2].reshape(grid.shape, order="F")
grid_data = data[0].values.reshape(grid.shape, order="F")
indices = self.indices
ind_x, ind_y = (
np.any(indices, axis=1),
np.any(indices, axis=0),
)
x = x[ind_x, :][:, ind_y]
y = y[ind_x, :][:, ind_y]
z = z[ind_x, :][:, ind_y]
grid_data = grid_data[ind_x, :][:, ind_y]
grid_data -= grid_data.min()
grid_data /= grid_data.max()
if np.any(grid_data):
# Find edges
edges = canny(grid_data, sigma=self.sigma.value, use_quantiles=True)
shape = edges.shape
# Cycle through tiles of square size
max_l = np.min([self.window_size.value, shape[0], shape[1]])
half = np.floor(max_l / 2)
overlap = 1.25
n_cell_y = (shape[0] - 2 * half) * overlap / max_l
n_cell_x = (shape[1] - 2 * half) * overlap / max_l
if n_cell_x > 0:
cnt_x = np.linspace(
half, shape[1] - half, 2 + int(np.round(n_cell_x)), dtype=int
).tolist()
half_x = half
else:
cnt_x = [np.ceil(shape[1] / 2)]
half_x = np.ceil(shape[1] / 2)
if n_cell_y > 0:
cnt_y = np.linspace(
half, shape[0] - half, 2 + int(np.round(n_cell_y)), dtype=int
).tolist()
half_y = half
else:
cnt_y = [np.ceil(shape[0] / 2)]
half_y = np.ceil(shape[0] / 2)
coords = []
for cx in cnt_x:
for cy in cnt_y:
i_min, i_max = int(cy - half_y), int(cy + half_y)
j_min, j_max = int(cx - half_x), int(cx + half_x)
lines = probabilistic_hough_line(
edges[i_min:i_max, j_min:j_max],
line_length=self.line_length.value,
threshold=self.threshold.value,
line_gap=self.line_gap.value,
seed=0,
)
if np.any(lines):
coord = np.vstack(lines)
coords.append(
np.c_[
x[i_min:i_max, j_min:j_max][
coord[:, 1], coord[:, 0]
],
y[i_min:i_max, j_min:j_max][
coord[:, 1], coord[:, 0]
],
z[i_min:i_max, j_min:j_max][
coord[:, 1], coord[:, 0]
],
]
)
if coords:
coord = np.vstack(coords)
self.objects.lines = coord
self.plot_store_lines()
else:
self.objects.lines = None
self.compute.value = False
def plot_store_lines(self):
xy = self.objects.lines
indices_1 = filter_xy(
xy[1::2, 0],
xy[1::2, 1],
self.resolution.value,
window={
"center": [
self.center_x.value,
self.center_y.value,
],
"size": [
self.width.value,
self.height.value,
],
"azimuth": self.azimuth.value,
},
)
indices_2 = filter_xy(
xy[::2, 0],
xy[::2, 1],
self.resolution.value,
window={
"center": [
self.center_x.value,
self.center_y.value,
],
"size": [
self.width.value,
self.height.value,
],
"azimuth": self.azimuth.value,
},
)
indices = np.kron(
np.any(np.c_[indices_1, indices_2], axis=1),
np.ones(2),
).astype(bool)
xy = self.objects.lines[indices, :2]
self.collections = [
collections.LineCollection(
np.reshape(xy, (-1, 2, 2)), colors="k", linewidths=2
)
]
self.refresh.value = False
self.refresh.value = True # Trigger refresh
if np.any(xy):
vertices = np.vstack(self.objects.lines[indices, :])
cells = np.arange(vertices.shape[0]).astype("uint32").reshape((-1, 2))
if np.any(cells):
self.trigger.vertices = vertices
self.trigger.cells = cells
else:
self.trigger.vertices = None
self.trigger.cells = None
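# A minimal notebook usage sketch. The project file is the demo asset referenced in
# `defaults`, and exposing the assembled widget as `.main` is assumed behaviour of the
# base application class:
#
#   app = EdgeDetectionApp(h5file="../../assets/FlinFlon.geoh5")
#   app.main  # display the widget, tune Sigma/Threshold/Line Length, press Compute,
#             # then "Save to GA" to export the detected edges as a Curve object.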
|
py | b404bd20e47dd819830fd9ad1b448f693e547adc | from __future__ import with_statement
try:
import cProfile as profile
except ImportError: # pragma: no cover
try:
import profile
except ImportError: # pragma: no cover
profile = None
try:
import resource
except ImportError: # pragma: no cover
resource = None # Will fail on Win32 systems
try:
import pstats
except ImportError: # pragma: no cover
pstats = None # will fail on braindead Debian systems that package pstats
# separately from python for god-knows-what-reason
import threading
import time
from pyramid_debugtoolbar.panels import DebugPanel
from pyramid_debugtoolbar.utils import format_fname
_ = lambda x: x
lock = threading.Lock()
class PerformanceDebugPanel(DebugPanel):
"""
Panel that looks at the performance of a request.
It will display the time a request took and, optionally, the
cProfile output.
"""
name = 'performance'
user_activate = True
stats = None
function_calls = None
has_resource = bool(resource)
has_content = bool(pstats and profile)
template = 'pyramid_debugtoolbar.panels:templates/performance.dbtmako'
title = _('Performance')
nav_title = title
def __init__(self, request):
if profile is not None:
self.profiler = profile.Profile()
def _wrap_timer_handler(self, handler):
if self.has_resource:
def resource_timer_handler(request):
_start_time = time.time()
self._start_rusage = resource.getrusage(resource.RUSAGE_SELF)
try:
result = handler(request)
except:
raise
finally:
self._end_rusage = resource.getrusage(resource.RUSAGE_SELF)
self.total_time = (time.time() - _start_time) * 1000
return result
return resource_timer_handler
def noresource_timer_handler(request):
_start_time = time.time()
try:
result = handler(request)
except:
raise
finally:
self.total_time = (time.time() - _start_time) * 1000
return result
return noresource_timer_handler
def _wrap_profile_handler(self, handler):
if not self.is_active:
return handler
def profile_handler(request):
with lock:
try:
result = self.profiler.runcall(handler, request)
except:
raise
finally:
stats = pstats.Stats(self.profiler)
function_calls = []
flist = stats.sort_stats('cumulative').fcn_list
for func in flist:
current = {}
info = stats.stats[func]
# Number of calls
if info[0] != info[1]:
current['ncalls'] = '%d/%d' % (info[1], info[0])
else:
current['ncalls'] = info[1]
# Total time
current['tottime'] = info[2] * 1000
# Quotient of total time divided by number of calls
if info[1]:
current['percall'] = info[2] * 1000 / info[1]
else:
current['percall'] = 0
# Cumulative time
current['cumtime'] = info[3] * 1000
                        # Quotient of the cumulative time divided by the number
# of primitive calls.
if info[0]:
current['percall_cum'] = info[3] * 1000 / info[0]
else:
current['percall_cum'] = 0
# Filename
filename = pstats.func_std_string(func)
current['filename_long'] = filename
current['filename'] = format_fname(filename)
function_calls.append(current)
self.stats = stats
self.function_calls = function_calls
return result
return profile_handler
def wrap_handler(self, handler):
handler = self._wrap_profile_handler(handler)
handler = self._wrap_timer_handler(handler)
return handler
@property
def nav_subtitle(self):
return '%0.2fms' % (self.total_time)
def _elapsed_ru(self, name):
return getattr(self._end_rusage, name) - getattr(self._start_rusage,
name)
def process_response(self, response):
vars = {'timing_rows': None, 'stats': None, 'function_calls': []}
if self.has_resource:
utime = 1000 * self._elapsed_ru('ru_utime')
stime = 1000 * self._elapsed_ru('ru_stime')
vcsw = self._elapsed_ru('ru_nvcsw')
ivcsw = self._elapsed_ru('ru_nivcsw')
# minflt = self._elapsed_ru('ru_minflt')
# majflt = self._elapsed_ru('ru_majflt')
# these are documented as not meaningful under Linux. If you're
# running BSD feel free to enable them, and add any others that I
# hadn't gotten to before I noticed that I was getting nothing but
# zeroes and that the docs agreed. :-(
# blkin = self._elapsed_ru('ru_inblock')
# blkout = self._elapsed_ru('ru_oublock')
# swap = self._elapsed_ru('ru_nswap')
# rss = self._end_rusage.ru_maxrss
# srss = self._end_rusage.ru_ixrss
# urss = self._end_rusage.ru_idrss
# usrss = self._end_rusage.ru_isrss
# TODO l10n on values
rows = (
(_('User CPU time'), '%0.3f msec' % utime),
(_('System CPU time'), '%0.3f msec' % stime),
(_('Total CPU time'), '%0.3f msec' % (utime + stime)),
(_('Elapsed time'), '%0.3f msec' % self.total_time),
(_('Context switches'), '%d voluntary, %d involuntary' % (
vcsw, ivcsw)),
# (_('Memory use'), '%d max RSS, %d shared, %d unshared' % (
# rss, srss, urss + usrss)),
# (_('Page faults'), '%d no i/o, %d requiring i/o' % (
# minflt, majflt)),
# (_('Disk operations'), '%d in, %d out, %d swapout' % (
# blkin, blkout, swap)),
)
vars['timing_rows'] = rows
if self.is_active:
vars['stats'] = self.stats
vars['function_calls'] = self.function_calls
self.data = vars
def includeme(config):
config.add_debugtoolbar_panel(PerformanceDebugPanel)
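# Usage sketch (added note, not part of the original module): in a typical
# Pyramid application the toolbar and this panel are wired up roughly like
#
#     config.include('pyramid_debugtoolbar')
#     config.add_debugtoolbar_panel(PerformanceDebugPanel)
#
# mirroring the includeme() hook defined above.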
|
py | b404bd2f0c78dce57642841e67039c17d2bdaffe | #!/bin/env python
'''
Convert macOS keyboard layout files (.keylayout) to
equivalent Windows files (.klc).
'''
import os
import re
import sys
import time
import argparse
import codecs
import unicodedata
import xml.etree.ElementTree as ET
# local modules
from data.klc_data import (
win_to_mac_keycodes, win_keycodes,
klc_keynames, klc_prologue_dummy, klc_epilogue_dummy
)
from data.locale_data import (
locale_id, locale_id_long, locale_tag, locale_name, locale_name_long,
)
error_msg_conversion = (
'Could not convert composed character {}, '
'inserting replacement character ({}).'
)
error_msg_filename = (
'Too many digits for a Windows-style (8+3) filename. '
'Please rename the source file.')
error_msg_macwin_mismatch = (
"// No equivalent macOS code for Windows code {} ('{}'). Skipping.")
error_msg_winmac_mismatch = (
"// Could not match Windows code {} ('{}') to Mac OS code {}. Skipping.")
# Change the line separator.
# This is important, as the output klc file must be UTF-16 LE with
# Windows-style line breaks.
os.linesep = '\r\n'
# Placeholder character for replacing 'ligatures' (more than one character
# mapped to one key), which are not supported by this conversion script.
replacement_char = '007E'
class KeylayoutParser(object):
def __init__(self, tree):
# raw keys as they are in the layout XML
self.key_list = []
# raw list of actions collected from layout XML
self.action_list = []
# key output when state is None
self.output_list = []
# action IDs and actual base keys (e.g. 'a', 'c' etc.)
self.action_basekeys = {}
# {states: deadkeys}
self.deadkeys = {}
# {deadkey: (basekey, output)}
self.deadkey_dict = {}
# A dict of dicts, collecting the outputs of every key
# in each individual state.
self.output_dict = {}
# Actions that do not yield immediate output, but shift to a new state.
self.empty_actions = []
# {keymap ID: modifier key}
self.keymap_assignments = {}
self.number_of_keymaps = 0
self.parse(tree)
self.find_deadkeys()
self.match_actions()
self.find_outputs()
self.make_deadkey_dict()
self.make_output_dict()
def check_states(self, states, keymap, maxset, minset, mod_name):
'''
Assign index numbers to the different shift states, by comparing
them to the minimum and maximum possible modifier configurations.
This is necessary as the arrangement in the Mac keyboard layout
is arbitrary.
'''
if maxset.issuperset(states) and minset.issubset(states):
self.keymap_assignments[mod_name] = int(keymap)
def parse(self, tree):
keymap_idx_list = [] # Find the number of keymap indexes.
default_max = {'command?', 'caps?'}
default_min = set()
alt_max = {'anyOption', 'caps?', 'command?'}
alt_min = {'anyOption'}
shift_max = {'anyShift', 'caps?', 'command?'}
shift_min = {'anyShift'}
altshift_max = {'anyShift', 'anyOption', 'caps?', 'command?'}
altshift_min = {'anyShift', 'anyOption'}
cmd_max = {'command', 'caps?', 'anyShift?', 'anyOption?'}
cmd_min = {'command'}
caps_max = {'caps', 'anyShift?', 'command?'}
caps_min = {'caps'}
cmdcaps_max = {'command', 'caps', 'anyShift?'}
cmdcaps_min = {'command', 'caps'}
shiftcaps_max = {'anyShift', 'caps', 'anyOption?'}
shiftcaps_min = {'anyShift', 'caps'}
for parent in tree.iter():
if parent.tag == 'keyMapSelect':
for modifier in parent:
keymap_index = int(parent.get('mapIndex'))
keymap_idx_list.append(keymap_index)
keymap = parent.get('mapIndex')
states = set(modifier.get('keys').split())
self.check_states(
states, keymap, default_max, default_min, 'default')
self.check_states(
states, keymap, shift_max, shift_min, 'shift')
self.check_states(
states, keymap, alt_max, alt_min, 'alt')
self.check_states(
states, keymap, altshift_max, altshift_min, 'altshift')
self.check_states(
states, keymap, cmd_max, cmd_min, 'cmd')
self.check_states(
states, keymap, caps_max, caps_min, 'caps')
self.check_states(
states, keymap, cmdcaps_max, cmdcaps_min, 'cmdcaps')
self.check_states(
states, keymap,
shiftcaps_max, shiftcaps_min, 'shiftcaps')
if parent.tag == 'keyMapSet':
keymapset_id = parent.attrib['id']
for keymap in parent:
keymap_index = int(keymap.attrib['index'])
for key in keymap:
key_code = int(key.attrib['code'])
if key.get('action') is None:
key_type = 'output'
else:
key_type = 'action'
output = key.get(key_type)
self.key_list.append([
keymapset_id, keymap_index,
key_code, key_type, output])
if parent.tag == 'actions':
for action in parent:
action_id = action.get('id')
for action_trigger in action:
if action_trigger.get('next') is None:
action_type = 'output'
else:
action_type = 'next'
state = action_trigger.get('state')
# result can be a code point or another state
result = action_trigger.get(action_type)
self.action_list.append([
action_id, state, action_type, result])
# Make a dictionary for key id to output.
# On the Mac keyboard, the 'a' for example is often
# matched to an action, as it can produce
# agrave, aacute, etc.
if [state, action_type] == ['none', 'output']:
self.action_basekeys[action_id] = result
# Yield the highest index assigned to a shift state - thus, the
# number of shift states in the layout.
self.number_of_keymaps = max(keymap_idx_list)
def find_deadkeys(self):
'''
Populate dictionary self.deadkeys which contains the state ID
and the code point of an actual dead key.
(for instance, '3': '02c6' state 3: circumflex)
Populate list of IDs for 'empty' actions, for finding IDs of all key
inputs that have no immediate output.
This list is used later when an '@' is appended to the code points,
a Windows convention to mark dead keys.
'''
deadkey_id = 0
key_list = []
for [key_id, state, key_type, result] in self.action_list:
if [state, key_type, result] == ['none', 'output', '0020']:
deadkey_id = key_id
if key_id == deadkey_id and result != '0020':
self.deadkeys[state] = result
if [state, key_type] == ['none', 'next']:
key_list.append([key_id, result])
self.empty_actions.append(key_id)
key_list_2 = []
for state, result_state in key_list:
if result_state in self.deadkeys.keys():
cp_result = self.deadkeys[result_state]
key_list_2.append((state, cp_result))
# Add the actual deadkeys (grave, acute etc)
# to the dict action_basekeys
self.action_basekeys.update(dict(key_list_2))
def match_actions(self):
'''
        self.action_list is extended by the base character, e.g.
[
'6', # action id
's1', # state
'output', # type
'00c1', # Á
'0041', # A
]
Populate self.action_basekeys -- all the glyphs that can be combined
with a dead key, e.g. A,E,I etc.
'''
for action_data in self.action_list:
key_id, state, key_type, result = action_data
if [state, key_type] == ['none', 'output']:
self.action_basekeys[key_id] = result
if key_id in self.action_basekeys.keys():
action_data.append(self.action_basekeys[key_id])
def find_outputs(self):
'''
Find the real output values of all the keys, e.g. replacing the
action IDs in the XML keyboard layout with the code points they
return in their standard state.
'''
for key_data in self.key_list:
output = key_data[4]
if output in self.empty_actions:
# If the key is a real dead key, mark it.
# This mark is used in 'make_output_dict'.
key_data.append('@')
if output in self.action_basekeys:
key_data[3] = 'output'
key_data[4] = self.action_basekeys[output]
self.output_list.append(key_data)
else:
self.output_list.append(key_data)
def make_deadkey_dict(self):
'''
Populate self.deadkey_dict, which maps a deadkey
e.g. (02dc, circumflex) to (base character, accented character) tuples
e.g. 0041, 00c3 = A, Ã
'''
for action in self.action_list:
if action[1] in self.deadkeys.keys():
action.append(self.deadkeys[action[1]])
if len(action) == 6:
deadkey = action[5]
basekey = action[4]
result = action[3]
if deadkey in self.deadkey_dict:
self.deadkey_dict[deadkey].append((basekey, result))
else:
self.deadkey_dict[deadkey] = [(basekey, result)]
def make_output_dict(self):
'''
        This script is configured to work with the first keymap set of an
        XML keyboard layout only; the filtering down to that keymap set
        happens here.
'''
first_keymapset = self.output_list[0][0]
for key_data in self.output_list:
keymap_set = key_data[0]
keymap_id = key_data[1]
key_id = key_data[2]
if keymap_set != first_keymapset:
self.output_list.remove(key_data)
# filling the key ID output dict with dummy output
li = []
for i in range(self.number_of_keymaps + 1):
li.append([i, '-1'])
self.output_dict[key_id] = dict(li)
for key_data in self.output_list:
keymap_set = key_data[0]
keymap_id = key_data[1]
key_id = key_data[2]
if len(key_data) == 5:
output = key_data[4]
else:
# The @ is marking this key as a deadkey in .klc files.
output = key_data[4] + '@'
self.output_dict[key_id][keymap_id] = output
def get_key_output(self, key_output_dict, state):
'''
Used to find output per state, for every key.
If no output, return '-1' (a.k.a. not defined).
'''
try:
output = key_output_dict[self.keymap_assignments[state]]
except KeyError:
output = '-1'
return output
def get_key_table(self):
kt_output = []
for win_kc_hex, win_kc_name in sorted(win_keycodes.items()):
win_kc_int = int(win_kc_hex, 16)
if win_kc_int not in win_to_mac_keycodes:
print(error_msg_macwin_mismatch.format(
win_kc_int, win_keycodes[win_kc_hex]))
continue
mac_kc = win_to_mac_keycodes[win_kc_int]
if mac_kc not in self.output_dict:
print(error_msg_winmac_mismatch.format(
win_kc_int, win_keycodes[win_kc_hex], mac_kc))
continue
outputs = self.output_dict[mac_kc]
# The key_table follows the syntax of the .klc file.
# The columns are as follows:
# key_table[0]: scan code
# key_table[1]: virtual key
# key_table[2]: spacer (empty)
# key_table[3]: caps (on or off, or SGCaps flag)
# key_table[4]: output for default state
# key_table[5]: output for shift
# key_table[6]: output for ctrl (= cmd on mac)
# key_table[7]: output for ctrl-shift (= cmd-caps lock on mac)
# key_table[8]: output for altGr (= ctrl-alt)
# key_table[9]: output for altGr-shift (= ctrl-alt-shift)
# key_table[10]: descriptions.
key_table = list((win_kc_hex, win_kc_name)) + ([""] * 9)
default_output = self.get_key_output(outputs, 'default')
shift_output = self.get_key_output(outputs, 'shift')
alt_output = self.get_key_output(outputs, 'alt')
altshift_output = self.get_key_output(outputs, 'altshift')
caps_output = self.get_key_output(outputs, 'caps')
cmd_output = self.get_key_output(outputs, 'cmd')
cmdcaps_output = self.get_key_output(outputs, 'cmdcaps')
shiftcaps_output = self.get_key_output(outputs, 'shiftcaps')
# Check if the caps lock output equals the shift key,
# to set the caps lock status.
if caps_output == default_output:
key_table[3] = '0'
elif caps_output == shift_output:
key_table[3] = '1'
else:
# SGCaps are a Windows speciality, necessary if the caps lock
# state is different from shift.
# Usually, they accommodate an alternate writing system.
# SGCaps + Shift is possible, boosting the available
# shift states to 6.
key_table[3] = 'SGCap'
key_table[4] = default_output
key_table[5] = shift_output
key_table[6] = cmd_output
key_table[7] = cmdcaps_output
key_table[8] = alt_output
key_table[9] = altshift_output
key_table[10] = (
f'// {char_description(default_output)}, '
f'{char_description(shift_output)}, '
f'{char_description(cmd_output)}, '
f'{char_description(alt_output)}, '
f'{char_description(altshift_output)}') # key descriptions
kt_output.append('\t'.join(key_table))
if key_table[3] == 'SGCap':
kt_output.append((
f'-1\t-1\t\t0\t{caps_output}\t'
f'{shiftcaps_output}\t\t\t\t\t'
f'// {char_description(caps_output)}, '
f'{char_description(shiftcaps_output)}'))
return kt_output
def get_deadkey_table(self):
'''
Summary of dead keys, and their results in all intended combinations.
'''
dk_table = ['']
for cp_dead, base_result_list in self.deadkey_dict.items():
# we want the space character to be last in the list,
# otherwise MSKLC complains (not sure if consequential)
sorted_base_result_list = sorted(
base_result_list, key=lambda x: int(x[0], 16), reverse=True)
dk_table.extend([''])
dk_table.append(f'DEADKEY\t{cp_dead}')
dk_table.append('')
for cp_base, cp_result in sorted_base_result_list:
char_base = char_from_hex(cp_base)
char_result = char_from_hex(cp_result)
line = (
f'{cp_base}\t{cp_result}\t'
f'// {char_base} -> {char_result}')
dk_table.append(line)
return dk_table
def get_keyname_dead(self):
'''
List of dead keys contained in the klc keyboard layout.
'''
list_keyname_dead = ['', 'KEYNAME_DEAD', '']
# for codepoint in sorted(self.deadkeys.values()):
for codepoint in self.deadkeys.values():
list_keyname_dead.append(
f'{codepoint}\t"{char_description(codepoint)}"')
list_keyname_dead.append('')
if len(list_keyname_dead) == 4:
# no deadkeys, no KEYNAME_DEAD list
return ['', '']
else:
return list_keyname_dead
def read_file(path):
'''
Read a file, make list of the lines, close the file.
'''
with open(path, 'r', encoding='utf-8') as f:
data = f.read().splitlines()
return data
def codepoint_from_char(character):
'''
Return a 4 or 5-digit Unicode hex string for the passed character.
'''
try:
return '{0:04x}'.format(ord(character))
# For now, 'ligatures' (2 or more code points assigned to one key)
# are not supported in this conversion script.
# Ligature support on Windows keyboards is spotty (no ligatures in
# Caps Lock states, for instance), and limited to four code points
# per key. Used in very few keyboard layouts only, the decision was
# made to insert a placeholder instead.
except TypeError:
print(error_msg_conversion.format(
character, char_description(replacement_char)))
return replacement_char
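# Illustrative behaviour of codepoint_from_char (comment added for clarity):
#   codepoint_from_char('A')  -> '0041'
#   codepoint_from_char('ä')  -> '00e4'
#   supplementary-plane characters yield 5-digit strings, e.g. U+1F600 -> '1f600'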
def char_from_hex(hex_string):
'''
Return character from a Unicode code point.
'''
return chr(int(hex_string, 16))
def char_description(hex_string):
'''
Return description of characters, e.g. 'DIGIT ONE', 'EXCLAMATION MARK' etc.
'''
if hex_string in ['-1', '']:
return '<none>'
hex_string = hex_string.rstrip('@')
try:
return unicodedata.name(char_from_hex(hex_string))
except ValueError:
return 'PUA {}'.format(hex_string)
def filter_xml(input_keylayout):
'''
Filter xml-based .keylayout file.
    Unicode entities (&#x...; escapes) make the ElementTree xml parser choke,
that’s why some replacement operations are necessary.
Also, all literal output characters are converted to code points
(0000, ffff, 1ff23 etc) for easier handling downstream.
'''
rx_uni_lig = re.compile(r'((&#x[a-fA-F0-9]{4};){2,})')
rx_hex_escape = re.compile(r'&#x([a-fA-F0-9]{4,6});')
rx_output_line = re.compile(r'(output=[\"\'])(.+?)([\"\'])')
# Fixing the first line to make ElementTree not stumble
# over a capitalized XML tag
filtered_xml = ['<?xml version="1.0" encoding="UTF-8"?>']
for line in read_file(input_keylayout)[1:]:
if re.search(rx_output_line, line):
if re.search(rx_uni_lig, line):
# More than 1 output character.
# Not supported, so fill in replacement char instead.
lig_characters = re.search(rx_uni_lig, line).group(1)
print(error_msg_conversion.format(
lig_characters, char_description(replacement_char)))
line = re.sub(rx_uni_lig, replacement_char.lower(), line)
elif re.search(rx_hex_escape, line):
                # Escaped code point, e.g. an &#xXXXX; entity.
# Remove everything except the code point.
query = re.search(rx_hex_escape, line)
codepoint = query.group(1).lower()
line = re.sub(rx_hex_escape, codepoint, line)
else:
# Normal character output.
# Replace the character by a code point
query = re.search(rx_output_line, line)
char_pre = query.group(1) # output="
character = query.group(2)
codepoint = codepoint_from_char(character).lower()
char_suff = query.group(3) # "
replacement_line = ''.join((char_pre, codepoint, char_suff))
line = re.sub(rx_output_line, replacement_line, line)
filtered_xml.append(line)
return '\n'.join(filtered_xml)
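# Illustrative transformation (hypothetical input line, added for clarity):
#   <key code="0" output="a"/>   becomes   <key code="0" output="0061"/>
# so that downstream parsing only ever deals with hex code point strings.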
def make_klc_filename(keyboard_name):
'''
Windows .dll files allow for 8-character file names only, which is why the
output file name is truncated. If the input file name contains a number
(being part of a series), this number is appended to the end of the output
    file name. If this number has 8 or more digits, the script will gently
ask to modify the input file name.
Periods and spaces in the file name are not supported; MSKLC will not
build the .dll if the .klc has any.
This is why they are stripped here.
'''
# strip periods and spaces
filename = re.sub(r'[. ]', '', keyboard_name)
# find digit(s) at tail of file name
rx_digit = re.compile(r'(\d+?)$')
match_digit = rx_digit.search(filename)
if match_digit:
trunc = 8 - len(match_digit.group(1)) - 1
if trunc < 0:
print(error_msg_filename)
sys.exit(-1)
else:
filename = '{}_{}.klc'.format(
filename[:trunc], match_digit.group(1))
else:
filename = '{}.klc'.format(filename[:8])
return filename
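# Illustrative examples (hypothetical keyboard names, added for clarity):
#   'My Layout v2'     -> 'MyLayo_2.klc'  (trailing digit kept, stem truncated)
#   'MyKeyboardLayout' -> 'MyKeyboa.klc'  (no trailing digit, plain 8-char cut)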
def process_input_keylayout(input_keylayout):
filtered_xml = filter_xml(input_keylayout)
tree = ET.XML(filtered_xml)
keyboard_data = KeylayoutParser(tree)
return keyboard_data
def make_keyboard_name(input_path):
'''
Return the base name of the .keylayout file
'''
input_file = os.path.basename(input_path)
return os.path.splitext(input_file)[0]
def verify_input_file(parser, input_file):
'''
Check if the input file exists, and if the suffix is .keylayout
https://stackoverflow.com/a/15203955
'''
if not os.path.exists(input_file):
parser.error('This input file does not exist')
suffix = os.path.splitext(input_file)[-1]
if suffix.lower() != '.keylayout':
        parser.error('Please use an XML-based .keylayout file')
return input_file
def make_klc_prologue(keyboard_name):
# company = 'Adobe Systems Incorporated'
company = 'myCompany'
year = time.localtime()[0]
return klc_prologue_dummy.format(
locale_tag, keyboard_name, year, company, company,
locale_name, locale_id_long)
def make_klc_epilogue(keyboard_name):
return klc_epilogue_dummy.format(
locale_id, keyboard_name, locale_id, locale_name_long)
def make_klc_data(keyboard_name, keyboard_data):
klc_prologue = make_klc_prologue(keyboard_name)
klc_epilogue = make_klc_epilogue(keyboard_name)
klc_data = []
klc_data.extend(klc_prologue.splitlines())
klc_data.extend(keyboard_data.get_key_table())
klc_data.extend(keyboard_data.get_deadkey_table())
klc_data.extend(klc_keynames)
klc_data.extend(keyboard_data.get_keyname_dead())
klc_data.extend(klc_epilogue.splitlines())
return klc_data
def get_args(args=None):
parser = argparse.ArgumentParser(
description=__doc__)
parser.add_argument(
'input',
type=lambda input_file: verify_input_file(parser, input_file),
help='input .keylayout file'
)
parser.add_argument(
'-o', '--output_dir',
help='output directory',
metavar='DIR',
)
return parser.parse_args(args)
def run(args):
input_file = args.input
if args.output_dir:
output_dir = args.output_dir
else:
output_dir = os.path.abspath(os.path.dirname(input_file))
keyboard_data = process_input_keylayout(input_file)
keyboard_name = make_keyboard_name(input_file)
klc_filename = make_klc_filename(keyboard_name)
klc_data = make_klc_data(keyboard_name, keyboard_data)
output_path = os.sep.join((output_dir, klc_filename))
with codecs.open(output_path, 'w', 'utf-16') as output_file:
for line in klc_data:
output_file.write(line)
output_file.write(os.linesep)
print(f'{keyboard_name} written to {klc_filename}')
if __name__ == '__main__':
args = get_args()
run(args)
|
py | b404bdd3c2bf165e4fef8a9d13610fefdf848036 | import pandas as pd
import numpy as np
import librosa
import glob
import os
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_log_error
import requests
import threading
import traceback
import time
from aiy.board import Board, Led
from aiy.voice.audio import AudioFormat, record_file, play_wav
from scipy.stats import mode
moodDic = {'alarm': 0, 'cry': 1,
'glass': 2, "gun": 3, "water": 4}
tree = {}
class TreeNode:
def __init__(self, left=None, right=None, split_fn=None, leaf_evaluate=None):
self.left = left
self.right = right
self.split_fn = split_fn
self.leaf_evaluate = leaf_evaluate
def is_leaf(self):
        return self.left is None and self.right is None
def evaluate(self, X_i):
if self.is_leaf():
return self.leaf_evaluate()
if self.split_fn(X_i):
return self.left.evaluate(X_i)
else:
return self.right.evaluate(X_i)
class Leaf(TreeNode):
def __init__(self, label):
TreeNode.__init__(self, leaf_evaluate=lambda: label)
def H(y):
def proportion(val, y):
return (y == val).sum() / len(y)
unique = set(y)
return sum(-1 * proportion(val, y) * np.log2(proportion(val, y)) for val in unique)
def weighted_entropy(yes, no):
total_size = len(yes) + len(no)
return (len(yes) / total_size) * H(yes) + (len(no) / total_size) * H(no)
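# Reference note (comment added, not in the original): H(y) above is Shannon
# entropy, H(y) = -sum_v p(v) * log2(p(v)) over the distinct labels v in y,
# and weighted_entropy(yes, no) is the size-weighted average of the two
# branch entropies -- the quantity the split search in train() below minimizes.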
def train(X_train, Y_train, max_depth=None):
if len(Y_train) == 0:
return Leaf(0)
if len(set(Y_train)) == 1 or max_depth == 1:
return Leaf(mode(Y_train).mode)
def split_weighted_entropy(feature_idx, feature_value):
"""Calculate the weighted entropy of the split on feature <feature_idx>,
and on value <feature_value> """
feature = X_train[:, feature_idx]
yes = Y_train[feature > feature_value]
no = Y_train[feature <= feature_value]
return weighted_entropy(yes, no)
splits = np.zeros(X_train.shape)
for feature_idx in range(X_train.shape[1]):
# try to split on each X-value, no reason to try others
for i, feature_value in enumerate(X_train[:, feature_idx]):
splits[i, feature_idx] = split_weighted_entropy(
feature_idx, feature_value)
# find best split
max_idxs = X_train.argmax(axis=0)
for col, max_idx in enumerate(max_idxs):
splits[max_idx, col] = float('inf')
i = np.argmin(splits)
best_feature_idx = i % splits.shape[1]
best_feature_value = X_train[i // splits.shape[1], best_feature_idx]
yes = X_train[:, best_feature_idx] > best_feature_value
no = X_train[:, best_feature_idx] <= best_feature_value
# recurse and make decision trees on the yes and no sets
tree = TreeNode(
split_fn=lambda X_i: X_i[best_feature_idx] > best_feature_value,
left=train(X_train[yes], Y_train[yes], max_depth=max_depth -
1 if max_depth is not None else None),
right=train(X_train[no], Y_train[no], max_depth=max_depth -
1 if max_depth is not None else None)
)
return tree
def accuracy(y_pred, y_true):
return (y_pred == y_true).sum() / y_true.shape[0]
def predict(X, tree):
if len(X.shape) == 1:
X = X.reshape(1, -1)
preds = np.zeros(X.shape[0])
for i in range(X.shape[0]):
preds[i] = tree.evaluate(X[i])
return preds
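# Minimal usage sketch for the tree above (toy data, added for illustration):
#   X_toy = np.array([[0.0, 1.0], [0.2, 0.9], [1.0, 0.1], [0.9, 0.0]])
#   y_toy = np.array([0, 0, 1, 1])
#   toy_tree = train(X_toy, y_toy, max_depth=3)
#   predict(X_toy, toy_tree)   # expected to recover the two groups, [0 0 1 1]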
def parser(label, fileName):
# function to load files and extract features
    file_name = os.path.join('./sound-downloader', str(label), str(fileName))
# handle exception to check if there isn't a file which is corrupted
try:
# here kaiser_fast is a technique used for faster extraction
X, sample_rate = librosa.load(file_name, res_type='kaiser_fast')
# we extract mfcc feature from data
mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
feature = mfccs
except Exception as e:
print("Error encountered while parsing file: ", fileName)
return []
feature = [moodDic[label]] + list(feature)
return feature
def getFiles(mood):
filePath = os.path.join("./sound-downloader", str(mood))
files = list()
for i in os.listdir(filePath):
if i.endswith('.wav'):
files.append(i)
return files
def doTrainRoutine():
    global tree
fullData = []
for j in moodDic.keys():
fList = getFiles(str(j))
fullData.append((j,fList))
baby = pd.DataFrame(fullData, columns = ['type', 'file']).set_index('type')
#curFile = baby.loc['belly_pain']['file'][0]
#x1 = parser('belly_pain', '549a46d8-9c84-430e-ade8-97eae2bef787-1430130772174-1.7-m-48-bp.wav')
#print(x1)
fileNum = 0
soundDic = {}
for label in baby.index:
for lst in baby.loc[label]:
for soundFile in lst:
features = parser(label, soundFile)
soundDic[fileNum] = features
fileNum += 1
sound = pd.DataFrame.from_dict(data = soundDic, orient = 'index',columns=['type'] + list(range(0, 40)))
X = sound.iloc[:, 1:].values
Y = sound.iloc[:, 0].values
X_train_sound, X_test_sound, Y_train_sound, Y_test_sound = train_test_split(
X, Y, test_size=0.2, random_state=0)
print(sound)
print(len(X_train_sound), len(Y_train_sound))
tree = train(X_train_sound, Y_train_sound)
preds = predict(X_test_sound, tree)
print(accuracy(preds,Y_test_sound))
def predictor(fileN):
    file_name = os.path.join('./sound-downloader/testing', str(fileN))
# handle exception to check if there isn't a file which is corrupted
try:
# here kaiser_fast is a technique used for faster extraction
X, sample_rate = librosa.load(file_name, res_type='kaiser_fast')
# we extract mfcc feature from data
mfccs = np.mean(librosa.feature.mfcc(
y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
feature = mfccs
except Exception as e:
print("Error encountered while parsing file: ", fileN)
return []
return predict(feature, tree)
# print('actual: gun, predicted:', predictor('gun2.wav'))
#########################################################
TEST_SOUND = '/usr/share/sounds/alsa/Front_Center.wav'
FILENAME = 'recording.wav'
def main():
print('Beginning training...')
doTrainRoutine()
print('TRAINING COMPLETE')
print('Playing test sound...')
play_wav(TEST_SOUND)
with Board() as board:
while True:
board.led.state = Led.OFF
print('Press button to start recording...')
board.button.wait_for_press()
board.led.state = Led.ON
done = threading.Event()
board.button.when_pressed = done.set
# def wait():
# start = time.monotonic()
# while not done.is_set():
# duration = time.monotonic() - start
# print('Recording: %.02f seconds [Press button to stop]' % duration)
# time.sleep(0.5)
record_file(AudioFormat.CD, filename=FILENAME, wait=wait(done), filetype='wav')
# run classifier
stateNum = predictor(FILENAME)
state = 'none'
for name, num in moodDic.items():
if num == stateNum:
state = name
print(state)
payload = {'type': state}
headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
r = requests.post('http://bigrip.ocf.berkeley.edu:5000/notify', json=payload, headers=headers)
print(r.status_code)
def wait(done):
def _helper():
start = time.monotonic()
while not done.is_set():
duration = time.monotonic() - start
print('Recording: %.02f seconds [Press button to stop]' % duration)
time.sleep(0.5)
return _helper
def loop():
print('Starting training:')
doTrainRoutine()
print('Training complete. Accepting audio...')
audioLoop()
def audioLoop():
threading.Timer(10.0, audioLoop).start()
print("Accepting audio...")
    if os.path.exists('./sound-downloader/testing/recording.wav'):
print('File found. Processing...')
print(predictor('recording.wav'))
printit()
if __name__ == '__main__':
try:
main()
except Exception:
traceback.print_exc()
|
py | b404be9a98dc315787fae9cdbf271271801d703f | # -*- coding: utf-8 -*-
# Scrapy settings for weiboZ project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
# http://doc.scrapy.org/en/latest/topics/settings.html
# http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
# http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
USER_AGENTS = [
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; AcooBrowser; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Acoo Browser; SLCC1; .NET CLR 2.0.50727; Media Center PC 5.0; .NET CLR 3.0.04506)",
"Mozilla/4.0 (compatible; MSIE 7.0; AOL 9.5; AOLBuild 4337.35; Windows NT 5.1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows; U; MSIE 9.0; Windows NT 9.0; en-US)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Win64; x64; Trident/5.0; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 2.0.50727; Media Center PC 6.0)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 7.0b; Windows NT 5.2; .NET CLR 1.1.4322; .NET CLR 2.0.50727; InfoPath.2; .NET CLR 3.0.04506.30)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN) AppleWebKit/523.15 (KHTML, like Gecko, Safari/419.3) Arora/0.3 (Change: 287 c9dfb30)",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527+ (KHTML, like Gecko, Safari/419.3) Arora/0.6",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.2pre) Gecko/20070215 K-Ninja/2.1.1",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; zh-CN; rv:1.9) Gecko/20080705 Firefox/3.0 Kapiko/3.0",
"Mozilla/5.0 (X11; Linux i686; U;) Gecko/20070322 Kazehakase/0.4.5",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Fedora/1.9.0.8-1.fc10 Kazehakase/0.5.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
]
# Proxy addresses; fresh ones can be found at http://www.ip181.com/ and should be verified and updated regularly
PROXIES = ["http://202.44.109.56:8080", "http://101.4.138.34:8888", "http://202.105.131.231:80", "http://192.169.175.145:8080", "http://119.115.79.1:8080", "http://59.37.161.126:80", "http://160.202.41.138:8080", "http://118.144.154.253:3128", "http://218.92.220.64:8080", "http://5.189.169.210:8080", "http://218.24.17.5:8888", "http://211.35.179.37:8080", "http://187.95.116.10:3128", "http://103.28.160.86:8081", "http://103.214.173.191:8080", "http://125.39.78.132:8888", "http://27.148.151.178:80", "http://175.181.118.143:8080", "http://36.42.32.117:8080", "http://210.13.73.133:8080",
"http://46.232.165.223:8080", "http://149.56.13.5:5555", "http://210.150.90.210:8080", "http://210.51.35.209:80", "http://201.204.47.70:8080", "http://212.174.6.68:3128", "http://210.242.179.103:8080", "http://218.14.121.229:9000", "http://115.217.251.4:8888", "http://200.29.191.149:3128", "http://200.123.9.123:8080", "http://210.175.248.164:8080", "http://187.108.32.75:3128", "http://110.77.234.105:8080", "http://190.63.172.206:8080", "http://95.80.98.41:8080", "http://197.33.174.217:8080", "http://27.148.151.175:80", "http://210.136.117.169:8080", "http://210.116.112.109:80", ]
BOT_NAME = 'weiboZ'
SPIDER_MODULES = ['weiboZ.spiders']
NEWSPIDER_MODULE = 'weiboZ.spiders'
LOG_LEVEL = 'INFO'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'weiboZ (+http://www.yourdomain.com)'
# Obey robots.txt rules
ROBOTSTXT_OBEY = False
MONGO_URI = 'mongodb://localhost:27017/'
MONGO_DATABASE = {
'db': 'weibo', # db name
'col': 'house' # collection name
}
# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32
# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False
# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.8,en-US;q=0.5,en;q=0.3',
    'User-Agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/535.20 (KHTML, like Gecko) Chrome/19.0.1036.7 Safari/535.20",
}
# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
# 'weiboZ.middlewares.MyCustomSpiderMiddleware': 543,
#}
# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
'weiboZ.middlewares.DBDownloaderMiddleware.RandomUserAgent': 543,
'scrapy.downloadermiddleware.useragent.UserAgentMiddleware': None,
    # Proxy middleware, disabled by default
#'weiboZ.middlewares.DBDownloaderMiddleware.ProxyMiddleware': 543
}
# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
# EXTENSIONS = {
# 'scrapy.extensions.telnet.TelnetConsole': None,
#}
# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
#'weiboZ.pipelines.weiboMongoPipeline': 300,
#'weiboZ.pipelines.JsonPipeline': 300,
'weiboZ.pipelines.dbMongoPipeline': 300
}
# Enable and configure the AutoThrottle extension (disabled by default)
# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False
# Enable and configure HTTP caching (disabled by default)
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
|
py | b404bead4616cb754c902ad7c4af7175c97daf98 | """
Nyzo String for publicIdentifier
"""
from nyzostrings.nyzostring import NyzoString
__version__ = "0.0.1"
class NyzoStringPublicIdentifier(NyzoString):
def __init__(self, identifier: bytes) -> None:
super().__init__('id__', identifier)
def get_identifier(self) -> bytes:
return self.bytes_content
@classmethod
def from_hex(cls, hex_string: str) -> "NyzoStringPublicIdentifier":
filtered_string = hex_string.replace('-', '')[:64]
return NyzoStringPublicIdentifier(bytes.fromhex(filtered_string))
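# Usage sketch (hypothetical all-zero identifier, for illustration only):
#   nyzo_id = NyzoStringPublicIdentifier.from_hex('00' * 32)
#   nyzo_id.get_identifier()   # -> 32 zero bytes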
|
py | b404befca034ac7a35c572d36962c690f05253e1 |
def randIndex(truth, predicted):
"""
The function is to measure similarity between two label assignments
truth: ground truth labels for the dataset (1 x 1496)
predicted: predicted labels (1 x 1496)
"""
if len(truth) != len(predicted):
print "different sizes of the label assignments"
return -1
elif (len(truth) == 1):
return 1
sizeLabel = len(truth)
agree_same = 0
disagree_same = 0
count = 0
for i in range(sizeLabel-1):
for j in range(i+1,sizeLabel):
if ((truth[i] == truth[j]) and (predicted[i] == predicted[j])):
agree_same += 1
elif ((truth[i] != truth[j]) and (predicted[i] != predicted[j])):
disagree_same +=1
count += 1
return (agree_same+disagree_same)/float(count)
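# Illustrative check (added comment): label permutations do not matter, e.g.
# randIndex([0, 0, 1, 1], [1, 1, 0, 0]) == 1.0 because every pair of points
# is grouped the same way in both assignments.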
# Code Sample
import scipy.cluster.hierarchy as sch
import numpy as np
import pylab as pl
# Plot dendrogram and cut the tree to find resulting clusters
fig = pl.figure()
data = np.array([[1, 2, 3], [1, 1, 1], [5, 5, 5]])
data_labels = ['first', 'second', 'third']
hClsMat = sch.linkage(data, method='complete')  # Complete clustering
sch.dendrogram(hClsMat, labels=data_labels, leaf_rotation=45)
fig.savefig("thing.pdf")
resultingClusters = sch.fcluster(hClsMat, t=3, criterion='distance')
print(resultingClusters)
# Your code starts from here ....
# 1.
# Scaling min max
# STUDENT CODE TODO
# 2.
# K-means http://scikit-learn.org/stable/modules/generated/sklearn.cluster.KMeans.html
# STUDENT CODE TODO
# 3.
# Compute Rand Index
# STUDENT CODE TODO
# 4.
# Examining K-mean objective
# STUDENT CODE TODO
# 5.
# Dendrogram plot
# Dendrogram - http://docs.scipy.org/doc/scipy-0.13.0/reference/generated/scipy.cluster.hierarchy.dendrogram.html
# Linkage - http://docs.scipy.org/doc/scipy-0.13.0/reference/generated/scipy.cluster.hierarchy.linkage.html
# STUDENT CODE TODO
# 6.
# Hierarchical clustering
# SciPy's Cluster - http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.fcluster.html#scipy.cluster.hierarchy.fcluster
# STUDENT CODE TODO
# 7.
# K-means for Sub-cluster
# STUDENT CODE TODO
|
py | b404c0a8505647a61b86f036f2e567cedf31c996 | # Copyright (C) 2016 Nokia Corporation and/or its subsidiary(-ies).
import json
from unittest import TestCase
from deployment.websocket import DeployerWebSocket, WebSocketEvent, WebSocketWorker, ServerStopped
try:
import unittest.mock as mock
except ImportError as e:
import mock
class _MockWebSocketMessage(object):
def __init__(self, data, is_binary):
self.data = data
self.is_binary = is_binary
class TestDeployerWebsocket(TestCase):
def setUp(self):
self.ws = DeployerWebSocket(mock.MagicMock())
self.server = mock.MagicMock()
self.ws.register_observer(self.server)
def test_received_message(self):
data = {"data": "data"}
self.ws.received_message(_MockWebSocketMessage(json.dumps(data), False))
self.server.notify.assert_called_with(data, self.ws)
def test_notify(self):
event = WebSocketEvent("event.type", {'key': 'value'})
event_2 = WebSocketEvent("event.type", {'environment_id': 1})
with mock.patch.object(self.ws, 'send') as send_method:
self.ws.notify(event)
self.ws.notify(event_2)
self.assertEquals(0, send_method.call_count)
self.ws.forward_events_matching(1)
self.ws.notify(event_2)
send_method.assert_called_with(json.dumps(event_2.to_dict()))
def test_stop_forwarding_events(self):
event = WebSocketEvent("event.type", {'environment_id': 1})
with mock.patch.object(self.ws, 'send') as send_method:
self.ws.forward_events_matching(1)
self.ws.stop_forwarding_events_matching(1)
self.ws.notify(event)
self.assertEquals(0, send_method.call_count)
|
py | b404c0d48a157bd1af286d83627cdb770482db9b | # coding: utf-8
"""
UbiOps
Client Library to interact with the UbiOps API. # noqa: E501
The version of the OpenAPI document: v2.1
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from ubiops.configuration import Configuration
class DeploymentVersionCreate(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'version': 'str',
'language': 'str',
'memory_allocation': 'int',
'instance_type': 'str',
'maximum_instances': 'int',
'minimum_instances': 'int',
'maximum_idle_time': 'int',
'description': 'str',
'labels': 'dict(str, str)',
'monitoring': 'str',
'request_retention_time': 'int',
'request_retention_mode': 'str',
'deployment_mode': 'str',
'default_notification_group': 'str'
}
attribute_map = {
'version': 'version',
'language': 'language',
'memory_allocation': 'memory_allocation',
'instance_type': 'instance_type',
'maximum_instances': 'maximum_instances',
'minimum_instances': 'minimum_instances',
'maximum_idle_time': 'maximum_idle_time',
'description': 'description',
'labels': 'labels',
'monitoring': 'monitoring',
'request_retention_time': 'request_retention_time',
'request_retention_mode': 'request_retention_mode',
'deployment_mode': 'deployment_mode',
'default_notification_group': 'default_notification_group'
}
def __init__(self, version=None, language='python3.7', memory_allocation=None, instance_type=None, maximum_instances=None, minimum_instances=None, maximum_idle_time=None, description=None, labels=None, monitoring=None, request_retention_time=None, request_retention_mode='full', deployment_mode='express', default_notification_group=None, local_vars_configuration=None): # noqa: E501
"""DeploymentVersionCreate - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._version = None
self._language = None
self._memory_allocation = None
self._instance_type = None
self._maximum_instances = None
self._minimum_instances = None
self._maximum_idle_time = None
self._description = None
self._labels = None
self._monitoring = None
self._request_retention_time = None
self._request_retention_mode = None
self._deployment_mode = None
self._default_notification_group = None
self.discriminator = None
self.version = version
if language is not None:
self.language = language
if memory_allocation is not None:
self.memory_allocation = memory_allocation
if instance_type is not None:
self.instance_type = instance_type
if maximum_instances is not None:
self.maximum_instances = maximum_instances
if minimum_instances is not None:
self.minimum_instances = minimum_instances
if maximum_idle_time is not None:
self.maximum_idle_time = maximum_idle_time
if description is not None:
self.description = description
self.labels = labels
self.monitoring = monitoring
if request_retention_time is not None:
self.request_retention_time = request_retention_time
if request_retention_mode is not None:
self.request_retention_mode = request_retention_mode
if deployment_mode is not None:
self.deployment_mode = deployment_mode
self.default_notification_group = default_notification_group
@property
def version(self):
"""Gets the version of this DeploymentVersionCreate. # noqa: E501
:return: The version of this DeploymentVersionCreate. # noqa: E501
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""Sets the version of this DeploymentVersionCreate.
:param version: The version of this DeploymentVersionCreate. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and version is None: # noqa: E501
raise ValueError("Invalid value for `version`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
version is not None and not isinstance(version, str)):
raise ValueError("Parameter `version` must be a string") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
version is not None and len(version) < 1):
raise ValueError("Invalid value for `version`, length must be greater than or equal to `1`") # noqa: E501
self._version = version
@property
def language(self):
"""Gets the language of this DeploymentVersionCreate. # noqa: E501
:return: The language of this DeploymentVersionCreate. # noqa: E501
:rtype: str
"""
return self._language
@language.setter
def language(self, language):
"""Sets the language of this DeploymentVersionCreate.
:param language: The language of this DeploymentVersionCreate. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
language is not None and not isinstance(language, str)):
raise ValueError("Parameter `language` must be a string") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
language is not None and len(language) < 1):
raise ValueError("Invalid value for `language`, length must be greater than or equal to `1`") # noqa: E501
self._language = language
@property
def memory_allocation(self):
"""Gets the memory_allocation of this DeploymentVersionCreate. # noqa: E501
:return: The memory_allocation of this DeploymentVersionCreate. # noqa: E501
:rtype: int
"""
return self._memory_allocation
@memory_allocation.setter
def memory_allocation(self, memory_allocation):
"""Sets the memory_allocation of this DeploymentVersionCreate.
:param memory_allocation: The memory_allocation of this DeploymentVersionCreate. # noqa: E501
:type: int
"""
if (self.local_vars_configuration.client_side_validation and
memory_allocation is not None and not isinstance(memory_allocation, int)):
raise ValueError("Parameter `memory_allocation` must be an integer") # noqa: E501
self._memory_allocation = memory_allocation
@property
def instance_type(self):
"""Gets the instance_type of this DeploymentVersionCreate. # noqa: E501
:return: The instance_type of this DeploymentVersionCreate. # noqa: E501
:rtype: str
"""
return self._instance_type
@instance_type.setter
def instance_type(self, instance_type):
"""Sets the instance_type of this DeploymentVersionCreate.
:param instance_type: The instance_type of this DeploymentVersionCreate. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
instance_type is not None and not isinstance(instance_type, str)):
raise ValueError("Parameter `instance_type` must be a string") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
instance_type is not None and len(instance_type) < 1):
raise ValueError("Invalid value for `instance_type`, length must be greater than or equal to `1`") # noqa: E501
self._instance_type = instance_type
@property
def maximum_instances(self):
"""Gets the maximum_instances of this DeploymentVersionCreate. # noqa: E501
:return: The maximum_instances of this DeploymentVersionCreate. # noqa: E501
:rtype: int
"""
return self._maximum_instances
@maximum_instances.setter
def maximum_instances(self, maximum_instances):
"""Sets the maximum_instances of this DeploymentVersionCreate.
:param maximum_instances: The maximum_instances of this DeploymentVersionCreate. # noqa: E501
:type: int
"""
if (self.local_vars_configuration.client_side_validation and
maximum_instances is not None and not isinstance(maximum_instances, int)):
raise ValueError("Parameter `maximum_instances` must be an integer") # noqa: E501
self._maximum_instances = maximum_instances
@property
def minimum_instances(self):
"""Gets the minimum_instances of this DeploymentVersionCreate. # noqa: E501
:return: The minimum_instances of this DeploymentVersionCreate. # noqa: E501
:rtype: int
"""
return self._minimum_instances
@minimum_instances.setter
def minimum_instances(self, minimum_instances):
"""Sets the minimum_instances of this DeploymentVersionCreate.
:param minimum_instances: The minimum_instances of this DeploymentVersionCreate. # noqa: E501
:type: int
"""
if (self.local_vars_configuration.client_side_validation and
minimum_instances is not None and not isinstance(minimum_instances, int)):
raise ValueError("Parameter `minimum_instances` must be an integer") # noqa: E501
self._minimum_instances = minimum_instances
@property
def maximum_idle_time(self):
"""Gets the maximum_idle_time of this DeploymentVersionCreate. # noqa: E501
:return: The maximum_idle_time of this DeploymentVersionCreate. # noqa: E501
:rtype: int
"""
return self._maximum_idle_time
@maximum_idle_time.setter
def maximum_idle_time(self, maximum_idle_time):
"""Sets the maximum_idle_time of this DeploymentVersionCreate.
:param maximum_idle_time: The maximum_idle_time of this DeploymentVersionCreate. # noqa: E501
:type: int
"""
if (self.local_vars_configuration.client_side_validation and
maximum_idle_time is not None and not isinstance(maximum_idle_time, int)):
raise ValueError("Parameter `maximum_idle_time` must be an integer") # noqa: E501
self._maximum_idle_time = maximum_idle_time
@property
def description(self):
"""Gets the description of this DeploymentVersionCreate. # noqa: E501
:return: The description of this DeploymentVersionCreate. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this DeploymentVersionCreate.
:param description: The description of this DeploymentVersionCreate. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
description is not None and not isinstance(description, str)):
raise ValueError("Parameter `description` must be a string") # noqa: E501
self._description = description
@property
def labels(self):
"""Gets the labels of this DeploymentVersionCreate. # noqa: E501
:return: The labels of this DeploymentVersionCreate. # noqa: E501
:rtype: dict(str, str)
"""
return self._labels
@labels.setter
def labels(self, labels):
"""Sets the labels of this DeploymentVersionCreate.
:param labels: The labels of this DeploymentVersionCreate. # noqa: E501
:type: dict(str, str)
"""
if (self.local_vars_configuration.client_side_validation and
labels is not None and not isinstance(labels, dict)):
raise ValueError("Parameter `labels` must be a dictionary") # noqa: E501
self._labels = labels
@property
def monitoring(self):
"""Gets the monitoring of this DeploymentVersionCreate. # noqa: E501
:return: The monitoring of this DeploymentVersionCreate. # noqa: E501
:rtype: str
"""
return self._monitoring
@monitoring.setter
def monitoring(self, monitoring):
"""Sets the monitoring of this DeploymentVersionCreate.
:param monitoring: The monitoring of this DeploymentVersionCreate. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
monitoring is not None and not isinstance(monitoring, str)):
raise ValueError("Parameter `monitoring` must be a string") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
monitoring is not None and len(monitoring) < 1):
raise ValueError("Invalid value for `monitoring`, length must be greater than or equal to `1`") # noqa: E501
self._monitoring = monitoring
@property
def request_retention_time(self):
"""Gets the request_retention_time of this DeploymentVersionCreate. # noqa: E501
:return: The request_retention_time of this DeploymentVersionCreate. # noqa: E501
:rtype: int
"""
return self._request_retention_time
@request_retention_time.setter
def request_retention_time(self, request_retention_time):
"""Sets the request_retention_time of this DeploymentVersionCreate.
:param request_retention_time: The request_retention_time of this DeploymentVersionCreate. # noqa: E501
:type: int
"""
if (self.local_vars_configuration.client_side_validation and
request_retention_time is not None and not isinstance(request_retention_time, int)):
raise ValueError("Parameter `request_retention_time` must be an integer") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
request_retention_time is not None and request_retention_time > 2.4192E+6): # noqa: E501
raise ValueError("Invalid value for `request_retention_time`, must be a value less than or equal to `2.4192E+6`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
request_retention_time is not None and request_retention_time < 3.6E+3): # noqa: E501
raise ValueError("Invalid value for `request_retention_time`, must be a value greater than or equal to `3.6E+3`") # noqa: E501
self._request_retention_time = request_retention_time
@property
def request_retention_mode(self):
"""Gets the request_retention_mode of this DeploymentVersionCreate. # noqa: E501
:return: The request_retention_mode of this DeploymentVersionCreate. # noqa: E501
:rtype: str
"""
return self._request_retention_mode
@request_retention_mode.setter
def request_retention_mode(self, request_retention_mode):
"""Sets the request_retention_mode of this DeploymentVersionCreate.
:param request_retention_mode: The request_retention_mode of this DeploymentVersionCreate. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
request_retention_mode is not None and not isinstance(request_retention_mode, str)):
raise ValueError("Parameter `request_retention_mode` must be a string") # noqa: E501
allowed_values = ["none", "metadata", "full"] # noqa: E501
if self.local_vars_configuration.client_side_validation and request_retention_mode not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `request_retention_mode` ({0}), must be one of {1}" # noqa: E501
.format(request_retention_mode, allowed_values)
)
self._request_retention_mode = request_retention_mode
@property
def deployment_mode(self):
"""Gets the deployment_mode of this DeploymentVersionCreate. # noqa: E501
:return: The deployment_mode of this DeploymentVersionCreate. # noqa: E501
:rtype: str
"""
return self._deployment_mode
@deployment_mode.setter
def deployment_mode(self, deployment_mode):
"""Sets the deployment_mode of this DeploymentVersionCreate.
:param deployment_mode: The deployment_mode of this DeploymentVersionCreate. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
deployment_mode is not None and not isinstance(deployment_mode, str)):
raise ValueError("Parameter `deployment_mode` must be a string") # noqa: E501
allowed_values = ["express", "batch"] # noqa: E501
if self.local_vars_configuration.client_side_validation and deployment_mode not in allowed_values: # noqa: E501
raise ValueError(
"Invalid value for `deployment_mode` ({0}), must be one of {1}" # noqa: E501
.format(deployment_mode, allowed_values)
)
self._deployment_mode = deployment_mode
@property
def default_notification_group(self):
"""Gets the default_notification_group of this DeploymentVersionCreate. # noqa: E501
:return: The default_notification_group of this DeploymentVersionCreate. # noqa: E501
:rtype: str
"""
return self._default_notification_group
@default_notification_group.setter
def default_notification_group(self, default_notification_group):
"""Sets the default_notification_group of this DeploymentVersionCreate.
:param default_notification_group: The default_notification_group of this DeploymentVersionCreate. # noqa: E501
:type: str
"""
if (self.local_vars_configuration.client_side_validation and
default_notification_group is not None and not isinstance(default_notification_group, str)):
raise ValueError("Parameter `default_notification_group` must be a string") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
default_notification_group is not None and len(default_notification_group) < 1):
raise ValueError("Invalid value for `default_notification_group`, length must be greater than or equal to `1`") # noqa: E501
self._default_notification_group = default_notification_group
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, DeploymentVersionCreate):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, DeploymentVersionCreate):
return True
return self.to_dict() != other.to_dict()
|
py | b404c122427413eddd6df58a3a3bc0a46ab73fc4 | import scrapy
class Spider(scrapy.Spider):
name = "learnopencv"
start_urls = ['https://learnopencv.com/']
def parse(self, response):
ARTICLE_SELECTOR = '.entry-title a ::attr(href)'
for article_ref in response.css(ARTICLE_SELECTOR).extract():
yield scrapy.Request(
response.urljoin(article_ref),
callback=self.parse_article
)
NEXT_PAGE_SELECTOR = '.pagination-next a ::attr(href)'
next_page = response.css(NEXT_PAGE_SELECTOR).extract_first()
if next_page:
yield scrapy.Request(
response.urljoin(next_page),
callback=self.parse
)
def parse_article(self, response):
TITLE_SELECTOR = 'title ::text'
title = response.css(TITLE_SELECTOR)
CONTENT_SELECTOR = '.entry-content'
content = response.css(CONTENT_SELECTOR)
if content:
yield {
'link': response.url,
'name': title.extract_first(),
'article': content.extract_first(),
}
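# Illustrative runner (added for this write-up, not part of the original spider). It assumes
# Scrapy >= 2.1 is installed; the output file name 'articles.json' is a hypothetical choice.
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess
    # Write scraped items to a JSON feed while the spider crawls.
    process = CrawlerProcess(settings={
        'FEEDS': {'articles.json': {'format': 'json'}},
    })
    process.crawl(Spider)
    process.start()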
|
py | b404c1669dbfcba297e47fd8593606a3078ca576 | # coding: utf-8
"""
Onshape REST API
The Onshape REST API consumed by all clients. # noqa: E501
The version of the OpenAPI document: 1.113
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
import sys # noqa: F401
import six # noqa: F401
import nulltype # noqa: F401
from onshape_client.oas.model_utils import ( # noqa: F401
ModelComposed,
ModelNormal,
ModelSimple,
date,
datetime,
file_type,
int,
none_type,
str,
validate_get_composed_info,
)
class BTBodyTypeFilter112AllOf(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
("body_type",): {
"SOLID": "SOLID",
"SHEET": "SHEET",
"WIRE": "WIRE",
"POINT": "POINT",
"MATE_CONNECTOR": "MATE_CONNECTOR",
"COMPOSITE": "COMPOSITE",
"UNKNOWN": "UNKNOWN",
},
}
validations = {}
additional_properties_type = None
@staticmethod
def openapi_types():
"""
This must be a class method so a model may have properties that are
of type self, this ensures that we don't create a cyclic import
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
return {
"body_type": (str,), # noqa: E501
"bt_type": (str,), # noqa: E501
}
@staticmethod
def discriminator():
return None
attribute_map = {
"body_type": "bodyType", # noqa: E501
"bt_type": "btType", # noqa: E501
}
@staticmethod
def _composed_schemas():
return None
required_properties = set(
[
"_data_store",
"_check_type",
"_from_server",
"_path_to_item",
"_configuration",
]
)
def __init__(
self,
_check_type=True,
_from_server=False,
_path_to_item=(),
_configuration=None,
**kwargs
): # noqa: E501
"""bt_body_type_filter112_all_of.BTBodyTypeFilter112AllOf - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_from_server (bool): True if the data is from the server
False if the data is from the client (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
body_type (str): [optional] # noqa: E501
bt_type (str): [optional] # noqa: E501
"""
self._data_store = {}
self._check_type = _check_type
self._from_server = _from_server
self._path_to_item = _path_to_item
self._configuration = _configuration
for var_name, var_value in six.iteritems(kwargs):
if (
var_name not in self.attribute_map
and self._configuration is not None
and self._configuration.discard_unknown_keys
and self.additional_properties_type is None
):
# discard variable.
continue
setattr(self, var_name, var_value)
|
py | b404c16d30f4e0ab4aaa2aab6c95ccb4eba16d92 | __author__ = 'schlitzer'
# stdlib
import codecs
import copy
import json
import json.decoder
import logging
import os
import re
import threading
import time
class Worker(threading.Thread):
def __init__(
self, file, msgqueue, tags, regex, template,
syslog_facility, syslog_tag, syslog_severity,
encoding
):
super().__init__(name='Worker:'+file)
self.log = logging.getLogger('pylogchop')
self._fd = None
self._file = file
self._data = None
self._encoding = None
self._msgqueue = msgqueue
self._st_ino = None
self._st_dev = None
self._st_size = None
self._pos = None
self._regex = None
self._tags = None
self._tags_dict = None
self._template = None
self.encoding = encoding
self.template = template
self.regex = regex
self.syslog_facility = syslog_facility
self.syslog_tag = syslog_tag
self.syslog_severity = syslog_severity
self.tags = tags
self.tags_dict = tags
self.terminate = False
@property
def encoding(self):
return self._encoding
@encoding.setter
def encoding(self, encoding):
self._encoding = encoding
@property
def regex(self):
return self._regex
@regex.setter
def regex(self, regex):
if regex == '':
self._regex = None
else:
self._regex = re.compile(regex)
@property
def template(self):
return self._template
@template.setter
def template(self, template):
try:
with open(template, 'r') as f:
try:
self._template = json.load(f)
except json.decoder.JSONDecodeError as err:
self.log.fatal("could not parse template".format(err))
except OSError as err:
self.log.fatal("could not read template: {0}".format(err))
@property
def tags(self):
return self._tags
@tags.setter
def tags(self, tags):
self._tags = tags.split(',')
self.tags_dict = tags
@property
def tags_dict(self):
return self._tags_dict
@tags_dict.setter
def tags_dict(self, tags):
tags_dict = {}
tags = tags.split(',')
for tag in tags:
tag = tag.split(':', 1)
if len(tag) != 2:
self.log.error("cannot create k,v from tag {0}".format(tag))
continue
key, value = tag
tags_dict[key] = value
self._tags_dict = tags_dict
def _build_message(self, msg):
for key, value in msg.items():
if isinstance(value, dict):
self._build_message(msg[key])
elif isinstance(value, str):
if value == "$FIRST_LINE":
msg[key] = self._data['first_line']
elif value == "$OTHER_LINES":
msg[key] = self._data['other_lines']
elif value == "$TAGS":
msg[key] = self.tags
elif value == "$TAGS_DICT":
msg[key] = self.tags_dict
elif value.startswith('$RE_'):
value = value.split('_')
if not len(value) == 3:
continue
try:
grp_num = int(value[1])
except ValueError:
continue
grp_type = value[2]
if grp_type == u'INT':
try:
msg[key] = int(self._data['match'].group(grp_num))
except IndexError:
self.log.error("no match group {0}".format(grp_num))
except ValueError:
self.log.error("cannot transform {0} to integer".format(self._data['match'].group(grp_num)))
elif grp_type == u'FLOAT':
try:
msg[key] = float(self._data['match'].group(grp_num))
except IndexError:
self.log.error("no match group {0}".format(grp_num))
except ValueError:
self.log.error("cannot transform {0} to float".format(self._data['match'].group(grp_num)))
elif grp_type == u'STR':
try:
msg[key] = self._data['match'].group(grp_num)
except IndexError:
self.log.error("no match group {0}".format(grp_num))
def build_message(self):
msg = {
"tag": self.syslog_tag,
"severity": self.syslog_severity,
"facility": self.syslog_facility
}
payload = copy.deepcopy(self.template)
self._build_message(payload)
msg["payload"] = payload
self._msgqueue.append(msg)
self._data = None
def process_line(self, line):
if self.regex:
match = self.regex.match(line)
if match and self._data:
self.log.debug("submitting previous message")
self.build_message()
self.log.debug("detected new log message")
self.process_first_line(line, match)
elif match and not self._data:
self.log.debug("detected new log message")
self.process_first_line(line, match)
elif self._data and not match:
self.log.debug("got new line for multiline payload")
self._data['other_lines'].append(line)
self._data['starving'] = False
else:
self.log.error("got line that is not matching regex, and not part of a multiline log message")
self.log.error("{0}".format(line))
pass
else:
self.log.debug("got new plan log message")
self.process_first_line(line, None)
self.build_message()
def process_first_line(self, line, match):
self.log.debug("creating new message")
self._data = {
"starving": False,
"facility": self.syslog_facility,
"tag": self.syslog_tag,
"severity": self.syslog_severity,
"first_line": line,
"other_lines": [],
"match": match
}
def close(self):
if self._fd:
self.log.debug("closing log file")
self._fd.close()
self._fd = None
self.log.debug("done closing log file")
def follow(self):
while not self.terminate:
if self._fd:
self.chk_stat()
if not self._fd:
self.open()
if not self._fd:
continue
self._pos = self._fd.tell()
line = self._fd.readline()
if line:
yield line
else:
self._fd.seek(self._pos, 0)
if self._data:
if self._data['starving']:
self.build_message()
else:
self._data['starving'] = True
time.sleep(1)
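    # Note (added comment, not part of the original code): when readline() returns nothing for
    # two consecutive polls, the 'starving' flag makes follow() flush the buffered multiline
    # message via build_message() instead of waiting for a new matching first line.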
def chk_stat(self):
try:
stat = os.stat(self._file)
except OSError as err:
self.log.error("could not stat file: {0}".format(err))
self.close()
return
if self._pos > stat.st_size:
self.log.info("truncate detected, reopening")
self.close()
elif self._st_dev != stat.st_dev:
self.log.info("underling device changed, reopening")
self.close()
elif self._st_ino != stat.st_ino:
self.log.info("inode has changed, reopening")
self.close()
def _open(self):
if self._fd:
self.close()
self.log.debug("open logfile")
try:
self._fd = codecs.open(self._file, mode='r', encoding=self.encoding, errors='ignore')
self._fd.seek(0, 2)
stat = os.stat(self._file)
self._st_dev = stat.st_dev
self._st_ino = stat.st_ino
self._pos = self._fd.tell()
except OSError as err:
self.log.error("could not open logfile: {0}".format(err))
return False
self.log.debug("done open logfile")
return True
def open(self):
sleep = 0
while not self.terminate:
if sleep == 0:
if self._open():
return
sleep = 10
self.log.error("retrying opening in 10 seconds")
else:
time.sleep(1)
sleep -= 1
def run(self):
self.log.info("i am up")
for line in self.follow():
if self.terminate:
break
self.process_line(line)
self.log.info("i am going down")
self.close()
self.log.info("gone")
|
py | b404c17ae771782a0a50f395f274cb56ceb10425 | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'djindahaus.settings.shell')
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
py | b404c21b07719cf69416fa904f6f2c1407e5b4f8 | from __future__ import absolute_import
from sentry.models import ExternalIssue, Group, GroupLink, GroupStatus, Integration, User
from sentry.integrations.exceptions import IntegrationError
from sentry.tasks.base import instrumented_task, retry
@instrumented_task(
name='sentry.tasks.integrations.post_comment',
queue='integrations',
default_retry_delay=60 * 5,
max_retries=5
)
# TODO(jess): Add more retry exclusions once ApiClients have better error handling
@retry(exclude=(ExternalIssue.DoesNotExist, Integration.DoesNotExist))
def post_comment(external_issue_id, data, **kwargs):
# sync Sentry comments to an external issue
external_issue = ExternalIssue.objects.get(id=external_issue_id)
integration = Integration.objects.get(id=external_issue.integration_id)
integration.get_installation(
organization_id=external_issue.organization_id).create_comment(
external_issue.key, data['text'])
@instrumented_task(
name='sentry.tasks.integrations.jira.sync_metadata',
queue='integrations',
default_retry_delay=20,
max_retries=5
)
@retry(on=(IntegrationError,))
def sync_metadata(installation):
installation.sync_metadata()
@instrumented_task(
name='sentry.tasks.integrations.sync_assignee_outbound',
queue='integrations',
default_retry_delay=60 * 5,
max_retries=5
)
@retry(exclude=(ExternalIssue.DoesNotExist, Integration.DoesNotExist, User.DoesNotExist))
def sync_assignee_outbound(external_issue_id, user_id, assign, **kwargs):
# sync Sentry assignee to an external issue
external_issue = ExternalIssue.objects.get(id=external_issue_id)
integration = Integration.objects.get(id=external_issue.integration_id)
# assume unassign if None
if user_id is None:
user = None
else:
user = User.objects.get(id=user_id)
integration.get_installation().sync_assignee_outbound(external_issue, user, assign=assign)
@instrumented_task(
name='sentry.tasks.integrations.sync_status_outbound',
queue='integrations',
default_retry_delay=60 * 5,
max_retries=5
)
@retry(exclude=(ExternalIssue.DoesNotExist, Integration.DoesNotExist))
def sync_status_outbound(group_id, external_issue_id, **kwargs):
try:
group_status = Group.objects.filter(
id=group_id,
status__in=[GroupStatus.UNRESOLVED, GroupStatus.RESOLVED],
).values_list('status', flat=True)[0]
except IndexError:
return
external_issue = ExternalIssue.objects.get(id=external_issue_id)
integration = Integration.objects.get(id=external_issue.integration_id)
integration.get_installation().sync_status_outbound(
external_issue, group_status == GroupStatus.RESOLVED,
)
@instrumented_task(
name='sentry.tasks.integrations.kick_off_status_syncs',
queue='integrations',
default_retry_delay=60 * 5,
max_retries=5
)
@retry()
def kick_off_status_syncs(project_id, group_id, **kwargs):
# doing this in a task since this has to go in the event manager
# and didn't want to introduce additional queries there
external_issue_ids = GroupLink.objects.filter(
project_id=project_id,
group_id=group_id,
linked_type=GroupLink.LinkedType.issue,
).values_list('linked_id', flat=True)
for external_issue_id in external_issue_ids:
sync_status_outbound.apply_async(
kwargs={
'group_id': group_id,
'external_issue_id': external_issue_id,
}
)
|
py | b404c23ee969940801e886baf151cf8b24ec9b3a | import pandas as pd
from sklearn import preprocessing
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
from sklearn.feature_extraction.text import TfidfVectorizer
import os
from sklearn.decomposition import TruncatedSVD
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.model_selection import KFold
from sklearn.metrics import auc
from sklearn.metrics import plot_roc_curve
from numpy import interp
#Load train data
X_origin = pd.read_csv("train_gossipcop_vol2.csv", ",")
Y = X_origin['label'].values
X_origin = X_origin['text'].values
print("Train set read.")
#Load dev data
# X_dev = pd.read_csv("dev_gossipcop_vol2.csv", ",")
# Y_dev = X_dev['label'].values
# X_dev = X_dev['text'].values
# print("Dev set read.")
stopwords = set(ENGLISH_STOP_WORDS)
svm_vectorizer = TfidfVectorizer(sublinear_tf = True, max_df = 0.29, stop_words=stopwords)
X = svm_vectorizer.fit_transform(X_origin)
print("Vectorized.")
svd = TruncatedSVD(n_components=150, algorithm='arpack', random_state=42)
print("SVD prepared.")
X = svd.fit_transform(X)
print("SVD finished.")
score_f = 0
score_a = 0
kf = KFold(n_splits=5,random_state=42, shuffle=True)
for i, (train, test) in enumerate(kf.split(X)):
X_train = X[train]
X_test = X[test]
Y_train = Y[train]
Y_test = Y[test]
#clf = SVC(random_state=42)
clf = SVC(C=10, gamma='scale', kernel='rbf', random_state=42, probability=True)
clf.fit(X_train,Y_train)
Y_predicted = clf.predict(X_test)
score_f += f1_score(Y_test,Y_predicted)
score_a += accuracy_score(Y_test,Y_predicted)
score_f /= 5
score_a /= 5
print("SVM Accuracy: " + str(score_a))
print("SVM F1 score: " + str(score_f))
knn_vectorizer = TfidfVectorizer(sublinear_tf = True, max_df = 0.56, stop_words=stopwords)
X = knn_vectorizer.fit_transform(X_origin)
print("Vectorized.")
svd = TruncatedSVD(n_components=150, algorithm='arpack', random_state=42)
print("SVD prepared.")
X = svd.fit_transform(X)
print("SVD finished.")
score_f = 0
score_a = 0
kf = KFold(n_splits=5,random_state=42, shuffle=True)
for train, test in kf.split(X):
X_train = X[train]
X_test = X[test]
Y_train = Y[train]
Y_test = Y[test]
clf = KNeighborsClassifier()
#clf = KNeighborsClassifier(n_neighbors=7, weights='distance', metric='euclidean')
clf.fit(X_train,Y_train)
Y_predicted = clf.predict(X_test)
score_f += f1_score(Y_test,Y_predicted)
score_a += accuracy_score(Y_test,Y_predicted)
score_f /= 5
score_a /= 5
print("KNN Accuracy: " + str(score_a))
print("KNN F1 score: " + str(score_f))
LR_vectorizer = TfidfVectorizer(sublinear_tf = True, max_df = 0.65, stop_words=stopwords)
X = LR_vectorizer.fit_transform(X_origin)
print("Vectorized.")
svd = TruncatedSVD(n_components=150, algorithm='arpack', random_state=42)
print("SVD prepared.")
X = svd.fit_transform(X)
print("SVD finished.")
score_f = 0
score_a = 0
kf = KFold(n_splits=5,random_state=42, shuffle=True)
for train, test in kf.split(X):
X_train = X[train]
X_test = X[test]
Y_train = Y[train]
Y_test = Y[test]
clf = LogisticRegression(random_state=42)
#clf = LogisticRegression(C = 100, penalty='l1', solver='liblinear', max_iter=1000, random_state=42)
clf.fit(X_train,Y_train)
Y_predicted = clf.predict(X_test)
score_f += f1_score(Y_test,Y_predicted)
score_a += accuracy_score(Y_test,Y_predicted)
score_f /= 5
score_a /= 5
print("LR Accuracy: " + str(score_a))
print("LR F1 score: " + str(score_f))
DT_vectorizer = TfidfVectorizer(sublinear_tf = True, max_df = 0.25, stop_words=stopwords)
X = DT_vectorizer.fit_transform(X_origin)
print("Vectorized.")
svd = TruncatedSVD(n_components=150, algorithm='arpack', random_state=42)
print("SVD prepared.")
X = svd.fit_transform(X)
print("SVD finished.")
score_f = 0
score_a = 0
kf = KFold(n_splits=5,random_state=42, shuffle=True)
for train, test in kf.split(X):
X_train = X[train]
X_test = X[test]
Y_train = Y[train]
Y_test = Y[test]
clf = DecisionTreeClassifier(random_state=42)
#clf = DecisionTreeClassifier(criterion='entropy', max_depth=7, min_samples_split=420, random_state=42)
clf.fit(X_train,Y_train)
Y_predicted = clf.predict(X_test)
score_f += f1_score(Y_test,Y_predicted)
score_a += accuracy_score(Y_test,Y_predicted)
score_f /= 5
score_a /= 5
print("DT Accuracy: " + str(score_a))
print("DT F1 score: " + str(score_f))
RF_vectorizer = TfidfVectorizer(sublinear_tf = True, max_df = 0.21, stop_words=stopwords)
X = RF_vectorizer.fit_transform(X_origin)
print("Vectorized.")
svd = TruncatedSVD(n_components=150, algorithm='arpack', random_state=42)
print("SVD prepared.")
X = svd.fit_transform(X)
print("SVD finished.")
score_f = 0
score_a = 0
kf = KFold(n_splits=5,random_state=42, shuffle=True)
for train, test in kf.split(X):
X_train = X[train]
X_test = X[test]
Y_train = Y[train]
Y_test = Y[test]
clf = RandomForestClassifier(random_state=42)
#clf = RandomForestClassifier(criterion='gini', max_depth=None, min_samples_split=2, n_estimators=180, random_state=42)
clf.fit(X_train,Y_train)
Y_predicted = clf.predict(X_test)
score_f += f1_score(Y_test,Y_predicted)
score_a += accuracy_score(Y_test,Y_predicted)
score_f /= 5
score_a /= 5
print("RF Accuracy: " + str(score_a))
print("RF F1 score: " + str(score_f)) |
py | b404c27c40716e68db37f7e1a31e9289d05a926f | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from LLC_Membranes.analysis.markov_state_dependent_dynamics import States
from LLC_Membranes.llclib import file_rw
from scipy.stats import levy_stable
import matplotlib.pyplot as plt
import numpy as np
res = 'URE'
path = "/home/bcoscia/Documents/Gromacs/Transport/NaGA3C11/%s/10wt/states.pl" % res
states = file_rw.load_object(path)
alpha, mu, sigma = states.fit_params[-1]
bins, edges = np.histogram(states.emissions[-1], bins=200, range=(-1.75, 1.75), density=True)
bin_width = edges[1] - edges[0]
centers = edges[:-1] + bin_width / 2
ratio = [bins[i] / levy_stable.pdf(x, alpha=alpha, beta=0, loc=mu, scale=sigma) for i, x in enumerate(centers)]
plt.figure(figsize=(10, 6))
plt.bar(centers, bins, bin_width, align='center', alpha=0.5, label='Empirical Distribution', color='xkcd:blue')
plt.plot(centers, levy_stable.pdf(centers, alpha=alpha, beta=0, loc=mu, scale=sigma), '--', lw=2,
color='xkcd:orange', label='Analytical PDF')
plt.plot(centers, ratio, label='Ratio of Empirical:Analytical PDF', color='red', lw=2)
plt.plot(np.linspace(-1.75, 1.75, 3), np.ones([3]), '--', color='black', label='Exact agreement')
plt.gcf().get_axes()[0].tick_params(labelsize=14)
plt.legend(fontsize=14)
plt.ylabel('Probability Density', fontsize=14)
plt.tight_layout()
plt.show()
|
py | b404c2e2e3607d5345fb0fabe1d491ac0b5085e2 | # Import flask and template operators
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
# Define the WSGI application object
app = Flask(__name__)
app.url_map.strict_slashes = False
# Configurations
app.config.from_object("config")
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = False
db = SQLAlchemy(app)
import halal.api
import halal.views
import halal.models
import halal.database
# Sample HTTP error handling
@app.errorhandler(404)
def not_found(error):
return "Error, 404", 404
|
py | b404c2f8684e8a66ba4b7be4c27c70ffc794ff6a | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 22/11/18
@author: Maurizio Ferrari Dacrema
"""
import numpy as np
import time, sys, os
from Base.Recommender_utils import check_matrix
import scipy.sparse as sps
def split_big_CSR_in_columns(sparse_matrix_to_split, num_split = 2):
"""
The function returns a list of split for the given matrix
:param sparse_matrix_to_split:
:param num_split:
:return:
"""
assert sparse_matrix_to_split.shape[1]>0, "split_big_CSR_in_columns: sparse_matrix_to_split has no columns"
assert num_split>=1 and num_split <= sparse_matrix_to_split.shape[1], "split_big_CSR_in_columns: num_split parameter not valid, value must be between 1 and {}, provided was {}".format(sparse_matrix_to_split.shape[1], num_split)
if num_split == 1:
return [sparse_matrix_to_split]
n_column_split = int(sparse_matrix_to_split.shape[1]/num_split)
sparse_matrix_split_list = []
for num_current_split in range(num_split):
start_col = n_column_split*num_current_split
if num_current_split +1 == num_split:
end_col = sparse_matrix_to_split.shape[1]
else:
end_col = n_column_split*(num_current_split + 1)
print("split_big_CSR_in_columns: Split {}, columns: {}-{}".format(num_current_split, start_col, end_col))
sparse_matrix_split_list.append(sparse_matrix_to_split[:,start_col:end_col])
return sparse_matrix_split_list
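# Illustrative example (added comment, not part of the original module): splitting a small
# 3x4 CSR matrix into two column blocks with the helper above.
#   mat = sps.csr_matrix(np.arange(12).reshape(3, 4))
#   parts = split_big_CSR_in_columns(mat, num_split=2)
#   [p.shape for p in parts]   # -> [(3, 2), (3, 2)]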
def remove_empty_rows_and_cols(URM, ICM = None):
URM = check_matrix(URM, "csr")
rows = URM.indptr
numRatings = np.ediff1d(rows)
user_mask = numRatings >= 1
URM = URM[user_mask,:]
cols = URM.tocsc().indptr
numRatings = np.ediff1d(cols)
item_mask = numRatings >= 1
URM = URM[:,item_mask]
removedUsers = np.arange(0, len(user_mask))[np.logical_not(user_mask)]
removedItems = np.arange(0, len(item_mask))[np.logical_not(item_mask)]
if ICM is not None:
ICM = ICM[item_mask,:]
return URM.tocsr(), ICM.tocsr(), removedUsers, removedItems
return URM.tocsr(), removedUsers, removedItems
from Data_manager.IncrementalSparseMatrix import IncrementalSparseMatrix, IncrementalSparseMatrix_FilterIDs
import pandas as pd
def remove_Dataframe_duplicates(dataframe, unique_values_in_columns = ['UserID', 'ItemID'], keep_highest_value_in_col ="timestamp"):
"""
:param dataframe:
:param unique_values_in_columns: List of column headers. The combination of the two will occur only once
    :param keep_highest_value_in_col: Column whose maximum value is kept when duplicates are removed
:return:
"""
# # Remove duplicates.
    # This way of removing the duplicates keeping the last timestamp without removing other columns
# would be the simplest, but it is so slow to the point of being unusable on any dataset but ML100k
# idxs = df_original.groupby(by=['UserID', 'ItemID'], as_index=False)["timestamp"].idxmax()
# df_original = df_original.loc[idxs]
# Alternative faster way:
# 1 - Sort in ascending order so that the last (bigger) timestamp is in the last position. Set Nan to be in the first position, to remove them if possible
# 2 - Then remove duplicates for user-item keeping the last row, which will be the last timestamp.
sort_by = unique_values_in_columns.copy()
sort_by.extend([keep_highest_value_in_col])
dataframe.sort_values(by=sort_by, ascending=True, inplace=True, kind="quicksort", na_position="first")
dataframe.drop_duplicates(unique_values_in_columns, keep='last', inplace=True)
n_data_points = len(dataframe[unique_values_in_columns[0]].values)
n_unique_data_points = dataframe.drop_duplicates(unique_values_in_columns, keep='first', inplace=False).shape[0]
assert n_unique_data_points == n_data_points, "remove_Dataframe_duplicates: duplicate values found"
return dataframe
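# Illustrative usage (added comment, not part of the original module), using the default
# column names of remove_Dataframe_duplicates:
#   df = pd.DataFrame({'UserID': [1, 1], 'ItemID': [7, 7], 'timestamp': [10, 20]})
#   remove_Dataframe_duplicates(df)   # keeps only the row with timestamp 20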
def load_CSV_into_Dataframe (filePath, header = False, separator="::", timestamp = False,
remove_duplicates = False,
custom_user_item_rating_columns = None):
"""
The function loads a CSV file into a Dataframe
:param filePath:
:param header: True/False the file does have a header
:param separator:
:param timestamp: True/False load the timestamp as well
    :param remove_duplicates: Remove row/column duplicates; if the timestamp is provided it keeps the most recent one,
otherwise the highest rating or interaction value.
:param custom_user_item_rating_columns: Column names for the user_id, item_id and rating value as in the file header
:return:
"""
if timestamp:
dtype={0:str, 1:str, 2:float, 3:float}
columns = ['UserID', 'ItemID', 'interaction', 'timestamp']
else:
dtype={0:str, 1:str, 2:float}
columns = ['UserID', 'ItemID', 'interaction']
df_original = pd.read_csv(filepath_or_buffer=filePath, sep=separator, header= 0 if header else None,
dtype=dtype, usecols=custom_user_item_rating_columns)
# If the original file has more columns, keep them but ignore them
df_original.columns = columns
user_id_list = df_original['UserID'].values
# Check if duplicates exist
num_unique_user_item_ids = df_original.drop_duplicates(['UserID', 'ItemID'], keep='first', inplace=False).shape[0]
contains_duplicates_flag = num_unique_user_item_ids != len(user_id_list)
if contains_duplicates_flag:
if remove_duplicates:
df_original = remove_Dataframe_duplicates(df_original, unique_values_in_columns= ['UserID', 'ItemID'],
keep_highest_value_in_col = "timestamp" if timestamp else "interaction")
else:
assert num_unique_user_item_ids == len(user_id_list), "load_CSV_into_SparseBuilder: duplicate (user, item) values found"
return df_original
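# Illustrative call (added comment, not part of the original module); the file path and
# separator are hypothetical placeholders:
#   df = load_CSV_into_Dataframe("./data/ratings.csv", header=True, separator=",",
#                                timestamp=True, remove_duplicates=True)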
def load_CSV_into_SparseBuilder (filePath, header = False, separator="::", timestamp = False, remove_duplicates = False,
custom_user_item_rating_columns = None, create_mapper = True,
preinitialized_row_mapper = None, preinitialized_col_mapper = None,
on_new_col = "add", on_new_row = "add"):
"""
The function loads a CSV file into a URM
:param filePath:
:param header: True/False the file does have a header
:param separator:
:param timestamp: True/False load the timestamp as well
    :param remove_duplicates: Remove row/column duplicates; if the timestamp is provided it keeps the most recent one,
otherwise the highest rating or interaction value.
:param custom_user_item_rating_columns: Column names for the user_id, item_id and rating value as in the file header
    :param create_mapper: If True, map the IDs into new integer indices; if False, use the original values
:param preinitialized_row_mapper: Dictionary {originalID: matrix index} to translate rowIDs into row indices (e.g., userID into user index)
    :param preinitialized_col_mapper: Dictionary {originalID: matrix index} to translate colIDs into column indices (e.g., ItemID into item index)
:return:
"""
if preinitialized_row_mapper is not None or preinitialized_col_mapper is not None:
URM_all_builder = IncrementalSparseMatrix_FilterIDs(preinitialized_col_mapper = preinitialized_col_mapper,
preinitialized_row_mapper = preinitialized_row_mapper,
on_new_col = on_new_col, on_new_row = on_new_row)
URM_timestamp_builder = IncrementalSparseMatrix_FilterIDs(preinitialized_col_mapper = preinitialized_col_mapper,
preinitialized_row_mapper = preinitialized_row_mapper,
on_new_col = on_new_col, on_new_row = on_new_row)
else:
URM_all_builder = IncrementalSparseMatrix(auto_create_col_mapper = create_mapper, auto_create_row_mapper = create_mapper)
URM_timestamp_builder = IncrementalSparseMatrix(auto_create_col_mapper = create_mapper, auto_create_row_mapper = create_mapper)
if timestamp:
dtype={0:str, 1:str, 2:float, 3:float}
columns = ['userId', 'itemId', 'interaction', 'timestamp']
else:
dtype={0:str, 1:str, 2:float}
columns = ['userId', 'itemId', 'interaction']
df_original = pd.read_csv(filepath_or_buffer=filePath, sep=separator, header= 0 if header else None,
dtype=dtype, usecols=custom_user_item_rating_columns)
# If the original file has more columns, keep them but ignore them
df_original.columns = columns
user_id_list = df_original['userId'].values
item_id_list = df_original['itemId'].values
interaction_list = df_original['interaction'].values
# Check if duplicates exist
num_unique_user_item_ids = df_original.drop_duplicates(['userId', 'itemId'], keep='first', inplace=False).shape[0]
contains_duplicates_flag = num_unique_user_item_ids != len(user_id_list)
if contains_duplicates_flag:
if remove_duplicates:
# # Remove duplicates.
            # This way of removing the duplicates keeping the last timestamp without removing other columns
# would be the simplest, but it is so slow to the point of being unusable on any dataset but ML100k
# idxs = df_original.groupby(by=['userId', 'itemId'], as_index=False)["timestamp"].idxmax()
# df_original = df_original.loc[idxs]
# Alternative faster way:
# 1 - Sort in ascending order so that the last (bigger) timestamp is in the last position. Set Nan to be in the first position, to remove them if possible
# 2 - Then remove duplicates for user-item keeping the last row, which will be the last timestamp.
if timestamp:
sort_by = ["userId", "itemId", "timestamp"]
else:
sort_by = ["userId", "itemId", 'interaction']
df_original.sort_values(by=sort_by, ascending=True, inplace=True, kind="quicksort", na_position="first")
df_original.drop_duplicates(["userId", "itemId"], keep='last', inplace=True)
user_id_list = df_original['userId'].values
item_id_list = df_original['itemId'].values
interaction_list = df_original['interaction'].values
assert num_unique_user_item_ids == len(user_id_list), "load_CSV_into_SparseBuilder: duplicate (user, item) values found"
else:
assert num_unique_user_item_ids == len(user_id_list), "load_CSV_into_SparseBuilder: duplicate (user, item) values found"
URM_all_builder.add_data_lists(user_id_list, item_id_list, interaction_list)
if timestamp:
timestamp_list = df_original['timestamp'].values
URM_timestamp_builder.add_data_lists(user_id_list, item_id_list, timestamp_list)
return URM_all_builder.get_SparseMatrix(), URM_timestamp_builder.get_SparseMatrix(), \
URM_all_builder.get_column_token_to_id_mapper(), URM_all_builder.get_row_token_to_id_mapper()
return URM_all_builder.get_SparseMatrix(), \
URM_all_builder.get_column_token_to_id_mapper(), URM_all_builder.get_row_token_to_id_mapper()
def merge_ICM(ICM1, ICM2, mapper_ICM1, mapper_ICM2):
ICM_all = sps.hstack([ICM1, ICM2], format='csr')
mapper_ICM_all = mapper_ICM1.copy()
for key in mapper_ICM2.keys():
mapper_ICM_all[key] = mapper_ICM2[key] + len(mapper_ICM1)
return ICM_all, mapper_ICM_all
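# Illustrative sketch (added comment, not part of the original module): merging two ICMs
# stacks them horizontally and offsets the second mapper by the size of the first, e.g.
#   mapper_ICM1 = {'feature_a': 0}, mapper_ICM2 = {'feature_b': 0}
#   -> merged mapper {'feature_a': 0, 'feature_b': 1}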
def compute_density(URM):
n_users, n_items = URM.shape
n_interactions = URM.nnz
# This avoids the fixed bit representation of numpy preventing
# an overflow when computing the product
n_items = float(n_items)
n_users = float(n_users)
if n_interactions == 0:
return 0.0
return n_interactions/(n_items*n_users)
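# Worked example (added comment, not part of the original module): a 10x20 URM with 50
# stored interactions has density 50 / (10 * 20) = 0.025.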
def remove_features(ICM, min_occurrence = 5, max_percentage_occurrence = 0.30, reconcile_mapper = None):
"""
    The function eliminates the features occurring in fewer than min_occurrence items
    or in more than max_percentage_occurrence of the items. The shape of the ICM is reduced by deleting those features.
:param ICM:
    :param min_occurrence:
:param max_percentage_occurrence:
:param reconcile_mapper: DICT mapper [token] -> index
:return: ICM
:return: deletedFeatures
:return: DICT mapper [token] -> index
"""
ICM = check_matrix(ICM, 'csc')
n_items = ICM.shape[0]
cols = ICM.indptr
numOccurrences = np.ediff1d(cols)
feature_mask = np.logical_and(numOccurrences >= min_occurrence, numOccurrences <= n_items * max_percentage_occurrence)
ICM = ICM[:,feature_mask]
deletedFeatures = np.arange(0, len(feature_mask))[np.logical_not(feature_mask)]
print("RemoveFeatures: removed {} features with less then {} occurrences, removed {} features with more than {} occurrencies".format(
sum(numOccurrences < min_occurrence), min_occurrence,
sum(numOccurrences > n_items * max_percentage_occurrence), int(n_items * max_percentage_occurrence)
))
if reconcile_mapper is not None:
reconcile_mapper = reconcile_mapper_with_removed_tokens(reconcile_mapper, deletedFeatures)
return ICM, deletedFeatures, reconcile_mapper
return ICM, deletedFeatures
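# Illustrative call (added comment, not part of the original module): keep only features
# occurring in at least 5 items and in at most 30% of items, updating a feature mapper:
#   ICM_reduced, deleted_features, feature_mapper = remove_features(
#       ICM, min_occurrence=5, max_percentage_occurrence=0.30, reconcile_mapper=feature_mapper)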
def reconcile_mapper_with_removed_tokens(key_to_value_dict, values_to_remove):
"""
    :param key_to_value_dict: must be a mapper of [token] -> index
    :param values_to_remove: iterable of mapper values (indices) to remove
:return:
"""
# When an index has to be removed:
# - Delete the corresponding key
# - Decrement all greater indices
# Get all values of the mapper into an array to speed-up the decrementing process
# We need a 1-to-1 association between the mapper key and the array position
# Assumptions: in dictionary mapper_dict there is a 1-to-1 association to an index
    assert len(set(key_to_value_dict.values())) == len(key_to_value_dict), "key_to_value_dict values do not have a 1-to-1 correspondence with the keys"
# The value is an index, so we can use it to be both the value and the index of an array.
# We do not assume values to be contiguous, the missing ones will be -np.inf
mapper_values_array = np.ones(max(key_to_value_dict.values())+1, dtype=np.int) * -np.inf
value_to_key = invert_dictionary(key_to_value_dict)
# Set all old indices
for key, old_index in key_to_value_dict.items():
mapper_values_array[old_index] = old_index
# Set to -np.inf all indices to be removed
# Remove keys in original dictionary
for value_to_remove in values_to_remove:
mapper_values_array[value_to_remove] = -np.inf
assert value_to_remove in value_to_key, "Value to be removed from dictionary is not in dictionary"
key_to_remove = value_to_key[value_to_remove]
del key_to_value_dict[key_to_remove]
# To update the indices, start from 0 and allocate the index n to the n-th finite value in mapper_values_array
# Use cumulative sum, each cell is equals to the number of finite (e.g. valid) cells before
# Ensure the first index is 0 and not 1
mapper_values_array_finite = np.isfinite(mapper_values_array)
mapper_values_array_new_indices = np.cumsum(mapper_values_array_finite)
mapper_values_array_new_indices -= 1
# Replace old value with new
for key, old_index in key_to_value_dict.items():
new_index = mapper_values_array_new_indices[old_index]
key_to_value_dict[key] = new_index
return key_to_value_dict
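# Worked example (added comment, not part of the original module): removing value 1 from
# {'a': 0, 'b': 1, 'c': 2} deletes key 'b' and decrements the greater index, giving
# {'a': 0, 'c': 1}.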
def download_from_URL(URL, folder_path, file_name):
import urllib
from urllib.request import urlretrieve
# If directory does not exist, create
if not os.path.exists(folder_path):
os.makedirs(folder_path)
print("Downloading: {}".format(URL))
print("In folder: {}".format(folder_path + file_name))
try:
urlretrieve (URL, folder_path + file_name, reporthook=urllretrieve_reporthook)
except urllib.request.URLError as urlerror:
print("Unable to complete automatic download, network error")
raise urlerror
sys.stdout.write("\n")
sys.stdout.flush()
def urllretrieve_reporthook(count, block_size, total_size):
global start_time_urllretrieve
if count == 0:
start_time_urllretrieve = time.time()
return
if total_size < 0 or not np.isfinite(total_size):
total_size = np.nan
duration = time.time() - start_time_urllretrieve + 1
progress_size = int(count * block_size)
speed = int(progress_size / (1024 * duration))
percent = min(float(count*block_size*100/total_size),100)
sys.stdout.write("\rDataReader: Downloaded {:.2f}%, {:.2f} MB, {:.0f} KB/s, {:.0f} seconds passed".format(
percent, progress_size / (1024 * 1024), speed, duration))
sys.stdout.flush()
def invert_dictionary(id_to_index):
index_to_id = {}
for id in id_to_index.keys():
index = id_to_index[id]
assert index not in index_to_id, "Dictionary is not invertible as it contains duplicate values."
index_to_id[index] = id
return index_to_id
def add_boolean_matrix_iterator(original_data_dict):
output_data_dict = {}
for matrix_name, matrix_object in original_data_dict.items():
output_data_dict[matrix_name] = matrix_object
if np.max(matrix_object.data) != 1.0 or np.min(matrix_object.data) != 1.0:
matrix_object_implicit = matrix_object.copy()
matrix_object_implicit.astype(np.bool, copy=True)
matrix_object_implicit.data = np.ones_like(matrix_object.data)
output_data_dict[matrix_name + "_bool"] = matrix_object_implicit
return output_data_dict |
py | b404c6257a94d1ca974a44703423b3b8d536637d | #!/usr/bin/env python3
import sys
import shutil # For git
import subprocess # For git
from datetime import datetime # For date
import getpass # For username
import socket # For hostname
class BuildObj:
"""
Build Obj will generate an string representing the current release
The output file should be in the format:
"ESBMC built from <git-hash> <date> by <username>@<hostname> (dirty-tree)?"
"""
STR_ESBMC_BUILT_FROM = "ESBMC built from "
STR_BY = "by"
STR_AT = "@"
STR_DIRTY = "(dirty tree)"
STR_NOT_GIT = "no-hash"
@staticmethod
def get_last_hash() -> str:
"""Return the hash of the latest commit"""
git = shutil.which("git")
if git is None:
return BuildObj.STR_NOT_GIT
return subprocess.check_output([git, "rev-parse", "HEAD"]).decode().strip()
@staticmethod
def get_datetime() -> str:
"""Try to simulate the `date` command"""
output = datetime.now()
return str(output)
@staticmethod
def get_username() -> str:
return str(getpass.getuser())
@staticmethod
def get_hostname() -> str:
return str(socket.gethostname())
@staticmethod
def is_dirty_tree() -> bool:
git = shutil.which("git")
if git is None:
return True
output = subprocess.check_output(
[git, "status", "-s"]).decode().splitlines()
for x in output:
if '??' not in x:
return True
return False
@staticmethod
def run(output):
with open(output, 'w') as f:
f.write(f'{BuildObj.STR_ESBMC_BUILT_FROM} ')
f.write(f'{BuildObj.get_last_hash()} ')
f.write(f'{BuildObj.get_datetime()} ')
f.write(f'{BuildObj.STR_BY} ')
f.write(f'{BuildObj.get_username()}')
f.write(f'{BuildObj.STR_AT}')
f.write(f'{BuildObj.get_hostname()}')
if BuildObj.is_dirty_tree():
f.write(f' {BuildObj.STR_DIRTY}')
def main():
if len(sys.argv) != 2:
raise ValueError("Program expects <output>")
output = sys.argv[1]
BuildObj.run(output)
if __name__ == "__main__":
main()
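# Example of the string this script writes (illustrative values only; the exact spacing
# follows the f.write calls in BuildObj.run):
#   ESBMC built from 1a2b3c4d... 2021-06-01 10:15:30.123456 by alice@buildhost (dirty tree)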
|
py | b404c7015e204ac51979d55d5408fd41bba13d54 | #import the libraries
import PIL.Image
import PIL.ImageDraw
import numpy as np
import face_recognition
import datetime, imageio,argparse, imutils, time, dlib, cv2, PIL,os
from imutils.video import VideoStream
from imutils import face_utils
from testing.utils import smoothL1, relu6, DepthwiseConv2D, mask_weights
from testing.mark_detector import MarkDetector
print(dir(imutils.skeletonize))
def landmark_image(image_file_path):
# on image
# Load the jpg file into a NumPy array
image = face_recognition.load_image_file(image_file_path)
# Find all the faces in the image
face_locations_list = face_recognition.face_locations(image)
face_landmarks_list = face_recognition.face_landmarks(image)
face_encodings_list = face_recognition.api.face_encodings(image, known_face_locations=face_locations_list)
# for face_location in face_locations_list:
# face_encoded = face_recognition.api.face_encodings(image, known_face_locations=face_location)
# face_recognition.api.compare_faces(face_encodings_list, face_encoded)
# print(face_compaired)
number_of_faces = len(face_locations_list)
print("I found {} face(s) in this photograph.".format(number_of_faces))
number_of_landmarks = len(face_landmarks_list)
print("I found {} landmarkset(s) in this photograph.".format(number_of_landmarks))
number_of_encodings = len(face_encodings_list)
print("I encoded {} face(s) in this photograph.".format(number_of_encodings))
# Load the image into a Python Image Library object so that we can draw on top of it and display it
pil_image = PIL.Image.fromarray(image)
for face_location in face_locations_list:
# Print the location of each face in this image. Each face is a list of co-ordinates in (top, right, bottom, left) order.
top, right, bottom, left = face_location
print("A face is located at pixel location Top: {}, Left: {}, Bottom: {}, Right: {}".format(top, left, bottom, right))
# Let's draw a box around the face
draw = PIL.ImageDraw.Draw(pil_image)
draw.rectangle([left, top, right, bottom], outline="red")
print(face_landmarks_list)
for face_landmarks in face_landmarks_list:
# print(face_landmarks)
# Loop over each facial feature (eye, nose, mouth, lips, etc)
for name, list_of_points in face_landmarks.items():
# print(list_of_points)
hull = np.array(face_landmarks[name])
hull_landmark = cv2.convexHull(hull)
cv2.drawContours(image, hull_landmark, -1, (0, 255, 0), 3)
# draw.rectangle([left, top, right, bottom], outline="red")
print(name)
print(face_landmarks[name][0])
cv2.circle(image, face_landmarks[name][0], 10, (0, 255, 0), 3)
# Display the image on screen
pil_image.show()
def landmark_video():
#videostream
cap = cv2.VideoCapture(0)
while True:
ret, frame = cap.read()
frame = cv2.resize(frame, (0,0), fx=1, fy=1)
# Find all facial features in all the faces in the video
face_landmarks_list = face_recognition.face_landmarks(frame)
for face_landmarks in face_landmarks_list:
# Loop over each facial feature (eye, nose, mouth, lips, etc)
for name, list_of_points in face_landmarks.items():
hull = np.array(face_landmarks[name])
hull_landmark = cv2.convexHull(hull)
cv2.drawContours(frame, hull_landmark, -1, (0, 255, 0), 3)
cv2.imshow("Frame", frame)
ch = cv2.waitKey(1)
if ch & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
if __name__ == '__main__':
    #######################housekeeping: remove old frame images####################################
os.system('sudo rm -r test_images/*')
#######################Input parse###############################################################
# constructing the argument parse and parsing the arguments
ap = argparse.ArgumentParser()
    # --shape-predictor : The path to dlib’s pre-trained facial landmark detector.
ap.add_argument("-p", "--shape-predictor",default = 'shape_predictor_68_face_landmarks.dat',
help = "path to facial landmark detector")
    # --camera : True = use the camera, False = use the --video-file input
    ap.add_argument("-c", "--camera", type = bool, default = True,
                    help = "bool switch whether videofile or camera should be used")
    # --video-file : the input video file to predict on
    ap.add_argument("-v", "--video-file", type = str, default = None,
                    help = "if --camera is set to False we need to define the input video file.")
# parse
args = vars(ap.parse_args())
###################################################
landmark_image('samples/oega.jpg') |
py | b404c8a8f6c5b2a14186c0a6afe9c0d8e51c6611 | import logging
log = logging.getLogger(__name__)
from Top import Top
import Parsers.XYZ
class Geom(Top):
def __init__(self):
self.comment = ''
self.coord = []
self.header_natoms = 0
self.props = []
self.atprops = []
self.to_kcalmol = 1.
def __nonzero__(self):
"""
Is nonzero if has at least one atom
"""
if self.coord:
return True
return False
def __len__(self):
"""
Returns number of atoms in a structure
"""
return len(self.coord)
def __getitem__(self,item):
"""
Return an atom by index
"""
return self.coord[item]
# It was not a good realization of __str__, so I blocked it
def _str__(self):
"""
Print short text information
"""
s = '%i atoms; Comment=\'%s\' ' % (len(self.coord),self.comment)
s += self.propsToString() + '\n'
return s
def addProp(self,a,v):
if not a in self.props:
self.props.append(a)
setattr(self,a,v)
def addAtProp(self,ap,visible=True):
"""
:param v: object of class AtomicProps
"""
an = ap.attrname
if visible and not an in self.atprops:
self.atprops.append(an)
setattr(self,an,ap)
def propsToString(self,ShowComment = False):
"""
Returns string representation of properties
"""
if ShowComment:
s = self.comment
else:
s = ''
for a in self.props:
s += '%s= %s ' % (a,getattr(self,a))
return s
def parseComment(self,s):
"""
Fetches parameters from a string.
        Delimiter between fields is space, and the format is 'Arg = Value' with any number of spaces before/after the equal sign
"""
result = {}
new_comment = s
# Add some aliases
syn = {
'e' : ('en','energy'),
'grad' : ('gradient',)
}
#delimiters = ('=',':')
delimiters = ('=')
for delim in delimiters:
s2 = s.replace(delim,' '+delim+' ').split()
for i in range(1,len(s2)-1):
if s2[i] == delim:
arg,val = s2[i-1],s2[i+1]
arl = str(arg).lower()
# Try to make value float
try:
val = float(val)
except ValueError:
pass
# Look for synonyms
for a in syn:
if arl in syn[a]:
arl = a
# Set new attribute
setattr(self,arl,val)
if not arl in self.props:
self.props.append(arl)
# Remove parsed parameter from comment
#new_comment = re.sub('%s\s*%s\s*%s'%(arg,delim,val),'',new_comment).strip() # TODO figure out later how to make it work with variables containing parentheses
self.comment = ''
#self.comment += new_comment.strip()
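    # Illustrative note (added comment, not part of the original module): parsing the comment
    # "Energy = -76.4 Grad = 0.001" sets self.e = -76.4 (via the 'energy' synonym) and
    # self.grad = 0.001, and records both names ('e' and 'grad') in self.props.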
def write(self,fname,vectors=None):
"""
The actual function that writes coords is in the XYZ parser,
and it takes an object of class ListGeoms as an argument.
Here, we have only one geometry, so we supposed to create
an instance of ListGeoms. However, we can make a trick
by providing a simple list instead of ListGeoms to
write coords
"""
c = Parsers.XYZ.XYZ()
webpath = c.write(fname,geoms=[self],vectors=vectors)
return webpath
|
py | b404c9879cd29ffec9825fefeaebdf0b129a8939 | import torch
import pprint
import argparse
import numpy as np
from torch.utils.tensorboard import SummaryWriter
from tianshou.policy import DQNPolicy
from tianshou.env import SubprocVectorEnv
from tianshou.utils.net.discrete import DQN
from tianshou.trainer import offpolicy_trainer
from tianshou.data import Collector, ReplayBuffer
from atari import create_atari_environment, preprocess_fn
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('--task', type=str, default='Pong')
parser.add_argument('--seed', type=int, default=1626)
parser.add_argument('--eps-test', type=float, default=0.05)
parser.add_argument('--eps-train', type=float, default=0.1)
parser.add_argument('--buffer-size', type=int, default=20000)
parser.add_argument('--lr', type=float, default=1e-3)
parser.add_argument('--gamma', type=float, default=0.9)
parser.add_argument('--n-step', type=int, default=1)
parser.add_argument('--target-update-freq', type=int, default=320)
parser.add_argument('--epoch', type=int, default=100)
parser.add_argument('--step-per-epoch', type=int, default=1000)
parser.add_argument('--collect-per-step', type=int, default=10)
parser.add_argument('--batch-size', type=int, default=64)
parser.add_argument('--layer-num', type=int, default=3)
parser.add_argument('--training-num', type=int, default=8)
parser.add_argument('--test-num', type=int, default=8)
parser.add_argument('--logdir', type=str, default='log')
parser.add_argument('--render', type=float, default=0.)
parser.add_argument(
'--device', type=str,
default='cuda' if torch.cuda.is_available() else 'cpu')
args = parser.parse_known_args()[0]
return args
def test_dqn(args=get_args()):
env = create_atari_environment(args.task)
args.state_shape = env.observation_space.shape or env.observation_space.n
args.action_shape = env.env.action_space.shape or env.env.action_space.n
# train_envs = gym.make(args.task)
train_envs = SubprocVectorEnv([
lambda: create_atari_environment(args.task)
for _ in range(args.training_num)])
# test_envs = gym.make(args.task)
test_envs = SubprocVectorEnv([
lambda: create_atari_environment(args.task)
for _ in range(args.test_num)])
# seed
np.random.seed(args.seed)
torch.manual_seed(args.seed)
train_envs.seed(args.seed)
test_envs.seed(args.seed)
# model
net = DQN(
args.state_shape[0], args.state_shape[1],
args.action_shape, args.device)
net = net.to(args.device)
optim = torch.optim.Adam(net.parameters(), lr=args.lr)
policy = DQNPolicy(
net, optim, args.gamma, args.n_step,
target_update_freq=args.target_update_freq)
# collector
train_collector = Collector(
policy, train_envs, ReplayBuffer(args.buffer_size),
preprocess_fn=preprocess_fn)
test_collector = Collector(policy, test_envs, preprocess_fn=preprocess_fn)
# policy.set_eps(1)
train_collector.collect(n_step=args.batch_size * 4)
print(len(train_collector.buffer))
# log
writer = SummaryWriter(args.logdir + '/' + 'dqn')
def stop_fn(x):
if env.env.spec.reward_threshold:
return x >= env.spec.reward_threshold
else:
return False
def train_fn(x):
policy.set_eps(args.eps_train)
def test_fn(x):
policy.set_eps(args.eps_test)
# trainer
result = offpolicy_trainer(
policy, train_collector, test_collector, args.epoch,
args.step_per_epoch, args.collect_per_step, args.test_num,
args.batch_size, train_fn=train_fn, test_fn=test_fn,
stop_fn=stop_fn, writer=writer)
train_collector.close()
test_collector.close()
if __name__ == '__main__':
pprint.pprint(result)
# Let's watch its performance!
env = create_atari_environment(args.task)
collector = Collector(policy, env, preprocess_fn=preprocess_fn)
result = collector.collect(n_episode=1, render=args.render)
print(f'Final reward: {result["rew"]}, length: {result["len"]}')
collector.close()
if __name__ == '__main__':
test_dqn(get_args())
|
py | b404ca2c4e5e4128a75e34d28b8af2c62f5ce8e2 | from fastai import * #get_transforms, imagenet_stats ,ClassificationInterpretation
#from fastai.models import resnet50
from fastai.vision import * #ImageList , cnn_learner
import pandas as pd
import numpy as np
from utilities import get_data
BASE_PATH = "./model_data"
def train():
# prep data
df = pd.read_csv("./LABELS/all.txt" , names = ["name", "label",'Cond', 'date', 'camid', 'file'], sep =" ")
df[["Cond","date","camid","file"]] = df["name"].str.split("/", n = 4,expand = True)
df["name"] = df["name"].apply(lambda x : "./PATCHES/"+x)
df_sh = df[["name", "label"]]
src= (ImageList.from_df( df_sh,".",) #Where to find the data? -> in path and its subfolders
.split_by_rand_pct() #How to split in train/valid? -> use the folders
.label_from_df(cols='label') )
data = (src.transform(get_transforms(), size=128) #Data augmentation? -> use tfms with a size of 64
.databunch()
.normalize(imagenet_stats))
    learn = cnn_learner(data, models.resnet50, model_dir=BASE_PATH).to_fp16()
learn.lr_find()
learn.recorder.plot()
lr = 0.01
learn.fit_one_cycle(2, slice(lr))
learn.recorder.plot_losses()
learn.save(BASE_PATH+"resnet_cars.h5")
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
return learn
class PREDICT:
def __init__(self):
        exists = os.path.isfile(BASE_PATH+"resnet_cars.h5")
if not exists :
train()
self.learn = cnn_learner(data, resnet50, model_dir=BASE_PATH).to_fp16()
self.learn.load(BASE_PATH+"resnet_cars.h5")
def occupied(self, image):
im = Image(pil2tensor(image,np.float32 ).div_(255))
log_preds_single = self.learn.predict(im) # Predict Imag
return log_preds_single[0].obj
def find_cars_in_slots(self, park_slots, image, plot=False, k=0) :
found = np.zeros(len(park_slots)).astype(int)
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
colors =["green","red"]
for i in range(len(park_slots)) :
outbox =park_slots.loc[i,["x2","y1","x1","y2"]].values.astype(int)
crop = image.crop(outbox)
found[i] = int(self.occupied(crop))
if plot :
draw = ImageDraw.Draw(image)
label = str(park_slots.at[i,"labels"])
label_size = draw.textsize(label, font)
left, top, right, bottom = outbox
# top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
#print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
for it in range(thickness):
draw.rectangle(
[left + it, top + it, right - it, bottom - it], outline=colors[found[i]],)# linestyle = style[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill="white")
draw.text(text_origin, label, fill=(0, 0, 0), font=font, color="b")
del draw
if plot:
plt.figure(figsize = (20,20))
plt.imshow(image)
plt.show()
plt.close()
return found
class DETECTION :
def __init__(self) :
self.image_data = get_data()
self.camera = str
self.predict = PREDICT()
def process_images(self,camera = "camera9"):
image_data = self.image_data
images = image_data[image_data["camera"] == self.camera ]["path"].values
images = np.sort(images)
park_slots = pd.read_csv("./parkings/"+camera+".csv")
occpancy = np.zeros((len(park_slots), len(images)))
for i, image in enumerate(images ):
occpancy[:,i] = self.predict.find_cars_in_slots( park_slots, image, plot=False, k=0)
return occpancy |
py | b404ca555bffa464aaa2bebc3a9f0f92a4e07282 | #! /usr/bin/env nix-shell
#! nix-shell -i python -p python3 nix nix-prefetch-git
import csv
import json
import re
import subprocess
import sys
from codecs import iterdecode
from collections import OrderedDict
from datetime import datetime
from os.path import abspath, dirname
from urllib.request import urlopen
HISTORY_URL = 'https://omahaproxy.appspot.com/history?os=linux'
DEB_URL = 'https://dl.google.com/linux/chrome/deb/pool/main/g'
BUCKET_URL = 'https://commondatastorage.googleapis.com/chromium-browser-official'
JSON_PATH = dirname(abspath(__file__)) + '/upstream-info.json'
def load_json(path):
with open(path, 'r') as f:
return json.load(f)
def nix_prefetch_url(url, algo='sha256'):
print(f'nix-prefetch-url {url}')
out = subprocess.check_output(['nix-prefetch-url', '--type', algo, url])
return out.decode('utf-8').rstrip()
def nix_prefetch_git(url, rev):
print(f'nix-prefetch-git {url} {rev}')
out = subprocess.check_output(['nix-prefetch-git', '--quiet', '--url', url, '--rev', rev])
return json.loads(out)
def get_file_revision(revision, file_path):
url = f'https://raw.githubusercontent.com/chromium/chromium/{revision}/{file_path}'
with urlopen(url) as http_response:
return http_response.read()
def get_channel_dependencies(channel):
deps = get_file_revision(channel['version'], 'DEPS')
gn_pattern = b"'gn_version': 'git_revision:([0-9a-f]{40})'"
gn_commit = re.search(gn_pattern, deps).group(1).decode()
gn = nix_prefetch_git('https://gn.googlesource.com/gn', gn_commit)
return {
'gn': {
'version': datetime.fromisoformat(gn['date']).date().isoformat(),
'url': gn['url'],
'rev': gn['rev'],
'sha256': gn['sha256']
}
}
channels = {}
last_channels = load_json(JSON_PATH)
print(f'GET {HISTORY_URL}', file=sys.stderr)
with urlopen(HISTORY_URL) as resp:
builds = csv.DictReader(iterdecode(resp, 'utf-8'))
for build in builds:
channel_name = build['channel']
# If we've already found a newer build for this channel, we're
# no longer interested in it.
if channel_name in channels:
continue
# If we're back at the last build we used, we don't need to
# keep going -- there's no new version available, and we can
# just reuse the info from last time.
if build['version'] == last_channels[channel_name]['version']:
channels[channel_name] = last_channels[channel_name]
continue
channel = {'version': build['version']}
suffix = 'unstable' if channel_name == 'dev' else channel_name
try:
channel['sha256'] = nix_prefetch_url(f'{BUCKET_URL}/chromium-{build["version"]}.tar.xz')
channel['sha256bin64'] = nix_prefetch_url(f'{DEB_URL}/google-chrome-{suffix}/google-chrome-{suffix}_{build["version"]}-1_amd64.deb')
except subprocess.CalledProcessError:
# This build isn't actually available yet. Continue to
# the next one.
continue
channel['deps'] = get_channel_dependencies(channel)
channels[channel_name] = channel
with open(JSON_PATH, 'w') as out:
def get_channel_key(item):
channel_name = item[0]
if channel_name == 'stable':
return 0
elif channel_name == 'beta':
return 1
elif channel_name == 'dev':
return 2
else:
print(f'Error: Unexpected channel: {channel_name}', file=sys.stderr)
sys.exit(1)
sorted_channels = OrderedDict(sorted(channels.items(), key=get_channel_key))
json.dump(sorted_channels, out, indent=2)
out.write('\n')
|
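# Hedged sketch of reading back the upstream-info.json written by the script above; the
# channel -> {version, sha256, sha256bin64, deps.gn} layout follows from the channels dict
# it builds, but the relative path used here is an assumption.
import json
with open('upstream-info.json') as f:
    channels = json.load(f)
for name, info in channels.items():
    gn = info['deps']['gn']
    print(f"{name}: chromium {info['version']}, gn {gn['rev'][:12]} ({gn['version']})")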
py | b404ca8528baaac423b950271ff48dc9748f85b4 | import math
# number1
import operator
def maximum_of_three_numbers():
print("get the maximum number of three numbers")
print(max(int(input("enter 1st numb: ")), int(input("enter 2nd numb: ")), int(input("enter 3rd numb: "))))
# number2
def sum_number(list_):
    # Sum all numbers in the list and report the total once, after the loop.
    sum_total = 0
    for numb in list_:
        sum_total += numb
    print(sum_total)
# number3
def multiple(list_):
print(math.prod(list_))
# number4
def reverse_a_string(name):
print(name[::-1])
# number5
def factorial(number):
print(math.factorial(number))
# number 6
def number_fall(list_):
    # Print the elements at indexes 1-6, but only if the list contains the value 3;
    # the length guard avoids an IndexError on shorter lists.
    if 3 in list_:
        for numb in range(1, min(7, len(list_))):
            print(list_[numb])
# number 7
def string_uppercase_lowercase():
    # Count the upper-case and lower-case characters in the entered string.
    name = input("Enter name: ")
    d = {"UPPER_CASE": 0, "LOWER_CASE": 0}
    for c in name:
        if c.isupper():
            d["UPPER_CASE"] += 1
        elif c.islower():
            d["LOWER_CASE"] += 1
    print("the original input is: ", name, "\n", "the number of upper case is: ", d["UPPER_CASE"], "\n",
          "the number of lower case is: ", d["LOWER_CASE"])
# number 8
def unique_element(list_):
unique_list = []
for numb in list_:
if numb not in unique_list:
unique_list.append(numb)
result = f"sample list is {list_}, the unique list is: {unique_list}"
print(result)
# number 9
def prime_number(number):
number_divisors = []
for numb in range(1, number):
if number % numb == 0:
number_divisors.append(numb)
if len(number_divisors) == 1:
print("number is a prime number")
else:
print("number is not a prime number")
print(number_divisors)
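# Alternative sketch: primality can also be tested by trial division up to the square root,
# which avoids collecting every divisor; the helper name is_prime is illustrative only.
def is_prime(number):
    if number < 2:
        return False
    for divisor in range(2, int(math.sqrt(number)) + 1):
        if number % divisor == 0:
            return False
    return True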
# number 10
def even_number(list_):
even_number_list = []
for numb in list_:
if numb % 2 == 0:
even_number_list.append(numb)
print(even_number_list)
# print(numb if numb % 2 == 0 else "", end='')
# even_number_list = [str(numb) for numb in list_ if numb % 2 == 0]
# print("-".join(even_number_list))
# number 11
def perfect_number(number):
perfect_number_list = []
for numb in range(1, number):
if number % numb == 0:
perfect_number_list.append(numb)
if sum(perfect_number_list) == number:
print("Perfect number")
else:
print("number is not a perfect number")
# number 12
def string_palindrome(list_):
    print(list_ == list_[::-1])
# number 14
def pangram(string):
    # A pangram contains every letter of the alphabet at least once.
    alphabet = "abcdefghijklmnopqrstuvwxyz"
    for i in alphabet:
        if i not in string.lower():
            print("fail")
            break
    else:
        print("pass")
# number 15
def hyphen_separated_sequence(word):
sequence = word.split('-')
sequence.sort()
print("-".join(sequence))
# number 16
def value_square_numbers():
    # Collect the perfect squares between 1 and 30; math.sqrt returns a float,
    # so check is_integer() instead of comparing types.
    number = []
    for i in range(1, 31):
        if math.sqrt(i).is_integer():
            number.append(i)
    print(number)
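# Illustrative driver for the exercises above; the sample arguments are arbitrary assumptions.
if __name__ == "__main__":
    sum_number([1, 2, 3, 4])
    reverse_a_string("palindrome")
    string_palindrome("racecar")
    pangram("the quick brown fox jumps over the lazy dog")
    value_square_numbers()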
|