ext | sha | content |
---|---|---|
py | 1a53eac9411336b4c91633bb82eb9fab8087372e | import threading
import traceback
import click
from ethereum import utils
from plasma.utils.utils import confirm_tx
from plasma.client.client import Client
from plasma.child_chain.transaction import Transaction
from plasma.config import plasma_config
CONTEXT_SETTINGS = dict(
help_option_names=['-h', '--help']
)
NULL_ADDRESS = b'\x00' * 20
@click.group(context_settings=CONTEXT_SETTINGS)
@click.pass_context
def cli(ctx):
ctx.obj = Client()
@cli.command()
@click.argument('contractaddress', required=True)
@click.argument('amount', required=True, type=int)
@click.argument('tokenid', required=True, type=int)
@click.argument('address', required=True)
@click.pass_obj
def deposit(client, contractaddress, amount, tokenid, address):
if contractaddress == "0x0":
contractaddress = NULL_ADDRESS
client.deposit(contractaddress, amount, tokenid, address)
print("Deposited {0} {1} #{2} to {3}".format(contractaddress, amount, tokenid, address))
@cli.command()
@click.argument('blknum1', type=int)
@click.argument('txindex1', type=int)
@click.argument('oindex1', type=int)
@click.argument('blknum2', type=int)
@click.argument('txindex2', type=int)
@click.argument('oindex2', type=int)
@click.argument('newowner1')
@click.argument('contractaddress1')
@click.argument('amount1', type=int)
@click.argument('tokenid1', type=int)
@click.argument('newowner2')
@click.argument('contractaddress2')
@click.argument('amount2', type=int)
@click.argument('tokenid2', type=int)
@click.argument('key1')
@click.argument('key2', required=False)
@click.pass_obj
def sendtx(client,
blknum1, txindex1, oindex1,
blknum2, txindex2, oindex2,
newowner1, contractaddress1, amount1, tokenid1,
newowner2, contractaddress2, amount2, tokenid2,
key1, key2):
if newowner1 == "0x0":
newowner1 = NULL_ADDRESS
if newowner2 == "0x0":
newowner2 = NULL_ADDRESS
if contractaddress1 == "0x0":
contractaddress1 = NULL_ADDRESS
if contractaddress2 == "0x0":
contractaddress2 = NULL_ADDRESS
if key2 is None:
key2 = key1
# Form a transaction
tx = Transaction(blknum1, txindex1, oindex1,
blknum2, txindex2, oindex2,
utils.normalize_address(newowner1), utils.normalize_address(contractaddress1), amount1, tokenid1,
utils.normalize_address(newowner2), utils.normalize_address(contractaddress2), amount2, tokenid2)
# Sign it
tx.sign1(utils.normalize_key(key1))
tx.sign2(utils.normalize_key(key2))
client.apply_transaction(tx)
print("Sent transaction")
def _submitblock(client, key, block):
# Sign the block
block.make_mutable()
normalized_key = utils.normalize_key(key)
block.sign(normalized_key)
client.submit_block(block)
print("Submitted current block")
@cli.command()
@click.argument('key', required=True)
@click.pass_obj
def submitblock(client, key):
# Get the current block, already decoded by client
block = client.get_current_block()
_submitblock(client, key, block)
def _autosubmitblock(client):
block = client.get_current_block()
if len(block.transaction_set) > 0:
_submitblock(client, plasma_config["AUTHORITY_KEY"], block)
threading.Timer(plasma_config["BLOCK_AUTO_SUMBITTER_INTERVAL"], lambda: _autosubmitblock(client)).start()
@cli.command()
@click.pass_obj
def autosubmitblock(client):
_autosubmitblock(client)
@cli.command()
@click.argument('blknum', required=True, type=int)
@click.argument('txindex', required=True, type=int)
@click.argument('oindex', required=True, type=int)
@click.argument('key1')
@click.argument('key2', required=False)
@click.pass_obj
def withdraw(client,
blknum, txindex, oindex,
key1, key2):
# Get the transaction's block, already decoded by client
block = client.get_block(blknum)
# Create a Merkle proof
tx = block.transaction_set[txindex]
block.merklize_transaction_set()
proof = block.merkle.create_membership_proof(tx.merkle_hash)
# Create the confirmation signatures
confirmSig1, confirmSig2 = b'', b''
if key1:
confirmSig1 = confirm_tx(tx, block.merkle.root, utils.normalize_key(key1))
if key2:
confirmSig2 = confirm_tx(tx, block.merkle.root, utils.normalize_key(key2))
sigs = tx.sig1 + tx.sig2 + confirmSig1 + confirmSig2
client.withdraw(blknum, txindex, oindex, tx, proof, sigs)
print('Submitted withdrawal')
@cli.command()
@click.argument('owner', required=True)
@click.argument('blknum', required=True, type=int)
@click.argument('amount', required=True, type=int)
@click.pass_obj
def withdrawdeposit(client, owner, blknum, amount):
deposit_pos = blknum * 1000000000
client.withdraw_deposit(owner, deposit_pos, amount)
print('Submitted withdrawal')
@cli.command()
@click.argument('address', required=True)
@click.pass_obj
def balance(client, address):
balance = client.get_balance(address, 'latest')
print("%s balance: %d" % (address, balance))
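# Note: the `balance` command defined below registers under the same name in
# the click group, so it overrides this single-argument variant; only
# `balance ADDRESS BLOCK` remains reachable from the CLI.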
@cli.command()
@click.argument('address', required=True)
@click.argument('block', required=True)
@click.pass_obj
def balance(client, address, block):
balance = client.get_balance(address, block)
print(balance)
@cli.command()
@click.argument('address', required=True)
@click.argument('block', required=True)
@click.pass_obj
def utxo(client, address, block):
utxo = client.get_utxo(address, block)
print(utxo)
@cli.command()
@click.pass_obj
def all_transactions(client):
all_transactions = client.get_all_transactions()
for line in all_transactions:
print(line)
if __name__ == '__main__':
cli()
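# A hypothetical invocation sketch (not part of the original file). Argument
# order follows the click declarations above; the script name, addresses and
# keys are placeholders:
#
#   python cli.py deposit 0x0 100 1 0x<owner_address>
#   python cli.py submitblock 0x<authority_key>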
|
py | 1a53eb63231dbf08e34e7948694026d0d09e5329 | from reader import PGNReader
|
py | 1a53eb65b17ca790ea0a142aa86900dffc9f4630 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this
# software and associated documentation files (the "Software"), to deal in the Software
# without restriction, including without limitation the rights to use, copy, modify,
# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# This code expects that you have AWS credentials setup per:
# https://boto3.amazonaws.com/v1/documentation/api/latest/guide/quickstart.html
from logging import basicConfig, getLogger, INFO
from boto3 import client
from pyqldbsamples.create_ledger import wait_for_active
from pyqldbsamples.delete_ledger import delete_ledger, set_deletion_protection
logger = getLogger(__name__)
basicConfig(level=INFO)
qldb_client = client('qldb')
LEDGER_NAME = 'deletion-protection-demo'
def create_with_deletion_protection(ledger_name):
"""
Create a new ledger with the specified name and with deletion protection enabled.
:type ledger_name: str
:param ledger_name: Name for the ledger to be created.
:rtype: dict
:return: Result from the request.
"""
logger.info("Let's create the ledger with name: {}...".format(ledger_name))
result = qldb_client.create_ledger(Name=ledger_name, PermissionsMode='ALLOW_ALL')
logger.info('Success. Ledger state: {}'.format(result.get('State')))
return result
def main(ledger_name=LEDGER_NAME):
"""
Demonstrate the protection of QLDB ledgers against deletion.
"""
try:
create_with_deletion_protection(ledger_name)
wait_for_active(ledger_name)
try:
delete_ledger(ledger_name)
except qldb_client.exceptions.ResourcePreconditionNotMetException:
logger.info('Ledger protected against deletions! Turning off deletion protection now.')
set_deletion_protection(ledger_name, False)
delete_ledger(ledger_name)
except Exception as e:
logger.exception('Error while updating or deleting the ledger!')
raise e
if __name__ == '__main__':
main()
|
py | 1a53eb78d5e5bab37da828dcb6d0a574780943cd | """Config flow for ReCollect Waste integration."""
from __future__ import annotations
from aiorecollect.client import Client
from aiorecollect.errors import RecollectError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_FRIENDLY_NAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from .const import CONF_PLACE_ID, CONF_SERVICE_ID, DOMAIN, LOGGER
DATA_SCHEMA = vol.Schema(
{vol.Required(CONF_PLACE_ID): str, vol.Required(CONF_SERVICE_ID): str}
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for ReCollect Waste."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> config_entries.OptionsFlow:
"""Define the config flow to handle options."""
return RecollectWasteOptionsFlowHandler(config_entry)
async def async_step_import(self, import_config: dict = None) -> dict:
"""Handle configuration via YAML import."""
return await self.async_step_user(import_config)
async def async_step_user(self, user_input: dict = None) -> dict:
"""Handle configuration via the UI."""
if user_input is None:
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors={}
)
unique_id = f"{user_input[CONF_PLACE_ID]}, {user_input[CONF_SERVICE_ID]}"
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
session = aiohttp_client.async_get_clientsession(self.hass)
client = Client(
user_input[CONF_PLACE_ID], user_input[CONF_SERVICE_ID], session=session
)
try:
await client.async_get_next_pickup_event()
except RecollectError as err:
LOGGER.error("Error during setup of integration: %s", err)
return self.async_show_form(
step_id="user",
data_schema=DATA_SCHEMA,
errors={"base": "invalid_place_or_service_id"},
)
return self.async_create_entry(
title=unique_id,
data={
CONF_PLACE_ID: user_input[CONF_PLACE_ID],
CONF_SERVICE_ID: user_input[CONF_SERVICE_ID],
},
)
class RecollectWasteOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a Recollect Waste options flow."""
def __init__(self, entry: config_entries.ConfigEntry):
"""Initialize."""
self._entry = entry
async def async_step_init(self, user_input: dict | None = None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_FRIENDLY_NAME,
default=self._entry.options.get(CONF_FRIENDLY_NAME),
): bool
}
),
)
|
py | 1a53ebd7c7bd713ee4d48da5abfbdd899c7e1823 | from json_data import write_problems
from json_data import write_tasks
from json_data import write_summary_concepts
from json_data import write_summary_qtypes
def main():
write_problems.main()
write_tasks.main()
write_summary_concepts.main()
write_summary_qtypes.main()
if __name__ == '__main__':
main() |
py | 1a53ec7422be31aa7b9a55d8e5ecff1291e251c4 | #!/usr/bin/env python
"""
Extract outline edges of a given mesh and save them into
'<original path>/edge_<original mesh file name>.vtk'
or into a user defined output file.
The outline edge is an edge for which norm(nvec1 - nvec2) < eps,
where nvec1 and nvec2 are the normal vectors of the incident facets.
"""
from __future__ import absolute_import
import numpy as nm
from scipy.sparse import coo_matrix
import sys
sys.path.append('.')
from argparse import ArgumentParser
from sfepy.base.base import output, Struct
from sfepy.base.ioutils import edit_filename
from sfepy.discrete.fem import Mesh, FEDomain
from sfepy.discrete.fem.meshio import VTKMeshIO
def merge_lines(mesh, eps=1e-18):
coors, ngroups, conns, mat_ids, ctype = mesh
conns = conns[0]
# vertices to edges map
n_v = coors.shape[0]
n_e = conns.shape[0]
row = nm.repeat(nm.arange(n_e), 2)
aux = coo_matrix((nm.ones((n_e * 2,), dtype=bool),
(row, conns.flatten())), shape=(n_e, n_v))
v2e = aux.tocsc()
n_epv = nm.diff(v2e.indptr)
# directional vectors of edges
de = coors[conns[:, 1], :] - coors[conns[:, 0], :]
de = de / nm.linalg.norm(de, axis=1)[:, nm.newaxis]
eflag = nm.ones((n_e, ), dtype=bool)
valid_e = nm.where(eflag)[0]
e_remove = []
while len(valid_e) > 0:
ie = valid_e[0]
d = de[ie]
buff = [(ie, conns[ie, 0]), (ie, conns[ie, 1])]
eflag[ie] = False # invalidate edge
while len(buff) > 0:
e, v = buff.pop(-1)
if n_epv[v] == 2:
idx = v2e.indptr[v]
aux = v2e.indices[idx]
next_e = v2e.indices[idx + 1] if aux == e else aux
if not eflag[next_e]: # valid edge?
continue
if nm.linalg.norm(de[next_e] - d) < eps\
or nm.linalg.norm(de[next_e] + d) < eps:
next_ec = conns[next_e, :]
new_v = next_ec[0] if next_ec[1] == v else next_ec[1]
idx = 0 if conns[e, 0] == v else 1
conns[e, idx] = new_v # reconnect edge
idx = v2e.indptr[new_v]
aux = v2e.indices[idx]
idx += 0 if aux == next_e else 1
v2e.indices[idx] = e # update v2e map
buff.append((e, new_v)) # continue in searching
eflag[next_e] = False # invalidate edge
e_remove.append(next_e)
valid_e = nm.where(eflag)[0]
if len(e_remove) > 0:
# remove unused edges and vertices
eflag.fill(True)
eflag[nm.asarray(e_remove)] = False
remap = -nm.ones((n_v, ), dtype=nm.int64)
remap[conns[eflag, :]] = 1
vidx = nm.where(remap > 0)[0]
remap[vidx] = nm.arange(len(vidx))
conns_new = remap[conns[eflag, :]]
return coors[vidx, :], ngroups[vidx],\
[conns_new], [mat_ids[0][eflag]], ctype
else:
return mesh
def extract_edges(mesh, eps=1e-16):
"""
Extract outline edges of a given mesh.
The outline edge is an edge for which norm(nvec_1 - nvec_2) < eps,
where nvec_1 and nvec_2 are the normal vectors of the incident facets.
Parameters
----------
mesh : Mesh
The 3D or 2D mesh.
eps : float
The tolerance parameter of the outline edge searching algorithm.
Returns
-------
mesh_out : tuple
The data of the outline mesh, Mesh.from_data() format, i.e.
(coors, ngroups, ed_conns, mat_ids, descs).
"""
domain = FEDomain('domain', mesh)
cmesh = domain.cmesh
output('Mesh - dimension: %d, vertices: %d, elements: %d'
% (mesh.dim, mesh.n_nod, mesh.n_el))
if mesh.dim == 2:
oedges = cmesh.get_surface_facets()
mesh_coors = nm.hstack([cmesh.coors,
nm.zeros((cmesh.coors.shape[0], 1))])
elif mesh.dim == 3:
cmesh.setup_connectivity(1, 2)
cmesh.setup_connectivity(3, 2)
sfaces = cmesh.get_surface_facets()
_, idxs = nm.unique(cmesh.get_conn(3, 2).indices, return_index=True)
normals = cmesh.get_facet_normals()[idxs, :]
se_map, se_off = cmesh.get_incident(1, sfaces, 2, ret_offsets=True)
sedges = nm.unique(se_map)
n_se = sedges.shape[0]
# remap surface edges to continuous range
se_remap = -nm.ones(sedges.max() + 1)
se_remap[sedges] = nm.arange(n_se)
se_map0 = se_remap[se_map]
# surface face/edge connectivity matrix (n_surf x n_edge)
n_ef = nm.diff(se_off)[0] # = 2
n_sf = se_map.shape[0] // n_ef
row = nm.repeat(nm.arange(n_sf), n_ef)
sf2e = coo_matrix((nm.ones((n_sf * n_ef,), dtype=bool),
(row, se_map0)), shape=(n_sf, n_se))
# edge to face map (n_edge x 2)
se2f = sf2e.tocsc().indices.reshape((sedges.shape[0], 2))
snormals = normals[sfaces]
err = nm.linalg.norm(snormals[se2f[:, 0]] - snormals[se2f[:, 1]],
axis=1)
oedges = sedges[nm.where(err > eps)[0]]
mesh_coors = cmesh.coors
else:
raise NotImplementedError
# save outline mesh
if oedges.shape[0] > 0:
ec_idxs = nm.unique(cmesh.get_incident(0, oedges, 1))
ed_coors = mesh_coors[ec_idxs, :]
ngroups = nm.zeros((ed_coors.shape[0],), dtype=nm.int16)
aux = cmesh.get_conn(1, 0).indices
ed_conns = aux.reshape((aux.shape[0] // 2, 2))[oedges, :]
ec_remap = -nm.ones((ec_idxs.max() + 1, ), dtype=nm.int64)
ec_remap[ec_idxs] = nm.arange(ec_idxs.shape[0])
ed_conns = ec_remap[ed_conns]
mat_ids = nm.ones((ed_conns.shape[0],), dtype=nm.int16)
mesh_out = ed_coors, ngroups, [ed_conns], [mat_ids], ['3_2']
return mesh_out
else:
raise ValueError('no outline edges found (eps=%e)!' % eps)
helps = {
'eps': 'tolerance parameter of the edge search algorithm (default: 1e-12)',
'filename-out': 'name of output file',
}
def main():
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('--eps', action='store', dest='eps',
default=1e-12, help=helps['eps'])
parser.add_argument('-o', '--filename-out',
action='store', dest='filename_out',
default=None, help=helps['filename-out'])
parser.add_argument('filename')
options = parser.parse_args()
filename = options.filename
mesh = Mesh.from_file(filename)
mesh_out = extract_edges(mesh, eps=float(options.eps))
mesh_out = merge_lines(mesh_out)
filename_out = options.filename_out
if filename_out is None:
filename_out = edit_filename(filename, prefix='edge_', new_ext='.vtk')
output('Outline mesh - vertices: %d, edges: %d, output filename: %s'
% (mesh_out[0].shape[0], mesh_out[2][0].shape[0], filename_out))
# hack to write '3_2' elements - edges
io = VTKMeshIO(None)
aux_mesh = Struct()
aux_mesh._get_io_data = lambda: mesh_out
aux_mesh.n_el = mesh_out[2][0].shape[0]
io.write(filename_out, aux_mesh)
if __name__ == '__main__':
main()
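# A hypothetical command-line sketch (not part of the original script). The
# flag names follow the ArgumentParser definitions above; the script and mesh
# file names are placeholders:
#
#   python extract_edges.py --eps 1e-10 -o edge_mesh.vtk mesh.vtk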
|
py | 1a53ee47b38497faa23b78e666d78b7fbdc9f7a3 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Tests for the Google Drive snapshots event formatter."""
import unittest
from plaso.formatters import gdrive
from tests.formatters import test_lib
class GDriveCloudEntryFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Google Drive snapshot cloud event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = gdrive.GDriveCloudEntryFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = gdrive.GDriveCloudEntryFormatter()
expected_attribute_names = [
u'path', u'shared', u'size', u'url', u'document_type']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
class GDriveLocalEntryFormatterTest(test_lib.EventFormatterTestCase):
"""Tests for the Google Drive snapshot local event formatter."""
def testInitialization(self):
"""Tests the initialization."""
event_formatter = gdrive.GDriveLocalEntryFormatter()
self.assertIsNotNone(event_formatter)
def testGetFormatStringAttributeNames(self):
"""Tests the GetFormatStringAttributeNames function."""
event_formatter = gdrive.GDriveLocalEntryFormatter()
expected_attribute_names = [u'path', u'size']
self._TestGetFormatStringAttributeNames(
event_formatter, expected_attribute_names)
# TODO: add test for GetMessages.
if __name__ == '__main__':
unittest.main()
|
py | 1a53eeba835540e67e8d96516d5bfe58249dbff9 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import six
from jacket.storage.i18n import _LW
from jacket.storage.scheduler.evaluator import evaluator
from jacket.storage.scheduler import filters
LOG = logging.getLogger(__name__)
class DriverFilter(filters.BaseHostFilter):
"""DriverFilter filters hosts based on a 'filter function' and metrics.
DriverFilter filters based on volume host's provided 'filter function'
and metrics.
"""
def host_passes(self, host_state, filter_properties):
"""Determines whether a host has a passing filter_function or not."""
stats = self._generate_stats(host_state, filter_properties)
LOG.debug("Checking host '%s'", stats['host_stats']['host'])
result = self._check_filter_function(stats)
LOG.debug("Result: %s", result)
LOG.debug("Done checking host '%s'", stats['host_stats']['host'])
return result
def _check_filter_function(self, stats):
"""Checks if a volume passes a host's filter function.
Returns a tuple in the format (filter_passing, filter_invalid).
Both values are booleans.
"""
if stats['filter_function'] is None:
LOG.debug("Filter function not set :: passing host")
return True
try:
filter_result = self._run_evaluator(stats['filter_function'],
stats)
except Exception as ex:
# Warn the admin for now that there is an error in the
# filter function.
LOG.warning(_LW("Error in filtering function "
"'%(function)s' : '%(error)s' :: failing host"),
{'function': stats['filter_function'],
'error': ex, })
return False
return filter_result
def _run_evaluator(self, func, stats):
"""Evaluates a given function using the provided available stats."""
host_stats = stats['host_stats']
host_caps = stats['host_caps']
extra_specs = stats['extra_specs']
qos_specs = stats['qos_specs']
volume_stats = stats['volume_stats']
result = evaluator.evaluate(
func,
extra=extra_specs,
stats=host_stats,
capabilities=host_caps,
volume=volume_stats,
qos=qos_specs)
return result
def _generate_stats(self, host_state, filter_properties):
"""Generates statistics from host and volume data."""
host_stats = {
'host': host_state.host,
'volume_backend_name': host_state.volume_backend_name,
'vendor_name': host_state.vendor_name,
'driver_version': host_state.driver_version,
'storage_protocol': host_state.storage_protocol,
'QoS_support': host_state.QoS_support,
'total_capacity_gb': host_state.total_capacity_gb,
'allocated_capacity_gb': host_state.allocated_capacity_gb,
'free_capacity_gb': host_state.free_capacity_gb,
'reserved_percentage': host_state.reserved_percentage,
'updated': host_state.updated,
}
host_caps = host_state.capabilities
filter_function = None
if ('filter_function' in host_caps and
host_caps['filter_function'] is not None):
filter_function = six.text_type(host_caps['filter_function'])
qos_specs = filter_properties.get('qos_specs', {})
volume_type = filter_properties.get('volume_type', {})
extra_specs = volume_type.get('extra_specs', {})
request_spec = filter_properties.get('request_spec', {})
volume_stats = request_spec.get('volume_properties', {})
stats = {
'host_stats': host_stats,
'host_caps': host_caps,
'extra_specs': extra_specs,
'qos_specs': qos_specs,
'volume_stats': volume_stats,
'volume_type': volume_type,
'filter_function': filter_function,
}
return stats
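# Illustrative sketch (assumed, not from the original module): a backend could
# advertise a filter function such as
#   filter_function = "stats.free_capacity_gb > 20 and volume.size < 100"
# The names usable in that expression ('extra', 'stats', 'capabilities',
# 'volume', 'qos') are exactly the keyword arguments passed to
# evaluator.evaluate() in _run_evaluator() above.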
|
py | 1a53eed146d05da5561ba7e556281cc9f7862362 | from functools import wraps
from logging import getLogger
from os import chdir
import sys
import click
from codev import __version__
from .debug import DebugSettings
logger = getLogger(__name__)
def configuration_with_option(configuration, configuration_option):
return ':'.join(filter(bool, (configuration, configuration_option)))
def nice_exception(func):
@wraps(func)
def nice_exception_wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
if DebugSettings.settings.show_exception:
raise
if issubclass(type(e), click.ClickException) or issubclass(type(e), RuntimeError):
raise
# TODO log traceback to some logfile
logger.error(e)
return False
return nice_exception_wrapper
def path_option(func):
@wraps(func)
def path_wrapper(path, *args, **kwargs):
chdir(path)
return func(*args, **kwargs)
return click.option('-p', '--path',
default='./',
metavar='<path to repository>',
help='path to repository')(path_wrapper)
def bool_exit_enable(func):
@wraps(func)
def bool_exit(*args, **kwargs):
value = func(*args, **kwargs)
if value:
sys.exit(0)
else:
sys.exit(1)
return bool_exit
@click.group(invoke_without_command=True)
@click.option('--version', is_flag=True, help="Show version number and exit.")
@click.pass_context
def main(ctx, version):
if version:
click.echo(__version__)
elif not ctx.invoked_subcommand:
click.echo(ctx.get_help()) |
py | 1a53ef6bcd56c6326ce3c03cf985a15456ab3829 | import praw
import os
import sys
from datetime import date
import shutil
import requests
import mimetypes
import logging
import pprint
from redvid import Downloader
logger = logging.getLogger(__name__)
class SubredditScraper():
def __init__(self, subreddit, output, batch_size=10):
mimetypes.init()
self.subreddit = subreddit
self.batch_size = batch_size
self.output = output
def scrape(self):
for submission in self.subreddit.new(limit=self.batch_size):
self.process_submission(submission, self.output)
def download_media(self, media_path, media_metadata):
for media_id, item in media_metadata.items():
if item.get("e") == "Image":
image_url = item.get("s").get("u")
filename = os.path.join(media_path, media_id + mimetypes.guess_extension(item.get("m"), strict=False))
if os.path.exists(filename):
continue
r = requests.get(image_url, stream = True)
if r.status_code == 200:
r.raw.decode_content = True
with open(filename,'wb') as f:
shutil.copyfileobj(r.raw, f)
else:
logger.warn(f"unhandled media type in media_metadata: {item.get('e')}")
def download_image(self, media_path, url):
filename = os.path.join(media_path, url.split("/")[-1])
if os.path.exists(filename):
return
r = requests.get(url, stream = True)
if r.status_code == 200:
r.raw.decode_content = True
with open(filename,'wb') as f:
shutil.copyfileobj(r.raw, f)
def download_gifv(self, media_path, url):
src_file_name = url.split("/")[-1]
file_id = os.path.splitext(src_file_name)[0]
download_url = f"https://imgur.com/download/{file_id}"
filename = os.path.join(media_path, file_id+".mp4")
if os.path.exists(filename):
return
r = requests.get(download_url, stream = True)
if r.status_code == 200:
r.raw.decode_content = True
with open(filename,'wb') as f:
shutil.copyfileobj(r.raw, f)
def download_video(self, media_path, media, url):
for media_type, item in media.items():
if media_type == "reddit_video":
if item.get("transcoding_status") != "completed":
continue
Downloader(url=url, path = os.path.abspath(media_path), max_q=True).download()
else:
logger.warn(f"unhandled media type in media_metadata: {media_type}")
def process_submission(self, submission, scraping_path):
submission_path = os.path.join(scraping_path, submission.subreddit.display_name)
if not os.path.exists(submission_path):
os.mkdir(submission_path)
submission_path = os.path.join(submission_path, submission.id)
if not os.path.exists(submission_path):
logger.debug(f"new submission {submission.id} found: {submission.title}")
os.mkdir(submission_path)
# if submission.media:
submission_media_path = os.path.join(submission_path, "media")
if hasattr(submission,"media_metadata"):
if not os.path.exists(submission_media_path):
os.mkdir(submission_media_path)
self.download_media(submission_media_path, submission.media_metadata)
elif submission.is_video:
if not os.path.exists(submission_media_path):
os.mkdir(submission_media_path)
self.download_video(submission_media_path, submission.media,submission.url)
elif submission.url.endswith(".gifv"):
if not os.path.exists(submission_media_path):
os.mkdir(submission_media_path)
self.download_gifv(submission_media_path, submission.url)
elif submission.url.endswith(".jpg") or submission.url.endswith(".png") or submission.url.endswith(".jpeg"):
if not os.path.exists(submission_media_path):
os.mkdir(submission_media_path)
self.download_image(submission_media_path, submission.url)
else:
logger.warn(f"could not process {submission.permlink}")
|
py | 1a53f0793baf771bac87cebc7acdc0db84cd0054 | from ..models import (
Pronoun,
Request,
RequestInterest,
Role,
Skill,
Goal,
SpokenLanguage,
User,
)
from .base import BaseTestCase
class TestCreateModels(BaseTestCase):
def test_role_is_created(self):
self.assertEqual(Role.objects.count(), 3)
self.assertEqual(self.admin_role.role, "ADMIN")
def test_skills_is_created(self):
self.assertEqual(Skill.objects.count(), 2)
self.assertEqual(self.beginner_python.name, "Python Beginner")
def test_language_is_created(self):
self.assertEqual(SpokenLanguage.objects.count(), 1)
self.assertEqual(self.spoken_language.name, "English")
def test_pronoun_is_created(self):
self.assertEqual(Pronoun.objects.count(), 1)
self.assertEqual(self.pronoun.pronoun, "She/Her")
def test_user_is_created(self):
self.assertEqual(User.objects.count(), 5)
self.assertTrue(hasattr(self.profile, "skills"), True)
self.assertTrue(self.profile.skills, self.beginner_python.id)
def test_request_is_created(self):
self.assertEqual(Request.objects.count(), 1)
self.assertTrue(hasattr(self.request, "mentee"), True)
self.assertTrue(self.request.skill, self.intermediate_python)
def test_request_interest_is_created(self):
self.assertEqual(RequestInterest.objects.count(), 1)
self.assertTrue(hasattr(self.request_interest, "mentor"), True)
self.assertTrue(self.request_interest.status, "OPEN")
def test_goal_is_created(self):
self.assertEqual(Goal.objects.count(), 1)
self.assertTrue(hasattr(self.goal, "goal"), True)
self.assertTrue(self.goal.goal, "Learn TypeScript")
|
py | 1a53f165c2a32f92d71094ba4c8c7e48a389a309 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
setup(name='gnumpy',
version='0.2',
description="Gnumpy is a simple Python module that interfaces in a way "
"almost identical to numpy, but does its computations on your "
"computer's GPU, using Cudamat.",
author='Tijmen Tieleman',
license='BSD-derived (see LICENSE)',
url='http://www.cs.toronto.edu/~tijmen/gnumpy.html',
) |
py | 1a53f20aab0e3a6588cd46d620edf2c605b11e87 | # Problem: https://www.hackerrank.com/challenges/python-mutations/problem
# Score: 10
def mutate_string(string, position, character):
return string[:position] + character + string[position + 1:]
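# A minimal usage sketch with assumed sample values (the classic "abracadabra"
# example): replacing the character at index 5 with "k" gives "abrackdabra".
if __name__ == '__main__':
    print(mutate_string("abracadabra", 5, "k"))  # abrackdabra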
|
py | 1a53f2f6033bc7f09de84cfade459d7dfc88367b | # MIT LICENSE
#
# Copyright 1997 - 2020 by IXIA Keysight
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class WriteAction(Base):
"""NOT DEFINED
The WriteAction class encapsulates a required writeAction resource which will be retrieved from the server every time the property is accessed.
"""
__slots__ = ()
_SDM_NAME = 'writeAction'
_SDM_ATT_MAP = {
'ExperimenterData': 'experimenterData',
'ExperimenterDataLength': 'experimenterDataLength',
'ExperimenterDataLengthMiss': 'experimenterDataLengthMiss',
'ExperimenterDataMiss': 'experimenterDataMiss',
'ExperimenterId': 'experimenterId',
'ExperimenterIdMiss': 'experimenterIdMiss',
}
def __init__(self, parent):
super(WriteAction, self).__init__(parent)
@property
def WriteActionMissType(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.writeactionmisstype_e63eee24d23bc546c8138246edb35dfe.WriteActionMissType): An instance of the WriteActionMissType class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.writeactionmisstype_e63eee24d23bc546c8138246edb35dfe import WriteActionMissType
return WriteActionMissType(self)._select()
@property
def WriteActionType(self):
"""
Returns
-------
- obj(ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.writeactiontype_419a58f4451d612a6f8c7aafe22d0a6c.WriteActionType): An instance of the WriteActionType class
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
from ixnetwork_restpy.testplatform.sessions.ixnetwork.vport.protocols.writeactiontype_419a58f4451d612a6f8c7aafe22d0a6c import WriteActionType
return WriteActionType(self)._select()
@property
def ExperimenterData(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ExperimenterData'])
@ExperimenterData.setter
def ExperimenterData(self, value):
self._set_attribute(self._SDM_ATT_MAP['ExperimenterData'], value)
@property
def ExperimenterDataLength(self):
"""
Returns
-------
- number: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ExperimenterDataLength'])
@ExperimenterDataLength.setter
def ExperimenterDataLength(self, value):
self._set_attribute(self._SDM_ATT_MAP['ExperimenterDataLength'], value)
@property
def ExperimenterDataLengthMiss(self):
"""
Returns
-------
- number: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ExperimenterDataLengthMiss'])
@ExperimenterDataLengthMiss.setter
def ExperimenterDataLengthMiss(self, value):
self._set_attribute(self._SDM_ATT_MAP['ExperimenterDataLengthMiss'], value)
@property
def ExperimenterDataMiss(self):
"""
Returns
-------
- str: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ExperimenterDataMiss'])
@ExperimenterDataMiss.setter
def ExperimenterDataMiss(self, value):
self._set_attribute(self._SDM_ATT_MAP['ExperimenterDataMiss'], value)
@property
def ExperimenterId(self):
"""
Returns
-------
- number: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ExperimenterId'])
@ExperimenterId.setter
def ExperimenterId(self, value):
self._set_attribute(self._SDM_ATT_MAP['ExperimenterId'], value)
@property
def ExperimenterIdMiss(self):
"""
Returns
-------
- number: NOT DEFINED
"""
return self._get_attribute(self._SDM_ATT_MAP['ExperimenterIdMiss'])
@ExperimenterIdMiss.setter
def ExperimenterIdMiss(self, value):
self._set_attribute(self._SDM_ATT_MAP['ExperimenterIdMiss'], value)
def update(self, ExperimenterData=None, ExperimenterDataLength=None, ExperimenterDataLengthMiss=None, ExperimenterDataMiss=None, ExperimenterId=None, ExperimenterIdMiss=None):
"""Updates writeAction resource on the server.
Args
----
- ExperimenterData (str): NOT DEFINED
- ExperimenterDataLength (number): NOT DEFINED
- ExperimenterDataLengthMiss (number): NOT DEFINED
- ExperimenterDataMiss (str): NOT DEFINED
- ExperimenterId (number): NOT DEFINED
- ExperimenterIdMiss (number): NOT DEFINED
Raises
------
- ServerError: The server has encountered an uncategorized error condition
"""
return self._update(self._map_locals(self._SDM_ATT_MAP, locals()))
|
py | 1a53f353704021ec46413d9532875e5d1a27f8af | # -*- coding: utf-8 -*-
from flask import url_for, current_app
from flask_login import current_user
from scout.server.extensions import store
def test_cases(app, institute_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# WHEN accessing the cases page
resp = client.get(url_for('cases.cases',
institute_id=institute_obj['internal_id']))
# THEN it should return a page
assert resp.status_code == 200
# test query passing parameters in search form
request_data = {
'limit' : '100',
'skip_assigned' : 'on',
'is_research' : 'on',
'query' : 'case_id'
}
resp = client.get(url_for('cases.cases',
institute_id=institute_obj['internal_id'], params=request_data))
# response should return a page
assert resp.status_code == 200
sorting_options = ['analysis_date', 'track', 'status']
for option in sorting_options:
# test query passing the sorting option to the cases view
request_data = {
'sort' : option
}
resp = client.get(url_for('cases.cases',
institute_id=institute_obj['internal_id'], params=request_data))
# response should return a page
assert resp.status_code == 200
def test_cases_query(app, case_obj, institute_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
slice_query = case_obj['display_name']
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# WHEN accessing the cases page with a query
resp = client.get(url_for('cases.cases',
query=slice_query,
institute_id=institute_obj['internal_id']))
# THEN it should return a page
assert resp.status_code == 200
def test_cases_panel_query(app, case_obj, parsed_panel, institute_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
slice_query = parsed_panel['panel_id']
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# WHEN accessing the cases page with a query
resp = client.get(url_for('cases.cases',
query=slice_query,
institute_id=institute_obj['internal_id']))
# THEN it should return a page
assert resp.status_code == 200
def test_institutes(app):
# GIVEN an initialized app
# GIVEN a valid user
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# WHEN accessing the institutes page
resp = client.get(url_for('cases.index'))
# THEN it should return a page
assert resp.status_code == 200
def test_case(app, case_obj, institute_obj):
# GIVEN an initialized app
# GIVEN a valid user, case and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# WHEN accessing the case page
resp = client.get(url_for('cases.case',
institute_id=institute_obj['internal_id'],
case_name=case_obj['display_name']))
# THEN it should return a page
assert resp.status_code == 200
def test_case_synopsis(app, institute_obj, case_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
req_data = {
'synopsis' : 'test synopsis'
}
# WHEN updating the synopsis of a case
resp = client.get(url_for('cases.case_synopsis',
institute_id=institute_obj['internal_id'],
case_name=case_obj['display_name'],
data=req_data ))
# then it should return a redirected page
assert resp.status_code == 302
def test_causatives(app, user_obj, institute_obj, case_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
# There should be no causative variants for test case:
assert 'causatives' not in case_obj
var1_id = '4c7d5c70d955875504db72ef8e1abe77' # in POT1 gene
var2_id = 'e24b65bf27feacec6a81c8e9e19bd5f1' # in TBX1 gene
var_ids = [var1_id, var2_id]
# for each variant
for var_id in var_ids:
# update case by marking variant as causative:
variant_obj = store.variant(document_id=var_id)
store.mark_causative(
institute=institute_obj,
case=case_obj,
user=user_obj,
link='causative_var_link/{}'.format(variant_obj['_id']),
variant=variant_obj
)
updated_case = store.case_collection.find_one({'_id':case_obj['_id']})
# The above variants should be registered as causatives in case object
assert updated_case['causatives'] == var_ids
# Call scout causatives view and check if the above causatives are displayed
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# WHEN accessing the case page
resp = client.get(url_for('cases.causatives',
institute_id=institute_obj['internal_id']))
# THEN it should return a page
assert resp.status_code == 200
# with variant 1
assert var1_id in str(resp.data)
# and variant 2
assert var2_id in str(resp.data)
# Filter causatives by gene (POT1)
resp = client.get(url_for('cases.causatives',
institute_id=institute_obj['internal_id'],
query='17284 | POT1 (DKFZp586D211, hPot1, POT1)'))
# THEN it should return a page
assert resp.status_code == 200
# with variant 1
assert var1_id in str(resp.data)
# but NOT variant 2
assert var2_id not in str(resp.data)
def test_case_report(app, institute_obj, case_obj):
# Test the web page containing the general case report
# GIVEN an initialized app and a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# When clicking on 'general' button on case page
resp = client.get(url_for('cases.case_report',
institute_id=institute_obj['internal_id'],
case_name=case_obj['display_name']),
)
# a successful response should be returned
assert resp.status_code == 200
def test_case_diagnosis(app, institute_obj, case_obj):
# Test the web page containing the general case report
# GIVEN an initialized app and a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
req_data = {
'omim_id' : 'OMIM:615349'
}
# When updating an OMIM diagnosis for a case
resp = client.get(url_for('cases.case_diagnosis',
institute_id=institute_obj['internal_id'],
case_name=case_obj['display_name']),
data=req_data
)
# Response should be redirected to case page
assert resp.status_code == 302
def test_pdf_case_report(app, institute_obj, case_obj):
# Test the web page containing the general case report
# GIVEN an initialized app and a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# When clicking on 'Download PDF' button on general report page
resp = client.get(url_for('cases.pdf_case_report',
institute_id=institute_obj['internal_id'],
case_name=case_obj['display_name']),
)
# a successful response should be returned
assert resp.status_code == 200
def test_clinvar_submissions(app, institute_obj):
# Test the web page containing the clinvar submissions for an institute
# GIVEN an initialized app and a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# When visiting the clinvar submission page (get request)
resp = client.get(url_for('cases.clinvar_submissions',
institute_id=institute_obj['internal_id']))
# a successful response should be returned
assert resp.status_code == 200
def test_mt_report(app, institute_obj, case_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# When clicking on 'mtDNA report' on case page
resp = client.get(url_for('cases.mt_report',
institute_id=institute_obj['internal_id'],
case_name=case_obj['display_name']),
)
# a successful response should be returned
assert resp.status_code == 200
# and it should contain a zipped file, not HTML code
assert resp.mimetype == 'application/zip'
def test_matchmaker_add(app, institute_obj, case_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# WHEN accessing the case page
resp = client.post(url_for('cases.matchmaker_add',
institute_id=institute_obj['internal_id'],
case_name=case_obj['display_name']))
# page redirects in the views anyway, so it will return a 302 code
assert resp.status_code == 302
def test_matchmaker_matches(app, institute_obj, case_obj, mme_submission):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# add MME submission to case object
store.case_collection.find_one_and_update(
{'_id' : case_obj['_id']},
{'$set' : {
'mme_submission' : mme_submission
}}
)
res = store.case_collection.find({'mme_submission':{'$exists' : True}})
assert sum(1 for i in res) == 1
# Given mock MME connection parameters
current_app.config['MME_URL'] = 'http://fakey_mme_url:fakey_port'
current_app.config['MME_TOKEN'] = 'test_token'
# WHEN accessing the case page
resp = client.get(url_for('cases.matchmaker_matches',
institute_id=institute_obj['internal_id'],
case_name=case_obj['display_name']))
# page will redirect because controllers.mme_matches
# will not be able to contact an MME server
assert resp.status_code == 302
def test_matchmaker_match(app, institute_obj, case_obj, mme_submission):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# add MME submission to case object
store.case_collection.find_one_and_update(
{'_id' : case_obj['_id']},
{'$set' : {
'mme_submission' : mme_submission
}}
)
res = store.case_collection.find({'mme_submission':{'$exists' : True}})
assert sum(1 for i in res) == 1
# WHEN accessing the case page
resp = client.post(url_for('cases.matchmaker_match',
institute_id=institute_obj['internal_id'],
case_name=case_obj['display_name'],
target='mock_node_id' ))
# page redirects in the views anyway, so it will return a 302 code
assert resp.status_code == 302
def test_matchmaker_delete(app, institute_obj, case_obj, mme_submission):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# add MME submission to case object
store.case_collection.find_one_and_update(
{'_id' : case_obj['_id']},
{'$set' : {
'mme_submission' : mme_submission
}}
)
res = store.case_collection.find({'mme_submission':{'$exists' : True}})
assert sum(1 for i in res) == 1
# WHEN accessing the case page
resp = client.post(url_for('cases.matchmaker_delete',
institute_id=institute_obj['internal_id'],
case_name=case_obj['display_name']))
# page redirects in the views anyway, so it will return a 302 code
assert resp.status_code == 302
def test_status(app, institute_obj, case_obj, user_obj):
# GIVEN an initialized app
# GIVEN a valid user and institute
with app.test_client() as client:
# GIVEN that the user could be logged in
resp = client.get(url_for('auto_login'))
assert resp.status_code == 200
# make sure test case status is inactive
assert case_obj['status'] == 'inactive'
# use status view to update status for test case
request_data = {
'status' : 'prioritized'
}
resp = client.post(url_for('cases.status',
institute_id=institute_obj['internal_id'],
case_name=case_obj['display_name'], params=request_data))
assert resp.status_code == 302 # page should be redirected
|
py | 1a53f359634fc2d75f72bea6b1fdc87444c7d737 | #----------------------------------------WEATHER APPLICATION----------------------------------------
import tkinter as tk
import requests
from tkinter import font
#--------------------------------------FUNCTION FOR DISPLAYING THE WEATHER CONDITIONS------------------------
def get_result(weather):
try:
a = weather['name']
b = weather['weather'][0]['description']
c = weather['main']['temp']
feels_like = weather['main']['feels_like']
temp_min = weather['main']['temp_min']
temp_max = weather['main']['temp_max']
pressure = weather['main']['pressure']
humidity = weather['main']['humidity']
final_str = 'City = ' + str(a) + '\nConditions = ' + str(b) + '\nTemperature = ' + str(c) + '°C' + '\nFeels like = ' + str(feels_like) + '°C' + '\nTemperature min = ' + str(temp_min) + '°C' + '\nTemperature max = ' + str(temp_max) + '°C' + '\npressure = ' + str(pressure) + '\nHumidity = ' + str(humidity) + '%'
except:
final_str = 'There was a problem in retrieving the information'
return final_str
#------------------------------------FUNCTION FOR GETTING THE REQUIRED INFORMATION---------------------------------
def get_weather(enter):
weather_key = '2140004b122066c33d4c3361cd2ef42bFEW4252423'
url = 'https://api.openweathermap.org/data/2.5/weather'
pam = {'appid': weather_key, 'q': enter, 'units': 'Metric'}
response = requests.get(url, params=pam)
weather = response.json()
print(weather)
label['text'] = get_result(weather)
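# For reference (illustrative, not part of the original script): with
# enter="London" the request above amounts to
#   https://api.openweathermap.org/data/2.5/weather?appid=<weather_key>&q=London&units=Metric
# and the decoded JSON is handed to get_result() for formatting.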
#----------------------------------------------CODE FOR THE USER INTERFACE------------------------------------
rex = tk.Tk()
rex.title('Weather')
canvas = tk.Canvas(rex, height=500, width=700)
canvas.pack()
background_image = tk.PhotoImage(file='po.png')
background_place = tk.Label(rex, image=background_image)
background_place.place(relwidth=1, relheight=1)
frame = tk.Frame(rex, bg='#C0C3CC', bd=4)
frame.place(relx=0.12, rely=0.12, relheight=0.1, relwidth=0.75)
entry = tk.Entry(frame, font=('Bahnschrift', 12))
entry.place(relx=0, rely=0, relheight=1, relwidth=0.65)
button = tk.Button(frame, text='Get Weather', font=('Bahnschrift', 12), command=lambda: get_weather(entry.get()))
button.place(relx=0.68, rely=0, relheight=1, relwidth=0.3)
down_frame = tk.Frame(rex, bg='#6BC1FF', bd=5, relief='groove')
down_frame.place(relx=0.12, rely=0.3, relheight=0.6, relwidth=0.75)
label = tk.Label(down_frame, font=('Courier', 12))
label.place(relx=0, rely=0, relheight=1, relwidth=1)
rex.mainloop() |
py | 1a53f3e7016b3b7061b24f301b89e5d2c622db1a | # (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import fnmatch
from ansible import constants as C
from ansible.module_utils.six import iteritems
from ansible.module_utils.parsing.convert_bool import boolean
from ansible.playbook.block import Block
from ansible.playbook.task import Task
from ansible.utils.display import Display
display = Display()
__all__ = ['PlayIterator']
class HostState:
def __init__(self, blocks):
self._blocks = blocks[:]
self.cur_block = 0
self.cur_regular_task = 0
self.cur_rescue_task = 0
self.cur_always_task = 0
self.cur_dep_chain = None
self.run_state = PlayIterator.ITERATING_SETUP
self.fail_state = PlayIterator.FAILED_NONE
self.pending_setup = False
self.tasks_child_state = None
self.rescue_child_state = None
self.always_child_state = None
self.did_rescue = False
self.did_start_at_task = False
def __repr__(self):
return "HostState(%r)" % self._blocks
def __str__(self):
def _run_state_to_string(n):
states = ["ITERATING_SETUP", "ITERATING_TASKS", "ITERATING_RESCUE", "ITERATING_ALWAYS", "ITERATING_COMPLETE"]
try:
return states[n]
except IndexError:
return "UNKNOWN STATE"
def _failed_state_to_string(n):
states = {1: "FAILED_SETUP", 2: "FAILED_TASKS", 4: "FAILED_RESCUE", 8: "FAILED_ALWAYS"}
if n == 0:
return "FAILED_NONE"
else:
ret = []
for i in (1, 2, 4, 8):
if n & i:
ret.append(states[i])
return "|".join(ret)
return ("HOST STATE: block=%d, task=%d, rescue=%d, always=%d, run_state=%s, fail_state=%s, pending_setup=%s, tasks child state? (%s), "
"rescue child state? (%s), always child state? (%s), did rescue? %s, did start at task? %s" % (
self.cur_block,
self.cur_regular_task,
self.cur_rescue_task,
self.cur_always_task,
_run_state_to_string(self.run_state),
_failed_state_to_string(self.fail_state),
self.pending_setup,
self.tasks_child_state,
self.rescue_child_state,
self.always_child_state,
self.did_rescue,
self.did_start_at_task,
))
def __eq__(self, other):
if not isinstance(other, HostState):
return False
for attr in ('_blocks', 'cur_block', 'cur_regular_task', 'cur_rescue_task', 'cur_always_task',
'run_state', 'fail_state', 'pending_setup', 'cur_dep_chain',
'tasks_child_state', 'rescue_child_state', 'always_child_state'):
if getattr(self, attr) != getattr(other, attr):
return False
return True
def get_current_block(self):
return self._blocks[self.cur_block]
def copy(self):
new_state = HostState(self._blocks)
new_state.cur_block = self.cur_block
new_state.cur_regular_task = self.cur_regular_task
new_state.cur_rescue_task = self.cur_rescue_task
new_state.cur_always_task = self.cur_always_task
new_state.run_state = self.run_state
new_state.fail_state = self.fail_state
new_state.pending_setup = self.pending_setup
new_state.did_rescue = self.did_rescue
new_state.did_start_at_task = self.did_start_at_task
if self.cur_dep_chain is not None:
new_state.cur_dep_chain = self.cur_dep_chain[:]
if self.tasks_child_state is not None:
new_state.tasks_child_state = self.tasks_child_state.copy()
if self.rescue_child_state is not None:
new_state.rescue_child_state = self.rescue_child_state.copy()
if self.always_child_state is not None:
new_state.always_child_state = self.always_child_state.copy()
return new_state
class PlayIterator:
# the primary running states for the play iteration
ITERATING_SETUP = 0
ITERATING_TASKS = 1
ITERATING_RESCUE = 2
ITERATING_ALWAYS = 3
ITERATING_COMPLETE = 4
# the failure states for the play iteration, which are powers
# of 2 as they may be or'ed together in certain circumstances
FAILED_NONE = 0
FAILED_SETUP = 1
FAILED_TASKS = 2
FAILED_RESCUE = 4
FAILED_ALWAYS = 8
def __init__(self, inventory, play, play_context, variable_manager, all_vars, start_at_done=False):
self._play = play
self._blocks = []
self._variable_manager = variable_manager
# Default options to gather
gather_subset = self._play.gather_subset
gather_timeout = self._play.gather_timeout
fact_path = self._play.fact_path
setup_block = Block(play=self._play)
# Gathering facts with run_once would copy the facts from one host to
# the others.
setup_block.run_once = False
setup_task = Task(block=setup_block)
setup_task.action = 'gather_facts'
setup_task.name = 'Gathering Facts'
setup_task.args = {
'gather_subset': gather_subset,
}
# Unless play is specifically tagged, gathering should 'always' run
if not self._play.tags:
setup_task.tags = ['always']
if gather_timeout:
setup_task.args['gather_timeout'] = gather_timeout
if fact_path:
setup_task.args['fact_path'] = fact_path
setup_task.set_loader(self._play._loader)
# short circuit fact gathering if the entire playbook is conditional
if self._play._included_conditional is not None:
setup_task.when = self._play._included_conditional[:]
setup_block.block = [setup_task]
setup_block = setup_block.filter_tagged_tasks(all_vars)
self._blocks.append(setup_block)
for block in self._play.compile():
new_block = block.filter_tagged_tasks(all_vars)
if new_block.has_tasks():
self._blocks.append(new_block)
self._host_states = {}
start_at_matched = False
batch = inventory.get_hosts(self._play.hosts, order=self._play.order)
self.batch_size = len(batch)
for host in batch:
self._host_states[host.name] = HostState(blocks=self._blocks)
# if we're looking to start at a specific task, iterate through
# the tasks for this host until we find the specified task
if play_context.start_at_task is not None and not start_at_done:
while True:
(s, task) = self.get_next_task_for_host(host, peek=True)
if s.run_state == self.ITERATING_COMPLETE:
break
if task.name == play_context.start_at_task or (task.name and fnmatch.fnmatch(task.name, play_context.start_at_task)) or \
task.get_name() == play_context.start_at_task or fnmatch.fnmatch(task.get_name(), play_context.start_at_task):
start_at_matched = True
break
else:
self.get_next_task_for_host(host)
# finally, reset the host's state to ITERATING_SETUP
if start_at_matched:
self._host_states[host.name].did_start_at_task = True
self._host_states[host.name].run_state = self.ITERATING_SETUP
if start_at_matched:
# we have our match, so clear the start_at_task field on the
# play context to flag that we've started at a task (and future
# plays won't try to advance)
play_context.start_at_task = None
def get_host_state(self, host):
# Since we're using the PlayIterator to carry forward failed hosts,
# in the event that a previous host was not in the current inventory
# we create a stub state for it now
if host.name not in self._host_states:
self._host_states[host.name] = HostState(blocks=[])
return self._host_states[host.name].copy()
def cache_block_tasks(self, block):
# now a noop, we've changed the way we do caching and finding of
# original task entries, but just in case any 3rd party strategies
# are using this we're leaving it here for now
return
def get_next_task_for_host(self, host, peek=False):
display.debug("getting the next task for host %s" % host.name)
s = self.get_host_state(host)
task = None
if s.run_state == self.ITERATING_COMPLETE:
display.debug("host %s is done iterating, returning" % host.name)
return (s, None)
(s, task) = self._get_next_task_from_state(s, host=host, peek=peek)
if not peek:
self._host_states[host.name] = s
display.debug("done getting next task for host %s" % host.name)
display.debug(" ^ task is: %s" % task)
display.debug(" ^ state is: %s" % s)
return (s, task)
def _get_next_task_from_state(self, state, host, peek, in_child=False):
task = None
# try and find the next task, given the current state.
while True:
# try to get the current block from the list of blocks, and
# if we run past the end of the list we know we're done with
# this block
try:
block = state._blocks[state.cur_block]
except IndexError:
state.run_state = self.ITERATING_COMPLETE
return (state, None)
if state.run_state == self.ITERATING_SETUP:
# First, we check to see if we were pending setup. If not, this is
# the first trip through ITERATING_SETUP, so we set the pending_setup
# flag and try to determine if we do in fact want to gather facts for
# the specified host.
if not state.pending_setup:
state.pending_setup = True
# Gather facts if the default is 'smart' and we have not yet
# done it for this host; or if 'explicit' and the play sets
# gather_facts to True; or if 'implicit' and the play does
# NOT explicitly set gather_facts to False.
gathering = C.DEFAULT_GATHERING
implied = self._play.gather_facts is None or boolean(self._play.gather_facts, strict=False)
if (gathering == 'implicit' and implied) or \
(gathering == 'explicit' and boolean(self._play.gather_facts, strict=False)) or \
(gathering == 'smart' and implied and not (self._variable_manager._fact_cache.get(host.name, {}).get('_ansible_facts_gathered', False))):
# The setup block is always self._blocks[0], as we inject it
# during the play compilation in __init__ above.
setup_block = self._blocks[0]
if setup_block.has_tasks() and len(setup_block.block) > 0:
task = setup_block.block[0]
else:
# This is the second trip through ITERATING_SETUP, so we clear
# the flag and move onto the next block in the list while setting
# the run state to ITERATING_TASKS
state.pending_setup = False
state.run_state = self.ITERATING_TASKS
if not state.did_start_at_task:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
elif state.run_state == self.ITERATING_TASKS:
# clear the pending setup flag, since we're past that and it didn't fail
if state.pending_setup:
state.pending_setup = False
# First, we check for a child task state that is not failed, and if we
# have one recurse into it for the next task. If we're done with the child
# state, we clear it and drop back to getting the next task from the list.
if state.tasks_child_state:
(state.tasks_child_state, task) = self._get_next_task_from_state(state.tasks_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.tasks_child_state):
# failed child state, so clear it and move into the rescue portion
state.tasks_child_state = None
self._set_failed_state(state)
else:
# get the next task recursively
if task is None or state.tasks_child_state.run_state == self.ITERATING_COMPLETE:
# we're done with the child state, so clear it and continue
# back to the top of the loop to get the next task
state.tasks_child_state = None
continue
else:
# First here, we check to see if we've failed anywhere down the chain
# of states we have, and if so we move onto the rescue portion. Otherwise,
# we check to see if we've moved past the end of the list of tasks. If so,
# we move into the always portion of the block, otherwise we get the next
# task from the list.
if self._check_failed_state(state):
state.run_state = self.ITERATING_RESCUE
elif state.cur_regular_task >= len(block.block):
state.run_state = self.ITERATING_ALWAYS
else:
task = block.block[state.cur_regular_task]
# if the current task is actually a child block, create a child
# state for us to recurse into on the next pass
if isinstance(task, Block):
state.tasks_child_state = HostState(blocks=[task])
state.tasks_child_state.run_state = self.ITERATING_TASKS
# since we've created the child state, clear the task
# so we can pick up the child state on the next pass
task = None
state.cur_regular_task += 1
elif state.run_state == self.ITERATING_RESCUE:
# The process here is identical to ITERATING_TASKS, except instead
# we move into the always portion of the block.
if host.name in self._play._removed_hosts:
self._play._removed_hosts.remove(host.name)
if state.rescue_child_state:
(state.rescue_child_state, task) = self._get_next_task_from_state(state.rescue_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.rescue_child_state):
state.rescue_child_state = None
self._set_failed_state(state)
else:
if task is None or state.rescue_child_state.run_state == self.ITERATING_COMPLETE:
state.rescue_child_state = None
continue
else:
if state.fail_state & self.FAILED_RESCUE == self.FAILED_RESCUE:
state.run_state = self.ITERATING_ALWAYS
elif state.cur_rescue_task >= len(block.rescue):
if len(block.rescue) > 0:
state.fail_state = self.FAILED_NONE
state.run_state = self.ITERATING_ALWAYS
state.did_rescue = True
else:
task = block.rescue[state.cur_rescue_task]
if isinstance(task, Block):
state.rescue_child_state = HostState(blocks=[task])
state.rescue_child_state.run_state = self.ITERATING_TASKS
task = None
state.cur_rescue_task += 1
elif state.run_state == self.ITERATING_ALWAYS:
# And again, the process here is identical to ITERATING_TASKS, except
# instead we either move onto the next block in the list, or we set the
# run state to ITERATING_COMPLETE in the event of any errors, or when we
# have hit the end of the list of blocks.
if state.always_child_state:
(state.always_child_state, task) = self._get_next_task_from_state(state.always_child_state, host=host, peek=peek, in_child=True)
if self._check_failed_state(state.always_child_state):
state.always_child_state = None
self._set_failed_state(state)
else:
if task is None or state.always_child_state.run_state == self.ITERATING_COMPLETE:
state.always_child_state = None
continue
else:
if state.cur_always_task >= len(block.always):
if state.fail_state != self.FAILED_NONE:
state.run_state = self.ITERATING_COMPLETE
else:
state.cur_block += 1
state.cur_regular_task = 0
state.cur_rescue_task = 0
state.cur_always_task = 0
state.run_state = self.ITERATING_TASKS
state.tasks_child_state = None
state.rescue_child_state = None
state.always_child_state = None
state.did_rescue = False
# we're advancing blocks, so if this was an end-of-role block we
# mark the current role complete
if block._eor and host.name in block._role._had_task_run and not in_child and not peek:
block._role._completed[host.name] = True
else:
task = block.always[state.cur_always_task]
if isinstance(task, Block):
state.always_child_state = HostState(blocks=[task])
state.always_child_state.run_state = self.ITERATING_TASKS
task = None
state.cur_always_task += 1
elif state.run_state == self.ITERATING_COMPLETE:
return (state, None)
# if something above set the task, break out of the loop now
if task:
break
return (state, task)
def _set_failed_state(self, state):
if state.run_state == self.ITERATING_SETUP:
state.fail_state |= self.FAILED_SETUP
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_TASKS:
if state.tasks_child_state is not None:
state.tasks_child_state = self._set_failed_state(state.tasks_child_state)
else:
state.fail_state |= self.FAILED_TASKS
if state._blocks[state.cur_block].rescue:
state.run_state = self.ITERATING_RESCUE
elif state._blocks[state.cur_block].always:
state.run_state = self.ITERATING_ALWAYS
else:
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_RESCUE:
if state.rescue_child_state is not None:
state.rescue_child_state = self._set_failed_state(state.rescue_child_state)
else:
state.fail_state |= self.FAILED_RESCUE
if state._blocks[state.cur_block].always:
state.run_state = self.ITERATING_ALWAYS
else:
state.run_state = self.ITERATING_COMPLETE
elif state.run_state == self.ITERATING_ALWAYS:
if state.always_child_state is not None:
state.always_child_state = self._set_failed_state(state.always_child_state)
else:
state.fail_state |= self.FAILED_ALWAYS
state.run_state = self.ITERATING_COMPLETE
return state
def mark_host_failed(self, host):
s = self.get_host_state(host)
display.debug("marking host %s failed, current state: %s" % (host, s))
s = self._set_failed_state(s)
display.debug("^ failed state is now: %s" % s)
self._host_states[host.name] = s
self._play._removed_hosts.append(host.name)
def get_failed_hosts(self):
return dict((host, True) for (host, state) in iteritems(self._host_states) if self._check_failed_state(state))
def _check_failed_state(self, state):
if state is None:
return False
elif state.run_state == self.ITERATING_RESCUE and self._check_failed_state(state.rescue_child_state):
return True
elif state.run_state == self.ITERATING_ALWAYS and self._check_failed_state(state.always_child_state):
return True
elif state.fail_state != self.FAILED_NONE:
if state.run_state == self.ITERATING_RESCUE and state.fail_state & self.FAILED_RESCUE == 0:
return False
elif state.run_state == self.ITERATING_ALWAYS and state.fail_state & self.FAILED_ALWAYS == 0:
return False
else:
return not (state.did_rescue and state.fail_state & self.FAILED_ALWAYS == 0)
elif state.run_state == self.ITERATING_TASKS and self._check_failed_state(state.tasks_child_state):
cur_block = state._blocks[state.cur_block]
if len(cur_block.rescue) > 0 and state.fail_state & self.FAILED_RESCUE == 0:
return False
else:
return True
return False
def is_failed(self, host):
s = self.get_host_state(host)
return self._check_failed_state(s)
def get_active_state(self, state):
'''
Finds the active state, recursively if necessary when there are child states.
'''
if state.run_state == self.ITERATING_TASKS and state.tasks_child_state is not None:
return self.get_active_state(state.tasks_child_state)
elif state.run_state == self.ITERATING_RESCUE and state.rescue_child_state is not None:
return self.get_active_state(state.rescue_child_state)
elif state.run_state == self.ITERATING_ALWAYS and state.always_child_state is not None:
return self.get_active_state(state.always_child_state)
return state
def is_any_block_rescuing(self, state):
'''
Given the current HostState state, determines if the current block, or any child blocks,
are in rescue mode.
'''
if state.run_state == self.ITERATING_RESCUE:
return True
if state.tasks_child_state is not None:
return self.is_any_block_rescuing(state.tasks_child_state)
return False
def get_original_task(self, host, task):
# now a noop because we've changed the way we do caching
return (None, None)
def _insert_tasks_into_state(self, state, task_list):
# if we've failed at all, or if the task list is empty, just return the current state
if state.fail_state != self.FAILED_NONE and state.run_state not in (self.ITERATING_RESCUE, self.ITERATING_ALWAYS) or not task_list:
return state
if state.run_state == self.ITERATING_TASKS:
if state.tasks_child_state:
state.tasks_child_state = self._insert_tasks_into_state(state.tasks_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.block[:state.cur_regular_task]
after = target_block.block[state.cur_regular_task:]
target_block.block = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == self.ITERATING_RESCUE:
if state.rescue_child_state:
state.rescue_child_state = self._insert_tasks_into_state(state.rescue_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.rescue[:state.cur_rescue_task]
after = target_block.rescue[state.cur_rescue_task:]
target_block.rescue = before + task_list + after
state._blocks[state.cur_block] = target_block
elif state.run_state == self.ITERATING_ALWAYS:
if state.always_child_state:
state.always_child_state = self._insert_tasks_into_state(state.always_child_state, task_list)
else:
target_block = state._blocks[state.cur_block].copy()
before = target_block.always[:state.cur_always_task]
after = target_block.always[state.cur_always_task:]
target_block.always = before + task_list + after
state._blocks[state.cur_block] = target_block
return state
def add_tasks(self, host, task_list):
self._host_states[host.name] = self._insert_tasks_into_state(self.get_host_state(host), task_list)
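# Illustrative sketch (not part of the original module): because the FAILED_* constants are
# powers of two, a host's fail_state can record several failure stages at once and is read
# back with bitwise AND, which is how _check_failed_state() inspects it:
#
#   fail_state = PlayIterator.FAILED_TASKS | PlayIterator.FAILED_RESCUE   # 2 | 4 == 6
#   bool(fail_state & PlayIterator.FAILED_RESCUE)                         # True
#   bool(fail_state & PlayIterator.FAILED_ALWAYS)                         # False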
|
py | 1a53f3faace45461460ad1572db8baa718c73952 | # -*- coding: utf-8 -*-
import copy
import logging
import warnings
from ruamel.yaml import YAML
from great_expectations.data_context.util import (
instantiate_class_from_config,
load_class,
verify_dynamic_loading_support,
)
from great_expectations.exceptions import ClassInstantiationError
from great_expectations.types import ClassConfig
logger = logging.getLogger(__name__)
yaml = YAML()
yaml.default_flow_style = False
class Datasource(object):
"""
A Datasource connects to a compute environment and one or more storage environments and produces batches of data
that Great Expectations can validate in that compute environment.
Each Datasource provides Batches connected to a specific compute environment, such as a
SQL database, a Spark cluster, or a local in-memory Pandas DataFrame.
Datasources use Batch Kwargs to specify instructions for how to access data from
relevant sources such as an existing object from a DAG runner, a SQL database, S3 bucket, or local filesystem.
To bridge the gap between those worlds, Datasources interact closely with *generators* which
    are aware of a source of data and can produce identifying information, called
"batch_kwargs" that datasources can use to get individual batches of data. They add flexibility
in how to obtain data such as with time-based partitioning, downsampling, or other techniques
appropriate for the datasource.
For example, a batch kwargs generator could produce a SQL query that logically represents "rows in the Events
table with a timestamp on February 7, 2012," which a SqlAlchemyDatasource could use to materialize
a SqlAlchemyDataset corresponding to that batch of data and ready for validation.
    Opinionated DAG managers such as airflow, dbt, prefect.io, or dagster can also act as datasources
    and/or batch kwargs generators for a more generic datasource.
When adding custom expectations by subclassing an existing DataAsset type, use the data_asset_type parameter
to configure the datasource to load and return DataAssets of the custom type.
--ge-feature-maturity-info--
id: datasource_s3
title: Datasource - S3
icon:
short_description: S3
description: Support for connecting to Amazon Web Services S3 as an external datasource.
how_to_guide_url: https://docs.greatexpectations.io/en/latest/how_to_guides/configuring_datasources/how_to_configure_a_pandas_s3_datasource.html
maturity: Production
maturity_details:
api_stability: medium
implementation_completeness: Complete
            unit_test_coverage: Complete
integration_infrastructure_test_coverage: None
documentation_completeness: Minimal/Spotty
bug_risk: Low
id: datasource_filesystem
title: Datasource - Filesystem
icon:
        short_description: File-based datasource
description: Support for using a mounted filesystem as an external datasource.
how_to_guide_url: https://docs.greatexpectations.io/en/latest/how_to_guides/configuring_datasources/how_to_configure_a_pandas_filesystem_datasource.html
maturity: Production
maturity_details:
api_stability: Medium
implementation_completeness: Complete
unit_test_coverage: Complete
integration_infrastructure_test_coverage: Partial
documentation_completeness: Partial
bug_risk: Low (Moderate for Windows users because of path issues)
id: datasource_gcs
title: Datasource - GCS
icon:
short_description: GCS
description: Support for Google Cloud Storage as an external datasource
how_to_guide_url:
maturity: Experimental
maturity_details:
            api_stability: Medium (supported via native 'gs://' syntax in Pandas and Pyspark; medium because we expect configuration to evolve)
implementation_completeness: Medium (works via passthrough, not via CLI)
unit_test_coverage: Minimal
integration_infrastructure_test_coverage: Minimal
documentation_completeness: Minimal
bug_risk: Moderate
id: datasource_azure_blob_storage
title: Datasource - Azure Blob Storage
icon:
short_description: Azure Blob Storage
description: Support for Microsoft Azure Blob Storage as an external datasource
how_to_guide_url:
maturity: In Roadmap (Sub-Experimental - "Not Impossible")
maturity_details:
            api_stability: N/A (Supported on Databricks Spark via 'wasb://' / 'wasbs://' url; requires local download first for Pandas)
implementation_completeness: Minimal
unit_test_coverage: N/A
integration_infrastructure_test_coverage: N/A
documentation_completeness: Minimal
bug_risk: Unknown
--ge-feature-maturity-info--
"""
recognized_batch_parameters = {"limit"}
@classmethod
def from_configuration(cls, **kwargs):
"""
Build a new datasource from a configuration dictionary.
Args:
**kwargs: configuration key-value pairs
Returns:
datasource (Datasource): the newly-created datasource
"""
return cls(**kwargs)
@classmethod
def build_configuration(
cls,
class_name,
module_name="great_expectations.datasource",
data_asset_type=None,
batch_kwargs_generators=None,
**kwargs
):
"""
Build a full configuration object for a datasource, potentially including batch kwargs generators with defaults.
Args:
class_name: The name of the class for which to build the config
module_name: The name of the module in which the datasource class is located
data_asset_type: A ClassConfig dictionary
batch_kwargs_generators: BatchKwargGenerators configuration dictionary
**kwargs: Additional kwargs to be part of the datasource constructor's initialization
Returns:
A complete datasource configuration.
"""
verify_dynamic_loading_support(module_name=module_name)
class_ = load_class(class_name=class_name, module_name=module_name)
configuration = class_.build_configuration(
data_asset_type=data_asset_type,
batch_kwargs_generators=batch_kwargs_generators,
**kwargs
)
return configuration
def __init__(
self,
name,
data_context=None,
data_asset_type=None,
batch_kwargs_generators=None,
**kwargs
):
"""
Build a new datasource.
Args:
name: the name for the datasource
data_context: data context to which to connect
data_asset_type (ClassConfig): the type of DataAsset to produce
batch_kwargs_generators: BatchKwargGenerators to add to the datasource
"""
self._data_context = data_context
self._name = name
if isinstance(data_asset_type, str):
warnings.warn(
"String-only configuration for data_asset_type is deprecated. Use module_name and class_name instead.",
DeprecationWarning,
)
self._data_asset_type = data_asset_type
self._datasource_config = kwargs
self._batch_kwargs_generators = {}
self._datasource_config["data_asset_type"] = data_asset_type
if batch_kwargs_generators is not None:
self._datasource_config["batch_kwargs_generators"] = batch_kwargs_generators
@property
def name(self):
"""
Property for datasource name
"""
return self._name
@property
def config(self):
return copy.deepcopy(self._datasource_config)
@property
def data_context(self):
"""
Property for attached DataContext
"""
return self._data_context
def _build_generators(self):
"""
Build batch kwargs generator objects from the datasource configuration.
Returns:
None
"""
try:
for generator in self._datasource_config["batch_kwargs_generators"].keys():
self.get_batch_kwargs_generator(generator)
except KeyError:
pass
def add_batch_kwargs_generator(self, name, class_name, **kwargs):
"""Add a BatchKwargGenerator to the datasource.
Args:
name (str): the name of the new BatchKwargGenerator to add
class_name: class of the BatchKwargGenerator to add
kwargs: additional keyword arguments will be passed directly to the new BatchKwargGenerator's constructor
Returns:
BatchKwargGenerator (BatchKwargGenerator)
"""
kwargs["class_name"] = class_name
generator = self._build_batch_kwargs_generator(**kwargs)
if "batch_kwargs_generators" not in self._datasource_config:
self._datasource_config["batch_kwargs_generators"] = dict()
self._datasource_config["batch_kwargs_generators"][name] = kwargs
return generator
def _build_batch_kwargs_generator(self, **kwargs):
"""Build a BatchKwargGenerator using the provided configuration and return the newly-built generator."""
generator = instantiate_class_from_config(
config=kwargs,
runtime_environment={"datasource": self},
config_defaults={
"module_name": "great_expectations.datasource.batch_kwargs_generator"
},
)
if not generator:
raise ClassInstantiationError(
module_name="great_expectations.datasource.batch_kwargs_generator",
package_name=None,
class_name=kwargs["class_name"],
)
return generator
def get_batch_kwargs_generator(self, name):
"""Get the (named) BatchKwargGenerator from a datasource)
Args:
name (str): name of BatchKwargGenerator (default value is 'default')
Returns:
BatchKwargGenerator (BatchKwargGenerator)
"""
if name in self._batch_kwargs_generators:
return self._batch_kwargs_generators[name]
elif (
"batch_kwargs_generators" in self._datasource_config
and name in self._datasource_config["batch_kwargs_generators"]
):
generator_config = copy.deepcopy(
self._datasource_config["batch_kwargs_generators"][name]
)
else:
raise ValueError(
"Unable to load batch kwargs generator %s -- no configuration found or invalid configuration."
% name
)
generator = self._build_batch_kwargs_generator(**generator_config)
self._batch_kwargs_generators[name] = generator
return generator
def list_batch_kwargs_generators(self):
"""List currently-configured BatchKwargGenerator for this datasource.
Returns:
List(dict): each dictionary includes "name" and "type" keys
"""
generators = []
if "batch_kwargs_generators" in self._datasource_config:
for key, value in self._datasource_config[
"batch_kwargs_generators"
].items():
generators.append({"name": key, "class_name": value["class_name"]})
return generators
def process_batch_parameters(self, limit=None, dataset_options=None):
"""Use datasource-specific configuration to translate any batch parameters into batch kwargs at the datasource
level.
Args:
limit (int): a parameter all datasources must accept to allow limiting a batch to a smaller number of rows.
dataset_options (dict): a set of kwargs that will be passed to the constructor of a dataset built using
these batch_kwargs
Returns:
batch_kwargs: Result will include both parameters passed via argument and configured parameters.
"""
batch_kwargs = self._datasource_config.get("batch_kwargs", {})
if limit is not None:
batch_kwargs["limit"] = limit
if dataset_options is not None:
# Then update with any locally-specified reader options
if not batch_kwargs.get("dataset_options"):
batch_kwargs["dataset_options"] = dict()
batch_kwargs["dataset_options"].update(dataset_options)
return batch_kwargs
def get_batch(self, batch_kwargs, batch_parameters=None):
"""Get a batch of data from the datasource.
Args:
batch_kwargs: the BatchKwargs to use to construct the batch
batch_parameters: optional parameters to store as the reference description of the batch. They should
reflect parameters that would provide the passed BatchKwargs.
Returns:
Batch
"""
raise NotImplementedError
def get_available_data_asset_names(self, batch_kwargs_generator_names=None):
"""
Returns a dictionary of data_asset_names that the specified batch kwarg
generator can provide. Note that some batch kwargs generators may not be
capable of describing specific named data assets, and some (such as
filesystem glob batch kwargs generators) require the user to configure
data asset names.
Args:
batch_kwargs_generator_names: the BatchKwargGenerator for which to get available data asset names.
Returns:
dictionary consisting of sets of generator assets available for the specified generators:
::
{
generator_name: {
names: [ (data_asset_1, data_asset_1_type), (data_asset_2, data_asset_2_type) ... ]
}
...
}
"""
available_data_asset_names = {}
if batch_kwargs_generator_names is None:
batch_kwargs_generator_names = [
generator["name"] for generator in self.list_batch_kwargs_generators()
]
elif isinstance(batch_kwargs_generator_names, str):
batch_kwargs_generator_names = [batch_kwargs_generator_names]
for generator_name in batch_kwargs_generator_names:
generator = self.get_batch_kwargs_generator(generator_name)
available_data_asset_names[
generator_name
] = generator.get_available_data_asset_names()
return available_data_asset_names
def build_batch_kwargs(
self, batch_kwargs_generator, data_asset_name=None, partition_id=None, **kwargs
):
if kwargs.get("name"):
if data_asset_name:
raise ValueError(
"Cannot provide both 'name' and 'data_asset_name'. Please use 'data_asset_name' only."
)
warnings.warn(
"name is being deprecated as a batch_parameter. Please use data_asset_name instead.",
DeprecationWarning,
)
data_asset_name = kwargs.pop("name")
generator_obj = self.get_batch_kwargs_generator(batch_kwargs_generator)
if partition_id is not None:
kwargs["partition_id"] = partition_id
return generator_obj.build_batch_kwargs(
data_asset_name=data_asset_name, **kwargs
)
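# Illustrative sketch (not part of the original module): process_batch_parameters() merges any
# configured batch_kwargs with per-call options. Values below are made up; a real deployment
# would use a concrete subclass (e.g. a Pandas- or SqlAlchemy-backed datasource), since
# Datasource.get_batch() itself raises NotImplementedError.
#
#   ds = Datasource(name="my_datasource")
#   ds.process_batch_parameters(limit=100, dataset_options={"caching": True})
#   # -> {"limit": 100, "dataset_options": {"caching": True}}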
|
py | 1a53f5b30014a03e102394d92dac55a1d24fc2b6 | # Advent of Code 2020
#
# From https://adventofcode.com/2020/day/10
#
from collections import Counter
from math import prod
import networkx as nx
import numpy as np
adapters = np.sort(np.array(list(map(int, [row.strip() for row in open('../inputs/Advent2020_10.txt', 'r')]))))
adapters = np.insert(adapters, 0, 0., axis=0)
adapters = np.append(adapters, adapters[-1] + 3)
differences = adapters[1:] - adapters[:-1]
counts = Counter(differences)
print(f"AoC 2020 Day 10, Part 1 answer is {counts[1] * counts[3]}")
graphs = []
new = True
for ix, adapter in enumerate(adapters[:-1]):
if new:
graphs.append(nx.DiGraph())
for x in range(1, 4):
if ix + x > len(adapters) - 1 or adapters[ix + x] - adapter > 3:
break
graphs[-1].add_edge(adapter, adapters[ix + x])
new = x == 1 and adapters[ix + x] - adapter == 3
paths = []
for graph in graphs:
paths.append(len(list(nx.all_simple_paths(graph, min(graph.nodes), max(graph.nodes)))))
print(f"AoC 2020 Day 10, Part 1 answer is {prod(paths)}")
|
py | 1a53f5ca0f060aa3e695017c9b0ccf77f7c342c1 | from .node import Node
class BaseFrontier:
"""Base class for Frontier Data Structure
class abstracts the addition , removal of nodes in the frontier
Attributes:
frontier (:obj:`list` of :obj:Node): list of nodes in the frontier
"""
def __init__(self):
self.frontier = []
def push(self, node: Node):
""" Handles addition of nodes to the frontier
Args:
node: Node to be added to the frontier.
Returns: None
"""
self.frontier.append(node)
def pop(self):
# handles popping of values from the frontier
raise NotImplementedError("Implement remove method in child classes")
def is_empty(self):
""" Checks if the frontier is empty.
Returns:
True if frontier is empty,False if frontier is not empty
"""
return len(self.frontier) == 0
def contains_state(self, state: any):
""" Checks if the frontier contains a specific state.
Args:
state: current state of a node.
Returns:
True if any of the nodes in the frontier have equal state,
False if none of the nodes in the frontier have equal state
"""
# check if any of the nodes in the frontier have the same
return any(node.state == state for node in self.frontier)
class StackFrontier(BaseFrontier):
"""Frontier that is implemented using a Stack A.D.S
It is based on the principle that a stack uses the LAST-IN-FIRST-OUT flow in
determining what will be popped out of the Frontier.
Hence we pop the last element added to the list
Attributes:
        frontier (:obj:`list` of :obj:`Node`): list of nodes in the frontier
"""
def pop(self):
""" Handles removal and returning of removed nodes from the frontier
Note:
This method returns the last element added to the list.
Returns:
Node which has been removed from the frontier
"""
if self.is_empty():
raise Exception("empty frontier")
# get the last element in the frontier list
node = self.frontier[-1]
# return the frontier list without the removed node
self.frontier = self.frontier[:-1]
return node
class QueueFrontier(BaseFrontier):
"""Frontier that is implemented using a Queue A.D.S
    It is based on the principle that a queue uses the FIRST-IN-FIRST-OUT flow in
determining what will be popped out of the Frontier.
Thus we pop the first element added to the list
Attributes:
frontier (:obj:`list` of :obj:Node): list of nodes in the frontier
"""
def pop(self):
"""Handles removal and returning of removed nodes from the frontier
Note:
This method returns the first element added to the list.
Returns:
Node which has been removed from the frontier
"""
if self.is_empty():
raise Exception("empty frontier")
# get the first node in the frontier
node = self.frontier[0]
# set new frontier as old frontier without the first element
self.frontier = self.frontier[1:]
return node
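# Illustrative sketch (not part of the original module): the two frontiers differ only in which
# end of the list pop() takes from. The Node(state=..., parent=..., action=...) signature below
# is assumed, not taken from node.py.
#
#   stack, queue = StackFrontier(), QueueFrontier()
#   for state in ("A", "B", "C"):
#       node = Node(state=state, parent=None, action=None)
#       stack.push(node)
#       queue.push(node)
#   stack.pop().state   # "C" -- last in, first out
#   queue.pop().state   # "A" -- first in, first out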
|
py | 1a53f5f3d33372fdf62733464292966d0cd71459 | # Copyright (c) 2014 Vlad Temian <[email protected]>
# Copyright (c) 2015-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2017 guillaume2 <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Hugo van Kemenade <[email protected]>
# Copyright (c) 2020 hippo91 <[email protected]>
# Copyright (c) 2020 Clément Pit-Claudel <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/LICENSE
"""JSON reporter"""
import json
import sys
from pylint.interfaces import IReporter
from pylint.reporters.base_reporter import BaseReporter
class JSONReporter(BaseReporter):
"""Report messages and layouts in JSON."""
__implements__ = IReporter
name = "json"
extension = "json"
def __init__(self, output=None):
BaseReporter.__init__(self, output or sys.stdout)
self.messages = []
def handle_message(self, msg):
"""Manage message of different type and in the context of path."""
self.messages.append(
{
"type": msg.category,
"module": msg.module,
"obj": msg.obj,
"line": msg.line,
"column": msg.column,
"path": msg.path,
"symbol": msg.symbol,
"message": msg.msg or "",
"message-id": msg.msg_id,
}
)
def display_messages(self, layout):
"""Launch layouts display"""
print(json.dumps(self.messages, indent=4), file=self.out)
def display_reports(self, layout):
"""Don't do anything in this reporter."""
def _display(self, layout):
"""Do nothing."""
def register(linter):
"""Register the reporter classes with the linter."""
linter.register_reporter(JSONReporter)
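# Illustrative sketch (not part of the original module): when pylint is run with
# --output-format=json, display_messages() emits a list of dicts shaped like the one built in
# handle_message(), for example:
#
#   [
#       {
#           "type": "convention",
#           "module": "example",
#           "obj": "",
#           "line": 1,
#           "column": 0,
#           "path": "example.py",
#           "symbol": "missing-module-docstring",
#           "message": "Missing module docstring",
#           "message-id": "C0114"
#       }
#   ]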
|
py | 1a53f6c6884421ace44d6f2ad18bc7dd9cdbc71f | #!/usr/bin/env python
# coding=utf-8
#
# Copyright (c) 2013-2015 First Flamingo Enterprise B.V.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# run.py
# firstflamingo/treinenaapje
#
# Created by Berend Schotanus on 11-Jan-13.
#
import sys, unittest
def main():
if len(sys.argv) == 2:
moduleName = sys.argv[1]
else:
moduleName = '*'
pattern = 'Test' + moduleName + '.py'
sys.path.insert(0, SDK_PATH)
sys.path.insert(0, CODE_PATH)
import dev_appserver
dev_appserver.fix_sys_path()
suite = unittest.loader.TestLoader().discover(TEST_PATH, pattern=pattern)
unittest.TextTestRunner(verbosity=2).run(suite)
if __name__ == '__main__':
main()
|
py | 1a53f806f1d4807f22573d01c87b4839b3d20f0d | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Registry responsible for built-in keras classes."""
import tensorflow as tf
# TODO(b/139939526): move to public API.
from tensorflow.python.keras.engine.base_layer import TensorFlowOpLayer
from tensorflow_model_optimization.python.core.sparsity.keras import prunable_layer
layers = tf.keras.layers
class PruneRegistry(object):
"""Registry responsible for built-in keras layers."""
# The keys represent built-in keras layers and the values represent the
# the variables within the layers which hold the kernel weights. This
# allows the wrapper to access and modify the weights.
_LAYERS_WEIGHTS_MAP = {
layers.ELU: [],
layers.LeakyReLU: [],
layers.ReLU: [],
layers.Softmax: [],
layers.ThresholdedReLU: [],
layers.Conv1D: ['kernel'],
layers.Conv2D: ['kernel'],
layers.Conv2DTranspose: ['kernel'],
layers.Conv3D: ['kernel'],
layers.Conv3DTranspose: ['kernel'],
layers.Cropping1D: [],
layers.Cropping2D: [],
layers.Cropping3D: [],
layers.DepthwiseConv2D: [],
layers.SeparableConv1D: ['pointwise_kernel'],
layers.SeparableConv2D: ['pointwise_kernel'],
layers.UpSampling1D: [],
layers.UpSampling2D: [],
layers.UpSampling3D: [],
layers.ZeroPadding1D: [],
layers.ZeroPadding2D: [],
layers.ZeroPadding3D: [],
layers.Activation: [],
layers.ActivityRegularization: [],
layers.Dense: ['kernel'],
layers.Dropout: [],
layers.Flatten: [],
layers.Lambda: [],
layers.Masking: [],
layers.Permute: [],
layers.RepeatVector: [],
layers.Reshape: [],
layers.SpatialDropout1D: [],
layers.SpatialDropout2D: [],
layers.SpatialDropout3D: [],
layers.Embedding: ['embeddings'],
layers.LocallyConnected1D: ['kernel'],
layers.LocallyConnected2D: ['kernel'],
layers.Add: [],
layers.Average: [],
layers.Concatenate: [],
layers.Dot: [],
layers.Maximum: [],
layers.Minimum: [],
layers.Multiply: [],
layers.Subtract: [],
layers.AlphaDropout: [],
layers.GaussianDropout: [],
layers.GaussianNoise: [],
layers.BatchNormalization: [],
layers.LayerNormalization: [],
layers.AveragePooling1D: [],
layers.AveragePooling2D: [],
layers.AveragePooling3D: [],
layers.GlobalAveragePooling1D: [],
layers.GlobalAveragePooling2D: [],
layers.GlobalAveragePooling3D: [],
layers.GlobalMaxPooling1D: [],
layers.GlobalMaxPooling2D: [],
layers.GlobalMaxPooling3D: [],
layers.MaxPooling1D: [],
layers.MaxPooling2D: [],
layers.MaxPooling3D: [],
TensorFlowOpLayer: [],
}
_RNN_CELLS_WEIGHTS_MAP = {
# Allowlist via compat.v1 and compat.v2 to support legacy TensorFlow 2.X
# behavior where the v2 RNN uses the v1 RNNCell instead of the v2 RNNCell.
# See b/145939875 for details.
tf.compat.v1.keras.layers.GRUCell: ['kernel', 'recurrent_kernel'],
tf.compat.v2.keras.layers.GRUCell: ['kernel', 'recurrent_kernel'],
tf.compat.v1.keras.layers.LSTMCell: ['kernel', 'recurrent_kernel'],
tf.compat.v2.keras.layers.LSTMCell: ['kernel', 'recurrent_kernel'],
tf.compat.v1.keras.experimental.PeepholeLSTMCell: [
'kernel', 'recurrent_kernel'
],
tf.compat.v2.keras.experimental.PeepholeLSTMCell: [
'kernel', 'recurrent_kernel'
],
tf.compat.v1.keras.layers.SimpleRNNCell: ['kernel', 'recurrent_kernel'],
tf.compat.v2.keras.layers.SimpleRNNCell: ['kernel', 'recurrent_kernel'],
}
_RNN_LAYERS = frozenset({
layers.GRU,
layers.LSTM,
layers.RNN,
layers.SimpleRNN,
})
  _RNN_CELLS_STR = ', '.join(str(cell) for cell in _RNN_CELLS_WEIGHTS_MAP.keys())
_RNN_CELL_ERROR_MSG = (
      'RNN Layer {} contains cell type {} which is either not supported or does '
'not inherit PrunableLayer. The cell must be one of {}, or implement '
'PrunableLayer.')
@classmethod
def supports(cls, layer):
"""Returns whether the registry supports this layer type.
Args:
layer: The layer to check for support.
Returns:
True/False whether the layer type is supported.
"""
if layer.__class__ in cls._LAYERS_WEIGHTS_MAP:
return True
if layer.__class__ in cls._RNN_LAYERS:
for cell in cls._get_rnn_cells(layer):
if cell.__class__ not in cls._RNN_CELLS_WEIGHTS_MAP \
and not isinstance(cell, prunable_layer.PrunableLayer):
return False
return True
return False
@staticmethod
def _get_rnn_cells(rnn_layer):
if isinstance(rnn_layer.cell, layers.StackedRNNCells):
return rnn_layer.cell.cells
else:
return [rnn_layer.cell]
@classmethod
def _is_rnn_layer(cls, layer):
return layer.__class__ in cls._RNN_LAYERS
@classmethod
def _weight_names(cls, layer):
return cls._LAYERS_WEIGHTS_MAP[layer.__class__]
@classmethod
def make_prunable(cls, layer):
"""Modifies a built-in layer object to support pruning.
Args:
layer: layer to modify for support.
Returns:
The modified layer object.
"""
if not cls.supports(layer):
raise ValueError('Layer ' + str(layer.__class__) + ' is not supported.')
def get_prunable_weights():
return [getattr(layer, weight) for weight in cls._weight_names(layer)]
def get_prunable_weights_rnn(): # pylint: disable=missing-docstring
def get_prunable_weights_rnn_cell(cell):
if cell.__class__ in cls._RNN_CELLS_WEIGHTS_MAP:
return [getattr(cell, weight)
for weight in cls._RNN_CELLS_WEIGHTS_MAP[cell.__class__]]
if isinstance(cell, prunable_layer.PrunableLayer):
return cell.get_prunable_weights()
raise ValueError(cls._RNN_CELL_ERROR_MSG.format(
layer.__class__, cell.__class__, cls._RNN_CELLS_WEIGHTS_MAP.keys()))
prunable_weights = []
for rnn_cell in cls._get_rnn_cells(layer):
prunable_weights.extend(get_prunable_weights_rnn_cell(rnn_cell))
return prunable_weights
if cls._is_rnn_layer(layer):
layer.get_prunable_weights = get_prunable_weights_rnn
else:
layer.get_prunable_weights = get_prunable_weights
return layer
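# Illustrative sketch (not part of the original module): make_prunable() patches a supported
# layer in place with a get_prunable_weights() callable.
#
#   layer = tf.keras.layers.Dense(10)
#   PruneRegistry.supports(layer)          # True -- 'kernel' is listed for Dense above
#   PruneRegistry.make_prunable(layer)
#   layer.build(input_shape=(None, 4))     # the kernel variable must exist before reading it
#   layer.get_prunable_weights()           # [layer.kernel]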
|
py | 1a53fa42fddbd70c802408c4f3f4602afa73f6ed | def create_lr_scheduler(optimizer, config, max_epochs, num_training_instances):
if 'lr-scheduler' not in config:
return MyNoneScheduler(optimizer)
elif config['lr-scheduler']['type'] == 'linear-decay':
return MyLinearDecayScheduler(optimizer, config['lr-scheduler'], max_epochs, num_training_instances)
else:
raise BaseException("no such scheduler:", config['lr-scheduler']['type'])
class MyLinearDecayScheduler:
def __init__(self, optimizer, config, num_epoch, steps_per_epoch=1):
self.optimizer = optimizer
self.lrate0 = config['lrate0']
self.gamma = config['gamma']
self.t0 = config['t0'] * steps_per_epoch
self.t1 = config['t1'] * steps_per_epoch
self.t = 1
self.lrate = 0
def step(self):
self.t += 1
if self.t <= self.t0:
self.lrate = self.lrate0
elif self.t <= self.t1:
fraction = (self.t - self.t0) / (self.t1 - self.t0)
self.lrate = self.lrate0 * (self.gamma * fraction + 1.0 * (1 - fraction))
for group in self.optimizer.param_groups:
group['lr'] = self.lrate
return self.lrate
class MyNoneScheduler:
def __init__(self, optimizer):
self.optimizer = optimizer
def step(self):
for group in self.optimizer.param_groups:
return group['lr']
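# Illustrative sketch (not part of the original module): with a 'linear-decay' entry the rate is
# held at lrate0 until step t0 * steps_per_epoch, then scaled linearly down to gamma * lrate0 by
# step t1 * steps_per_epoch (num_training_instances is passed through as steps_per_epoch). The
# config values below are made up; `optimizer` is anything exposing param_groups, e.g. a
# torch.optim optimizer.
#
#   config = {'lr-scheduler': {'type': 'linear-decay',
#                              'lrate0': 0.1, 'gamma': 0.01, 't0': 5, 't1': 50}}
#   scheduler = create_lr_scheduler(optimizer, config, max_epochs=60, num_training_instances=100)
#   for _ in range(total_steps):
#       optimizer.step()
#       scheduler.step()    # writes the updated rate into every param group's 'lr'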
|
py | 1a53fa5b19e55674f9d3f4ea7c77652da9be2dda | from .base_dataset import Dataset
import numpy as np
import pandas as pd
import os.path
class CSVDataset(Dataset):
"""
CSVDataset class.
    Provide access to a housing prices dataset stored as a CSV file.
"""
def __init__(self, target_column, transform=None, mode="train", input_data=None, *args, **kwargs):
super().__init__(*args, **kwargs)
# The name of the .csv dataset file should be the same as the name
# of the archive, but with a different extension.
if input_data is not None:
self.df = input_data
else:
name_prefix = self.dataset_zip_name[:self.dataset_zip_name.find('.')]
dataset_csv_name = name_prefix + '.csv'
data_path = os.path.join(self.root_path, dataset_csv_name)
self.df = pd.read_csv(data_path)
self.target_column = target_column
# split the dataset into train - val - test with the ratio 60 - 20 - 20
assert mode in ["train", "val", "test"], "wrong mode for dataset given"
train, val, test = np.split(self.df.sample(frac=1, random_state=0), [
int(.6 * len(self.df)), int(.8 * len(self.df))])
if mode == "train":
self.df = train
elif mode == "val":
self.df = val
elif mode == "test":
self.df = test
self.data = self.df.loc[:, self.df.columns != self.target_column]
self.targets = self.df[self.target_column]
self.transforms = transform if transform is not None else lambda x: x
        # Deliberately inject a missing value into the first row; the transforms below
        # impute NaNs with the column mean. Use .loc to avoid chained-indexing assignment,
        # which silently writes to a temporary copy.
        self.data.loc[self.data.index[0], 'OverallQual'] = np.nan
def __len__(self):
return len(self.data)
def __getitem__(self, index):
"""
Create a dict of the data at the given index in your dataset.
The dict should have the following format:
{ "features" : <i-th row of the dataframe (except TARGET_COLUMN)>,
"label" : <value of TARGET_COLUMN for i-th row> }
"""
data_dict = {}
data_dict['features'] = self.data.iloc[index]
data_dict['target'] = self.targets.iloc[index]
return self.transforms(data_dict)
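    # Illustrative sketch (not part of the original class): for a dataset built as in
    # get_exercise5_transform() below, indexing returns the transformed dict, e.g.
    #
    #   sample = train_dataset[0]
    #   sample['features']   # selected feature values for that row
    #   sample['target']     # the row's SalePrice (normalized by the transform)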
class FeatureSelectorAndNormalizationTransform:
"""
Select some numerical features and normalize them between 0 and 1.
"""
def __init__(self, column_stats, target_column):
"""
:param column_stats: a dictionary mapping the column name to the
relevant statistics for normalization (min and max on that column).
It should also include the statistics for the target column.
"""
self.column_stats = column_stats
self.target_column = target_column
def __call__(self, data_dict):
def normalize_column(old_value, column_name):
mn = self.column_stats[column_name]['min']
mx = self.column_stats[column_name]['max']
return (old_value - mn) / (mx - mn)
# For every feature column, normalize it if it's one of the columns
# we want to keep.
feature_columns = []
for column_idx in data_dict['features'].index:
if column_idx in self.column_stats and column_idx != self.target_column:
feature_columns.append(column_idx)
if np.isnan(data_dict['features'][column_idx]):
mean_col_val = self.column_stats[column_idx]['mean']
data_dict['features'][column_idx] = mean_col_val
old_value = data_dict['features'][column_idx]
normalized = normalize_column(old_value, column_idx)
data_dict['features'][column_idx] = normalized
# Drop the rest of the columns.
data_dict['features'] = data_dict['features'][feature_columns]
data_dict['features'] = data_dict['features'].values.astype(np.float32)
# Also normalize the target.
old_value = data_dict['target']
normalized = normalize_column(old_value, self.target_column)
data_dict['target'] = np.array([normalized])
return data_dict
class FeatureSelectorTransform:
"""
Select some numerical features and not normalize them, just return their old values.
This class is used for the binarized data to convert it to the correct format of CSVDataset object
so that it could be loaded by our dataloader
"""
def __init__(self, column_stats, target_column):
"""
:param column_stats: a dictionary mapping the column name to the
relevant statistics for normalization (min and max on that column).
It should also include the statistics for the target column.
"""
self.column_stats = column_stats
self.target_column = target_column
def __call__(self, data_dict):
# For every feature column, just keep it old values
feature_columns = []
for column_idx in data_dict['features'].index:
if column_idx in self.column_stats and column_idx != self.target_column:
feature_columns.append(column_idx)
if np.isnan(data_dict['features'][column_idx]):
mean_col_val = self.column_stats[column_idx]['mean']
data_dict['features'][column_idx] = mean_col_val
data_dict['features'] = data_dict['features'][feature_columns]
data_dict['features'] = data_dict['features'].values.astype(np.float32)
data_dict['target'] = np.array([data_dict['target']])
return data_dict
def get_exercise5_transform():
# dataloading and preprocessing steps as in ex04 2_logistic_regression.ipynb
target_column = 'SalePrice'
i2dl_exercises_path = os.path.dirname(os.path.abspath(os.getcwd()))
root_path = os.path.join(i2dl_exercises_path, "datasets", 'housing')
housing_file_path = os.path.join(root_path, "housing_train.csv")
download_url = 'https://cdn3.vision.in.tum.de/~dl4cv/housing_train.zip'
# Always make sure this line was run at least once before trying to
# access the data manually, as the data is downloaded in the
# constructor of CSVDataset.
train_dataset = CSVDataset(target_column=target_column, root=root_path, download_url=download_url, mode="train")
# For the data transformations, compute min, max and mean for each feature column. We perform the same transformation
# on the training, validation, and test data.
df = train_dataset.df
# Select only 2 features to keep plus the target column.
selected_columns = ['OverallQual', 'GrLivArea', target_column]
# selected_columns = ['GrLivArea', target_column]
mn, mx, mean = df.min(), df.max(), df.mean()
column_stats = {}
for column in selected_columns:
crt_col_stats = {'min': mn[column],
'max': mx[column],
'mean': mean[column]}
column_stats[column] = crt_col_stats
transform = FeatureSelectorAndNormalizationTransform(column_stats, target_column)
return transform |
py | 1a53fb2aa7db328dd843fb066649756d70f7e5b9 | """Cart-related ORM models."""
from collections import namedtuple
from decimal import Decimal
from uuid import uuid4
from django.conf import settings
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from django.urls import reverse
from django.utils.encoding import smart_str
from django.utils.timezone import now
from django_prices.models import PriceField
from jsonfield import JSONField
from prices import Price
from satchless.item import ItemLine, ItemList, partition
from . import CartStatus, logger
CENTS = Decimal('0.01')
SimpleCart = namedtuple('SimpleCart', ('quantity', 'total', 'token'))
def find_open_cart_for_user(user):
"""Find an open cart for the given user."""
carts = user.carts.open()
if len(carts) > 1:
logger.warning('%s has more than one open basket', user)
for cart in carts[1:]:
cart.change_status(CartStatus.CANCELED)
return carts.first()
class ProductGroup(ItemList):
"""A group of products."""
def is_shipping_required(self):
"""Return `True` if any product in group requires shipping."""
return any(p.is_shipping_required() for p in self)
class CartQueryset(models.QuerySet):
"""A specialized queryset for dealing with carts."""
def anonymous(self):
"""Return unassigned carts."""
return self.filter(user=None)
def open(self):
"""Return `OPEN` carts."""
return self.filter(status=CartStatus.OPEN)
def saved(self):
"""Return `SAVED` carts."""
return self.filter(status=CartStatus.SAVED)
def waiting_for_payment(self):
"""Return `SAVED_FOR_PAYMENT` carts."""
return self.filter(status=CartStatus.WAITING_FOR_PAYMENT)
def checkout(self):
"""Return carts in `CHECKOUT` state."""
return self.filter(status=CartStatus.CHECKOUT)
def canceled(self):
"""Return `CANCELED` carts."""
return self.filter(status=CartStatus.CANCELED)
def for_display(self):
"""Annotate the queryset for display purposes.
Prefetches additional data from the database to avoid the n+1 queries
problem.
"""
return self.prefetch_related(
'lines__variant__product__category',
'lines__variant__product__images',
'lines__variant__product__product_type__product_attributes__values', # noqa
'lines__variant__product__product_type__variant_attributes__values', # noqa
'lines__variant__stock')
class Cart(models.Model):
"""A shopping cart."""
status = models.CharField(
max_length=32, choices=CartStatus.CHOICES, default=CartStatus.OPEN)
created = models.DateTimeField(auto_now_add=True)
last_status_change = models.DateTimeField(auto_now_add=True)
user = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True, related_name='carts',
on_delete=models.CASCADE)
email = models.EmailField(blank=True, null=True)
token = models.UUIDField(primary_key=True, default=uuid4, editable=False)
voucher = models.ForeignKey(
'discount.Voucher', null=True, related_name='+',
on_delete=models.SET_NULL)
checkout_data = JSONField(null=True, editable=False)
total = PriceField(
currency=settings.DEFAULT_CURRENCY, max_digits=12, decimal_places=2,
default=0)
quantity = models.PositiveIntegerField(default=0)
objects = CartQueryset.as_manager()
class Meta:
ordering = ('-last_status_change',)
def __init__(self, *args, **kwargs):
self.discounts = kwargs.pop('discounts', None)
super().__init__(*args, **kwargs)
def update_quantity(self):
"""Recalculate cart quantity based on lines."""
total_lines = self.count()['total_quantity']
if not total_lines:
total_lines = 0
self.quantity = total_lines
self.save(update_fields=['quantity'])
def change_status(self, status):
"""Change cart status."""
# FIXME: investigate replacing with django-fsm transitions
if status not in dict(CartStatus.CHOICES):
raise ValueError('Not expected status')
if status != self.status:
self.status = status
self.last_status_change = now()
self.save()
def change_user(self, user):
"""Assign cart to a user.
If the user already has an open cart assigned, cancel it.
"""
open_cart = find_open_cart_for_user(user)
if open_cart is not None:
open_cart.change_status(status=CartStatus.CANCELED)
self.user = user
self.save(update_fields=['user'])
def is_shipping_required(self):
"""Return `True` if any of the lines requires shipping."""
return any(line.is_shipping_required() for line in self.lines.all())
def __repr__(self):
return 'Cart(quantity=%s)' % (self.quantity,)
def __len__(self):
return self.lines.count()
# pylint: disable=R0201
def get_subtotal(self, item, **kwargs):
"""Return the cost of a cart line."""
return item.get_total(**kwargs)
def get_total(self, **kwargs):
"""Return the total cost of the cart prior to shipping."""
subtotals = [
self.get_subtotal(item, **kwargs) for item in self.lines.all()]
if not subtotals:
raise AttributeError('Calling get_total() on an empty item set')
zero = Price(0, currency=settings.DEFAULT_CURRENCY)
return sum(subtotals, zero)
def count(self):
"""Return the total quantity in cart."""
lines = self.lines.all()
return lines.aggregate(total_quantity=models.Sum('quantity'))
def is_empty(self):
"""Look if every line is empty.
Note that `self.count()['total_quantity']` can be None.
"""
return (self.count()['total_quantity'] or 0) < 1
def clear(self):
"""Remove the cart."""
self.delete()
def create_line(self, variant, quantity, data):
"""Create a cart line for given variant, quantity and optional data.
The `data` parameter may be used to differentiate between items with
different customization options.
"""
return self.lines.create(
variant=variant, quantity=quantity, data=data or {})
def get_line(self, variant, data=None):
"""Return a line matching the given variant and data if any."""
all_lines = self.lines.all()
if data is None:
data = {}
line = [
line for line in all_lines
if line.variant_id == variant.id and line.data == data]
if line:
return line[0]
return None
def add(self, variant, quantity=1, data=None, replace=False,
check_quantity=True):
"""Add a product vartiant to cart.
The `data` parameter may be used to differentiate between items with
different customization options.
If `replace` is truthy then any previous quantity is discarded instead
of added to.
"""
cart_line, dummy_created = self.lines.get_or_create(
variant=variant, defaults={'quantity': 0, 'data': data or {}})
if replace:
new_quantity = quantity
else:
new_quantity = cart_line.quantity + quantity
if new_quantity < 0:
raise ValueError('%r is not a valid quantity (results in %r)' % (
quantity, new_quantity))
if check_quantity:
variant.check_quantity(new_quantity)
cart_line.quantity = new_quantity
if not cart_line.quantity:
cart_line.delete()
else:
cart_line.save(update_fields=['quantity'])
self.update_quantity()
def partition(self):
"""Split the card into a list of groups for shipping."""
grouper = (
lambda p: 'physical' if p.is_shipping_required() else 'digital')
return partition(self.lines.all(), grouper, ProductGroup)
@staticmethod
def generate_permalink_from_lines(lines):
variant_quantity = ''
for line in lines: # type: CartLine
if line.quantity > 0:
variant_quantity += '{line.variant_id}-{line.quantity}-'.format(line=line)
# remove last dash
variant_quantity = variant_quantity[:-1]
url = reverse('cart:get-cart', kwargs={'variant_quantity': variant_quantity})
return url
def generate_permalink(self, as_short_link=False):
url = None
lines = self.lines.all()
if lines:
url = self.generate_permalink_from_lines(lines)
if as_short_link:
raise NotImplementedError
return url
class CartLine(models.Model, ItemLine):
"""A single cart line.
Multiple lines in the same cart can refer to the same product variant if
their `data` field is different.
"""
cart = models.ForeignKey(
Cart, related_name='lines', on_delete=models.CASCADE)
variant = models.ForeignKey(
'product.ProductVariant', related_name='+', on_delete=models.CASCADE)
quantity = models.PositiveIntegerField(
validators=[MinValueValidator(0), MaxValueValidator(999)])
data = JSONField(blank=True, default={})
class Meta:
unique_together = ('cart', 'variant', 'data')
def __str__(self):
return smart_str(self.variant)
def __eq__(self, other):
if not isinstance(other, CartLine):
return NotImplemented
return (
self.variant == other.variant and
self.quantity == other.quantity and
self.data == other.data)
def __ne__(self, other):
return not self == other # pragma: no cover
def __repr__(self):
return 'CartLine(variant=%r, quantity=%r, data=%r)' % (
self.variant, self.quantity, self.data)
def __getstate__(self):
return self.variant, self.quantity, self.data
def __setstate__(self, data):
self.variant, self.quantity, self.data = data
def get_total(self, **kwargs):
"""Return the total price of this line."""
amount = super().get_total(**kwargs)
return amount.quantize(CENTS)
def get_quantity(self, **kwargs):
"""Return the line's quantity."""
return self.quantity
# pylint: disable=W0221
def get_price_per_item(self, discounts=None, **kwargs):
"""Return the unit price of the line."""
return self.variant.get_price_per_item(discounts=discounts, **kwargs)
def is_shipping_required(self):
"""Return `True` if the related product variant requires shipping."""
return self.variant.is_shipping_required()
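# Illustrative sketch (not part of the original module): Cart.add() creates or updates a CartLine
# and refreshes the cached quantity. `variant` is assumed to be a saved ProductVariant with
# enough stock to pass check_quantity().
#
#   cart = Cart.objects.create()
#   cart.add(variant, quantity=2)
#   cart.quantity                                # 2
#   cart.add(variant, quantity=1, replace=True)
#   cart.lines.get(variant=variant).quantity     # 1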
|
py | 1a53fb5c625bc9538c554cf4f08dad8e9aae5940 | # coding=utf-8
# author@alingse
# 2016.10.08
__version__ = (0, 0, 3, 'a1')
VERSION = '.'.join(map(str, __version__))
|
py | 1a53fd71c068626e3d91e737c9b295f4e0a1b0a9 | # coding: utf-8
from __future__ import unicode_literals
import re
import random
from .common import InfoExtractor
from ..utils import (
int_or_none,
float_or_none,
unified_strdate,
)
class PornoVoisinesIE(InfoExtractor):
_VALID_URL = r'http://(?:www\.)?pornovoisines\.com/showvideo/(?P<id>\d+)/(?P<display_id>[^/]+)'
_VIDEO_URL_TEMPLATE = 'http://stream%d.pornovoisines.com' \
'/static/media/video/transcoded/%s-640x360-1000-trscded.mp4'
_SERVER_NUMBERS = (1, 2)
_TEST = {
'url': 'http://www.pornovoisines.com/showvideo/1285/recherche-appartement/',
'md5': '5ac670803bc12e9e7f9f662ce64cf1d1',
'info_dict': {
'id': '1285',
'display_id': 'recherche-appartement',
'ext': 'mp4',
'title': 'Recherche appartement',
'description': 'md5:819ea0b785e2a04667a1a01cdc89594e',
'thumbnail': 're:^https?://.*\.jpg$',
'upload_date': '20140925',
'duration': 120,
'view_count': int,
'average_rating': float,
'categories': ['Débutantes', 'Scénario', 'Sodomie'],
'age_limit': 18,
}
}
@classmethod
def build_video_url(cls, num):
return cls._VIDEO_URL_TEMPLATE % (random.choice(cls._SERVER_NUMBERS), num)
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
display_id = mobj.group('display_id')
webpage = self._download_webpage(url, video_id)
video_url = self.build_video_url(video_id)
title = self._html_search_regex(
r'<h1>(.+?)</h1>', webpage, 'title', flags=re.DOTALL)
description = self._html_search_regex(
r'<article id="descriptif">(.+?)</article>',
webpage, 'description', fatal=False, flags=re.DOTALL)
thumbnail = self._search_regex(
r'<div id="mediaspace%s">\s*<img src="/?([^"]+)"' % video_id,
webpage, 'thumbnail', fatal=False)
if thumbnail:
thumbnail = 'http://www.pornovoisines.com/%s' % thumbnail
upload_date = unified_strdate(self._search_regex(
r'Publié le ([\d-]+)', webpage, 'upload date', fatal=False))
duration = int_or_none(self._search_regex(
'Durée (\d+)', webpage, 'duration', fatal=False))
view_count = int_or_none(self._search_regex(
r'(\d+) vues', webpage, 'view count', fatal=False))
average_rating = self._search_regex(
r'Note\s*:\s*(\d+(?:,\d+)?)', webpage, 'average rating', fatal=False)
if average_rating:
average_rating = float_or_none(average_rating.replace(',', '.'))
categories = self._html_search_meta(
'keywords', webpage, 'categories', fatal=False)
if categories:
categories = [category.strip() for category in categories.split(',')]
return {
'id': video_id,
'display_id': display_id,
'url': video_url,
'title': title,
'description': description,
'thumbnail': thumbnail,
'upload_date': upload_date,
'duration': duration,
'view_count': view_count,
'average_rating': average_rating,
'categories': categories,
'age_limit': 18,
}
|
py | 1a53fd87df6306f956f521d3c57637769f11487a | # -*- coding: utf-8 -*-
import copy
import datetime
import sys
from blist import sortedlist
from util import add_raw_postfix
from util import dt_to_ts
from util import EAException
from util import elastalert_logger
from util import elasticsearch_client
from util import format_index
from util import hashable
from util import lookup_es_key
from util import new_get_event_ts
from util import pretty_ts
from util import total_seconds
from util import ts_now
from util import ts_to_dt
class RuleType(object):
""" The base class for a rule type.
The class must implement add_data and add any matches to self.matches.
:param rules: A rule configuration.
"""
required_options = frozenset()
def __init__(self, rules, args=None):
self.matches = []
self.rules = rules
self.occurrences = {}
self.rules['owner'] = self.rules.get('owner', '')
self.rules['priority'] = self.rules.get('priority', '2')
def add_data(self, data):
""" The function that the ElastAlert client calls with results from ES.
Data is a list of dictionaries, from Elasticsearch.
:param data: A list of events, each of which is a dictionary of terms.
"""
raise NotImplementedError()
def add_match(self, event):
""" This function is called on all matching events. Rules use it to add
extra information about the context of a match. Event is a dictionary
containing terms directly from Elasticsearch and alerts will report
all of the information.
:param event: The matching event, a dictionary of terms.
"""
# Convert datetime's back to timestamps
ts = self.rules.get('timestamp_field')
if ts in event:
event[ts] = dt_to_ts(event[ts])
self.matches.append(copy.deepcopy(event))
def get_match_str(self, match):
""" Returns a string that gives more context about a match.
:param match: The matching event, a dictionary of terms.
:return: A user facing string describing the match.
"""
return ''
def garbage_collect(self, timestamp):
""" Gets called periodically to remove old data that is useless beyond given timestamp.
May also be used to compute things in the absence of new data.
:param timestamp: A timestamp indicating the rule has been run up to that point.
"""
pass
def add_count_data(self, counts):
""" Gets called when a rule has use_count_query set to True. Called to add data from querying to the rule.
:param counts: A dictionary mapping timestamps to hit counts.
"""
raise NotImplementedError()
def add_terms_data(self, terms):
""" Gets called when a rule has use_terms_query set to True.
:param terms: A list of buckets with a key, corresponding to query_key, and the count """
raise NotImplementedError()
def add_aggregation_data(self, payload):
""" Gets called when a rule has use_terms_query set to True.
:param terms: A list of buckets with a key, corresponding to query_key, and the count """
raise NotImplementedError()
class CompareRule(RuleType):
""" A base class for matching a specific term by passing it to a compare function """
required_options = frozenset(['compound_compare_key'])
def expand_entries(self, list_type):
""" Expand entries specified in files using the '!file' directive, if there are
any, then add everything to a set.
"""
entries_set = set()
for entry in self.rules[list_type]:
if entry.startswith("!file"): # - "!file /path/to/list"
filename = entry.split()[1]
with open(filename, 'r') as f:
for line in f:
entries_set.add(line.rstrip())
else:
entries_set.add(entry)
self.rules[list_type] = entries_set
def compare(self, event):
""" An event is a match if this returns true """
raise NotImplementedError()
def add_data(self, data):
# If compare returns true, add it as a match
for event in data:
if self.compare(event):
self.add_match(event)
class BlacklistRule(CompareRule):
""" A CompareRule where the compare function checks a given key against a blacklist """
required_options = frozenset(['compare_key', 'blacklist'])
def __init__(self, rules, args=None):
super(BlacklistRule, self).__init__(rules, args=None)
self.expand_entries('blacklist')
def compare(self, event):
term = lookup_es_key(event, self.rules['compare_key'])
if term in self.rules['blacklist']:
return True
return False
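# Editor's illustrative sketch, not part of ElastAlert: driving BlacklistRule
# directly with a minimal in-memory rule. Real rules are loaded from YAML and
# carry many more keys; the field names and sample events below are assumptions.
def _example_blacklist_rule():
    rule = {
        'compare_key': 'username',
        'blacklist': ['mallory', 'eve'],
        'timestamp_field': '@timestamp',
    }
    blacklist_rule = BlacklistRule(rule)
    blacklist_rule.add_data([{'username': 'mallory'}, {'username': 'alice'}])
    return blacklist_rule.matches  # only the 'mallory' event is a match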
class WhitelistRule(CompareRule):
""" A CompareRule where the compare function checks a given term against a whitelist """
required_options = frozenset(['compare_key', 'whitelist', 'ignore_null'])
def __init__(self, rules, args=None):
super(WhitelistRule, self).__init__(rules, args=None)
self.expand_entries('whitelist')
def compare(self, event):
term = lookup_es_key(event, self.rules['compare_key'])
if term is None:
return not self.rules['ignore_null']
if term not in self.rules['whitelist']:
return True
return False
class ChangeRule(CompareRule):
""" A rule that will store values for a certain term and match if those values change """
required_options = frozenset(['query_key', 'compound_compare_key', 'ignore_null'])
change_map = {}
occurrence_time = {}
def compare(self, event):
key = hashable(lookup_es_key(event, self.rules['query_key']))
values = []
elastalert_logger.debug(" Previous Values of compare keys " + str(self.occurrences))
for val in self.rules['compound_compare_key']:
lookup_value = lookup_es_key(event, val)
values.append(lookup_value)
elastalert_logger.debug(" Current Values of compare keys " + str(values))
changed = False
for val in values:
if not isinstance(val, bool) and not val and self.rules['ignore_null']:
return False
# If we have seen this key before, compare it to the new value
if key in self.occurrences:
for idx, previous_values in enumerate(self.occurrences[key]):
elastalert_logger.debug(" " + str(previous_values) + " " + str(values[idx]))
changed = previous_values != values[idx]
if changed:
break
if changed:
self.change_map[key] = (self.occurrences[key], values)
# If using timeframe, only return true if the time delta is < timeframe
if key in self.occurrence_time:
changed = event[self.rules['timestamp_field']] - self.occurrence_time[key] <= self.rules['timeframe']
# Update the current value and time
elastalert_logger.debug(" Setting current value of compare keys values " + str(values))
self.occurrences[key] = values
if 'timeframe' in self.rules:
self.occurrence_time[key] = event[self.rules['timestamp_field']]
elastalert_logger.debug("Final result of comparision between previous and current values " + str(changed))
return changed
def add_match(self, match):
# TODO this is not technically correct
# if the term changes multiple times before an alert is sent
# this data will be overwritten with the most recent change
change = self.change_map.get(hashable(lookup_es_key(match, self.rules['query_key'])))
extra = {}
if change:
extra = {'old_value': change[0],
'new_value': change[1]}
elastalert_logger.debug("Description of the changed records " + str(dict(match.items() + extra.items())))
super(ChangeRule, self).add_match(dict(match.items() + extra.items()))
class FrequencyRule(RuleType):
""" A rule that matches if num_events number of events occur within a timeframe """
required_options = frozenset(['num_events', 'timeframe'])
def __init__(self, *args):
super(FrequencyRule, self).__init__(*args)
self.ts_field = self.rules.get('timestamp_field', '@timestamp')
self.get_ts = new_get_event_ts(self.ts_field)
self.attach_related = self.rules.get('attach_related', False)
def add_count_data(self, data):
""" Add count data to the rule. Data should be of the form {ts: count}. """
if len(data) > 1:
raise EAException('add_count_data can only accept one count at a time')
(ts, count), = data.items()
event = ({self.ts_field: ts}, count)
self.occurrences.setdefault('all', EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append(event)
self.check_for_match('all')
def add_terms_data(self, terms):
for timestamp, buckets in terms.iteritems():
for bucket in buckets:
event = ({self.ts_field: timestamp,
self.rules['query_key']: bucket['key']}, bucket['doc_count'])
self.occurrences.setdefault(bucket['key'], EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append(event)
self.check_for_match(bucket['key'])
def add_data(self, data):
if 'query_key' in self.rules:
qk = self.rules['query_key']
else:
qk = None
for event in data:
if qk:
key = hashable(lookup_es_key(event, qk))
else:
# If no query_key, we use the key 'all' for all events
key = 'all'
# Store the timestamps of recent occurrences, per key
self.occurrences.setdefault(key, EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)).append((event, 1))
self.check_for_match(key, end=False)
# We call this multiple times with the 'end' parameter because subclasses
# may or may not want to check while only partial data has been added
if key in self.occurrences: # could have been emptied by previous check
self.check_for_match(key, end=True)
def check_for_match(self, key, end=False):
# Match if, after removing old events, we hit num_events.
# the 'end' parameter depends on whether this was called from the
# middle or end of an add_data call and is used in subclasses
if self.occurrences[key].count() >= self.rules['num_events']:
event = self.occurrences[key].data[-1][0]
if self.attach_related:
event['related_events'] = [data[0] for data in self.occurrences[key].data[:-1]]
self.add_match(event)
self.occurrences.pop(key)
def garbage_collect(self, timestamp):
""" Remove all occurrence data that is beyond the timeframe away """
stale_keys = []
for key, window in self.occurrences.iteritems():
if timestamp - lookup_es_key(window.data[-1][0], self.ts_field) > self.rules['timeframe']:
stale_keys.append(key)
map(self.occurrences.pop, stale_keys)
def get_match_str(self, match):
lt = self.rules.get('use_local_time')
match_ts = lookup_es_key(match, self.ts_field)
starttime = pretty_ts(dt_to_ts(ts_to_dt(match_ts) - self.rules['timeframe']), lt)
endtime = pretty_ts(match_ts, lt)
message = 'At least %d events occurred between %s and %s\n\n' % (self.rules['num_events'],
starttime,
endtime)
return message
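# Editor's illustrative sketch, not part of ElastAlert: a FrequencyRule fed
# three events inside a ten-minute window. The rule keys and timestamps are
# assumptions; production rules come from YAML with many more options.
def _example_frequency_rule():
    rule = {
        'num_events': 3,
        'timeframe': datetime.timedelta(minutes=10),
        'timestamp_field': '@timestamp',
    }
    freq_rule = FrequencyRule(rule)
    base = ts_to_dt('2020-01-01T00:00:00Z')
    events = [{'@timestamp': base + datetime.timedelta(minutes=m)} for m in range(3)]
    freq_rule.add_data(events)
    return freq_rule.matches  # one match once the third event lands in the window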
class AnyRule(RuleType):
""" A rule that will match on any input data """
def add_data(self, data):
for datum in data:
self.add_match(datum)
class EventWindow(object):
""" A container for hold event counts for rules which need a chronological ordered event window. """
def __init__(self, timeframe, onRemoved=None, getTimestamp=new_get_event_ts('@timestamp')):
self.timeframe = timeframe
self.onRemoved = onRemoved
self.get_ts = getTimestamp
self.data = sortedlist(key=self.get_ts)
self.running_count = 0
def clear(self):
self.data = sortedlist(key=self.get_ts)
self.running_count = 0
def append(self, event):
""" Add an event to the window. Event should be of the form (dict, count).
This will also pop the oldest events and call onRemoved on them until the
window size is less than timeframe. """
self.data.add(event)
self.running_count += event[1]
while self.duration() >= self.timeframe:
oldest = self.data[0]
self.data.remove(oldest)
self.running_count -= oldest[1]
self.onRemoved and self.onRemoved(oldest)
def duration(self):
""" Get the size in timedelta of the window. """
if not self.data:
return datetime.timedelta(0)
return self.get_ts(self.data[-1]) - self.get_ts(self.data[0])
def count(self):
""" Count the number of events in the window. """
return self.running_count
def mean(self):
""" Compute the mean of the value_field in the window. """
if len(self.data) > 0:
datasum = 0
datalen = 0
for dat in self.data:
if "placeholder" not in dat[0]:
datasum += dat[1]
datalen += 1
if datalen > 0:
return datasum / float(datalen)
return None
else:
return None
def __iter__(self):
return iter(self.data)
def append_middle(self, event):
""" Attempt to place the event in the correct location in our deque.
Returns True if successful, otherwise False. """
rotation = 0
ts = self.get_ts(event)
# Append left if ts is earlier than first event
if self.get_ts(self.data[0]) > ts:
self.data.appendleft(event)
self.running_count += event[1]
return
# Rotate window until we can insert event
while self.get_ts(self.data[-1]) > ts:
self.data.rotate(1)
rotation += 1
if rotation == len(self.data):
# This should never happen
return
self.data.append(event)
self.running_count += event[1]
self.data.rotate(-rotation)
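# Editor's illustrative sketch, not part of ElastAlert: how the rules above
# feed EventWindow with (event, count) pairs. It assumes new_get_event_ts()
# returns a callable that extracts event[0][timestamp_field], as the rules rely on.
def _example_event_window():
    window = EventWindow(datetime.timedelta(minutes=5),
                         getTimestamp=new_get_event_ts('@timestamp'))
    start = ts_to_dt('2020-01-01T12:00:00Z')
    for minute in range(10):
        event = {'@timestamp': start + datetime.timedelta(minutes=minute)}
        window.append((event, 1))
    # Events older than the timeframe have been pushed out of the window.
    return window.count(), window.duration()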
class SpikeRule(RuleType):
""" A rule that uses two sliding windows to compare relative event frequency. """
required_options = frozenset(['timeframe', 'spike_height', 'spike_type'])
def __init__(self, *args):
super(SpikeRule, self).__init__(*args)
self.timeframe = self.rules['timeframe']
self.ref_windows = {}
self.cur_windows = {}
self.ts_field = self.rules.get('timestamp_field', '@timestamp')
self.get_ts = new_get_event_ts(self.ts_field)
self.first_event = {}
self.skip_checks = {}
self.field_value = self.rules.get('field_value')
self.ref_window_filled_once = False
def add_count_data(self, data):
""" Add count data to the rule. Data should be of the form {ts: count}. """
if len(data) > 1:
raise EAException('add_count_data can only accept one count at a time')
for ts, count in data.iteritems():
self.handle_event({self.ts_field: ts}, count, 'all')
def add_terms_data(self, terms):
for timestamp, buckets in terms.iteritems():
for bucket in buckets:
count = bucket['doc_count']
event = {self.ts_field: timestamp,
self.rules['query_key']: bucket['key']}
key = bucket['key']
self.handle_event(event, count, key)
def add_data(self, data):
for event in data:
qk = self.rules.get('query_key', 'all')
if qk != 'all':
qk = hashable(lookup_es_key(event, qk))
if qk is None:
qk = 'other'
if self.field_value is not None:
if self.field_value in event:
count = lookup_es_key(event, self.field_value)
if count is not None:
try:
count = int(count)
except ValueError:
elastalert_logger.warn('{} is not a number: {}'.format(self.field_value, count))
else:
self.handle_event(event, count, qk)
else:
self.handle_event(event, 1, qk)
def clear_windows(self, qk, event):
# Reset the state and prevent alerts until windows filled again
self.ref_windows[qk].clear()
self.first_event.pop(qk)
self.skip_checks[qk] = event[self.ts_field] + self.rules['timeframe'] * 2
def handle_event(self, event, count, qk='all'):
self.first_event.setdefault(qk, event)
self.ref_windows.setdefault(qk, EventWindow(self.timeframe, getTimestamp=self.get_ts))
self.cur_windows.setdefault(qk, EventWindow(self.timeframe, self.ref_windows[qk].append, self.get_ts))
self.cur_windows[qk].append((event, count))
# Don't alert if ref window has not yet been filled for this key AND
if event[self.ts_field] - self.first_event[qk][self.ts_field] < self.rules['timeframe'] * 2:
# ElastAlert has not been running long enough for any alerts OR
if not self.ref_window_filled_once:
return
# This rule is not using alert_on_new_data (with query_key) OR
if not (self.rules.get('query_key') and self.rules.get('alert_on_new_data')):
return
# An alert for this qk has recently fired
if qk in self.skip_checks and event[self.ts_field] < self.skip_checks[qk]:
return
else:
self.ref_window_filled_once = True
if self.field_value is not None:
if self.find_matches(self.ref_windows[qk].mean(), self.cur_windows[qk].mean()):
# skip over placeholder events
for match, count in self.cur_windows[qk].data:
if "placeholder" not in match:
break
self.add_match(match, qk)
self.clear_windows(qk, match)
else:
if self.find_matches(self.ref_windows[qk].count(), self.cur_windows[qk].count()):
# skip over placeholder events which have count=0
for match, count in self.cur_windows[qk].data:
if count:
break
self.add_match(match, qk)
self.clear_windows(qk, match)
def add_match(self, match, qk):
extra_info = {}
if self.field_value is None:
spike_count = self.cur_windows[qk].count()
reference_count = self.ref_windows[qk].count()
else:
spike_count = self.cur_windows[qk].mean()
reference_count = self.ref_windows[qk].mean()
extra_info = {'spike_count': spike_count,
'reference_count': reference_count}
match = dict(match.items() + extra_info.items())
super(SpikeRule, self).add_match(match)
def find_matches(self, ref, cur):
""" Determines if an event spike or dip happening. """
# Apply threshold limits
if self.field_value is None:
if (cur < self.rules.get('threshold_cur', 0) or
ref < self.rules.get('threshold_ref', 0)):
return False
elif ref is None or ref == 0 or cur is None or cur == 0:
return False
spike_up, spike_down = False, False
if cur <= ref / self.rules['spike_height']:
spike_down = True
if cur >= ref * self.rules['spike_height']:
spike_up = True
if (self.rules['spike_type'] in ['both', 'up'] and spike_up) or \
(self.rules['spike_type'] in ['both', 'down'] and spike_down):
return True
return False
def get_match_str(self, match):
if self.field_value is None:
message = 'An abnormal number (%d) of events occurred around %s.\n' % (
match['spike_count'],
pretty_ts(match[self.rules['timestamp_field']], self.rules.get('use_local_time'))
)
message += 'Preceding that time, there were only %d events within %s\n\n' % (match['reference_count'], self.rules['timeframe'])
else:
message = 'An abnormal average value (%.2f) of field \'%s\' occurred around %s.\n' % (
match['spike_count'],
self.field_value,
pretty_ts(match[self.rules['timestamp_field']],
self.rules.get('use_local_time'))
)
message += 'Preceding that time, the field had an average value of (%.2f) within %s\n\n' % (
match['reference_count'], self.rules['timeframe'])
return message
def garbage_collect(self, ts):
# Windows are sized according to their newest event
# This is a placeholder to accurately size windows in the absence of events
for qk in self.cur_windows.keys():
            # If we haven't seen this key in a long time, forget it
if qk != 'all' and self.ref_windows[qk].count() == 0 and self.cur_windows[qk].count() == 0:
self.cur_windows.pop(qk)
self.ref_windows.pop(qk)
continue
placeholder = {self.ts_field: ts, "placeholder": True}
# The placeholder may trigger an alert, in which case, qk will be expected
if qk != 'all':
placeholder.update({self.rules['query_key']: qk})
self.handle_event(placeholder, 0, qk)
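# Editor's illustrative sketch, not part of ElastAlert: a SpikeRule with a
# five-minute window fires once the current window holds spike_height times
# as many events as the reference window. All values below are assumptions.
def _example_spike_rule():
    rule = {
        'timeframe': datetime.timedelta(minutes=5),
        'spike_height': 2,
        'spike_type': 'up',
        'timestamp_field': '@timestamp',
    }
    spike_rule = SpikeRule(rule)
    base = ts_to_dt('2020-01-01T00:00:00Z')
    # A steady baseline of one event per minute for ten minutes...
    events = [{'@timestamp': base + datetime.timedelta(minutes=m)} for m in range(10)]
    # ...followed by a burst of six events in the same minute.
    events += [{'@timestamp': base + datetime.timedelta(minutes=10)}] * 6
    spike_rule.add_data(events)
    return spike_rule.matches  # one match carrying spike_count and reference_count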
class FlatlineRule(FrequencyRule):
""" A rule that matches when there is a low number of events given a timeframe. """
required_options = frozenset(['timeframe', 'threshold'])
def __init__(self, *args):
super(FlatlineRule, self).__init__(*args)
self.threshold = self.rules['threshold']
# Dictionary mapping query keys to the first events
self.first_event = {}
def check_for_match(self, key, end=True):
# This function gets called between every added document with end=True after the last
# We ignore the calls before the end because it may trigger false positives
if not end:
return
most_recent_ts = self.get_ts(self.occurrences[key].data[-1])
if self.first_event.get(key) is None:
self.first_event[key] = most_recent_ts
# Don't check for matches until timeframe has elapsed
if most_recent_ts - self.first_event[key] < self.rules['timeframe']:
return
        # Match if, after removing old events, the count is below the threshold
count = self.occurrences[key].count()
if count < self.rules['threshold']:
# Do a deep-copy, otherwise we lose the datetime type in the timestamp field of the last event
event = copy.deepcopy(self.occurrences[key].data[-1][0])
event.update(key=key, count=count)
self.add_match(event)
if not self.rules.get('forget_keys'):
# After adding this match, leave the occurrences windows alone since it will
# be pruned in the next add_data or garbage_collect, but reset the first_event
# so that alerts continue to fire until the threshold is passed again.
least_recent_ts = self.get_ts(self.occurrences[key].data[0])
timeframe_ago = most_recent_ts - self.rules['timeframe']
self.first_event[key] = min(least_recent_ts, timeframe_ago)
else:
# Forget about this key until we see it again
self.first_event.pop(key)
self.occurrences.pop(key)
def get_match_str(self, match):
ts = match[self.rules['timestamp_field']]
lt = self.rules.get('use_local_time')
message = 'An abnormally low number of events occurred around %s.\n' % (pretty_ts(ts, lt))
message += 'Between %s and %s, there were less than %s events.\n\n' % (
pretty_ts(dt_to_ts(ts_to_dt(ts) - self.rules['timeframe']), lt),
pretty_ts(ts, lt),
self.rules['threshold']
)
return message
def garbage_collect(self, ts):
# We add an event with a count of zero to the EventWindow for each key. This will cause the EventWindow
# to remove events that occurred more than one `timeframe` ago, and call onRemoved on them.
default = ['all'] if 'query_key' not in self.rules else []
for key in self.occurrences.keys() or default:
self.occurrences.setdefault(
key,
EventWindow(self.rules['timeframe'], getTimestamp=self.get_ts)
).append(
({self.ts_field: ts}, 0)
)
self.first_event.setdefault(key, ts)
self.check_for_match(key)
class NewTermsRule(RuleType):
""" Alerts on a new value in a list of fields. """
def __init__(self, rule, args=None):
super(NewTermsRule, self).__init__(rule, args)
self.seen_values = {}
# Allow the use of query_key or fields
if 'fields' not in self.rules:
if 'query_key' not in self.rules:
raise EAException("fields or query_key must be specified")
self.fields = self.rules['query_key']
else:
self.fields = self.rules['fields']
if not self.fields:
raise EAException("fields must not be an empty list")
if type(self.fields) != list:
self.fields = [self.fields]
if self.rules.get('use_terms_query') and \
(len(self.fields) != 1 or (len(self.fields) == 1 and type(self.fields[0]) == list)):
raise EAException("use_terms_query can only be used with a single non-composite field")
if self.rules.get('use_terms_query'):
if self.rules.get('query_key') != self.fields:
raise EAException('If use_terms_query is specified, you cannot specify different query_key and fields')
if not self.rules.get('query_key').endswith('.keyword') and not self.rules.get('query_key').endswith('.raw'):
if self.rules.get('use_keyword_postfix', True):
elastalert_logger.warn('Warning: If query_key is a non-keyword field, you must set '
'use_keyword_postfix to false, or add .keyword/.raw to your query_key.')
try:
self.get_all_terms(args)
except Exception as e:
# Refuse to start if we cannot get existing terms
raise EAException('Error searching for existing terms: %s' % (repr(e))), None, sys.exc_info()[2]
def get_all_terms(self, args):
""" Performs a terms aggregation for each field to get every existing term. """
self.es = elasticsearch_client(self.rules)
window_size = datetime.timedelta(**self.rules.get('terms_window_size', {'days': 30}))
field_name = {"field": "", "size": 2147483647} # Integer.MAX_VALUE
query_template = {"aggs": {"values": {"terms": field_name}}}
if args and hasattr(args, 'start') and args.start:
end = ts_to_dt(args.start)
elif 'start_date' in self.rules:
end = ts_to_dt(self.rules['start_date'])
else:
end = ts_now()
start = end - window_size
step = datetime.timedelta(**self.rules.get('window_step_size', {'days': 1}))
for field in self.fields:
tmp_start = start
tmp_end = min(start + step, end)
time_filter = {self.rules['timestamp_field']: {'lt': self.rules['dt_to_ts'](tmp_end), 'gte': self.rules['dt_to_ts'](tmp_start)}}
query_template['filter'] = {'bool': {'must': [{'range': time_filter}]}}
query = {'aggs': {'filtered': query_template}}
if 'filter' in self.rules:
for item in self.rules['filter']:
query_template['filter']['bool']['must'].append(item)
# For composite keys, we will need to perform sub-aggregations
if type(field) == list:
self.seen_values.setdefault(tuple(field), [])
level = query_template['aggs']
# Iterate on each part of the composite key and add a sub aggs clause to the elastic search query
for i, sub_field in enumerate(field):
if self.rules.get('use_keyword_postfix', True):
level['values']['terms']['field'] = add_raw_postfix(sub_field, self.is_five_or_above())
else:
level['values']['terms']['field'] = sub_field
if i < len(field) - 1:
# If we have more fields after the current one, then set up the next nested structure
level['values']['aggs'] = {'values': {'terms': copy.deepcopy(field_name)}}
level = level['values']['aggs']
else:
self.seen_values.setdefault(field, [])
# For non-composite keys, only a single agg is needed
if self.rules.get('use_keyword_postfix', True):
field_name['field'] = add_raw_postfix(field, self.is_five_or_above())
else:
field_name['field'] = field
# Query the entire time range in small chunks
while tmp_start < end:
if self.rules.get('use_strftime_index'):
index = format_index(self.rules['index'], tmp_start, tmp_end)
else:
index = self.rules['index']
res = self.es.search(body=query, index=index, ignore_unavailable=True, timeout='50s')
if 'aggregations' in res:
buckets = res['aggregations']['filtered']['values']['buckets']
if type(field) == list:
# For composite keys, make the lookup based on all fields
# Make it a tuple since it can be hashed and used in dictionary lookups
for bucket in buckets:
# We need to walk down the hierarchy and obtain the value at each level
self.seen_values[tuple(field)] += self.flatten_aggregation_hierarchy(bucket)
else:
keys = [bucket['key'] for bucket in buckets]
self.seen_values[field] += keys
else:
if type(field) == list:
self.seen_values.setdefault(tuple(field), [])
else:
self.seen_values.setdefault(field, [])
if tmp_start == tmp_end:
break
tmp_start = tmp_end
tmp_end = min(tmp_start + step, end)
time_filter[self.rules['timestamp_field']] = {'lt': self.rules['dt_to_ts'](tmp_end),
'gte': self.rules['dt_to_ts'](tmp_start)}
for key, values in self.seen_values.iteritems():
if not values:
if type(key) == tuple:
# If we don't have any results, it could either be because of the absence of any baseline data
# OR it may be because the composite key contained a non-primitive type. Either way, give the
# end-users a heads up to help them debug what might be going on.
elastalert_logger.warning((
'No results were found from all sub-aggregations. This can either indicate that there is '
'no baseline data OR that a non-primitive field was used in a composite key.'
))
else:
                    elastalert_logger.info('Found no values for %s' % (key))
continue
self.seen_values[key] = list(set(values))
elastalert_logger.info('Found %s unique values for %s' % (len(set(values)), key))
def flatten_aggregation_hierarchy(self, root, hierarchy_tuple=()):
""" For nested aggregations, the results come back in the following format:
{
"aggregations" : {
"filtered" : {
"doc_count" : 37,
"values" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [ {
"key" : "1.1.1.1", # IP address (root)
"doc_count" : 13,
"values" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [ {
"key" : "80", # Port (sub-aggregation)
"doc_count" : 3,
"values" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [ {
"key" : "ack", # Reason (sub-aggregation, leaf-node)
"doc_count" : 3
}, {
"key" : "syn", # Reason (sub-aggregation, leaf-node)
"doc_count" : 1
} ]
}
}, {
"key" : "82", # Port (sub-aggregation)
"doc_count" : 3,
"values" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [ {
"key" : "ack", # Reason (sub-aggregation, leaf-node)
"doc_count" : 3
}, {
"key" : "syn", # Reason (sub-aggregation, leaf-node)
"doc_count" : 3
} ]
}
} ]
}
}, {
"key" : "2.2.2.2", # IP address (root)
"doc_count" : 4,
"values" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [ {
"key" : "443", # Port (sub-aggregation)
"doc_count" : 3,
"values" : {
"doc_count_error_upper_bound" : 0,
"sum_other_doc_count" : 0,
"buckets" : [ {
"key" : "ack", # Reason (sub-aggregation, leaf-node)
"doc_count" : 3
}, {
"key" : "syn", # Reason (sub-aggregation, leaf-node)
"doc_count" : 3
} ]
}
} ]
}
} ]
}
}
}
}
Each level will either have more values and buckets, or it will be a leaf node
We'll ultimately return a flattened list with the hierarchies appended as strings,
e.g the above snippet would yield a list with:
[
('1.1.1.1', '80', 'ack'),
('1.1.1.1', '80', 'syn'),
('1.1.1.1', '82', 'ack'),
('1.1.1.1', '82', 'syn'),
('2.2.2.2', '443', 'ack'),
('2.2.2.2', '443', 'syn')
]
A similar formatting will be performed in the add_data method and used as the basis for comparison
"""
results = []
# There are more aggregation hierarchies left. Traverse them.
if 'values' in root:
results += self.flatten_aggregation_hierarchy(root['values']['buckets'], hierarchy_tuple + (root['key'],))
else:
# We've gotten to a sub-aggregation, which may have further sub-aggregations
# See if we need to traverse further
for node in root:
if 'values' in node:
results += self.flatten_aggregation_hierarchy(node, hierarchy_tuple)
else:
results.append(hierarchy_tuple + (node['key'],))
return results
def add_data(self, data):
for document in data:
for field in self.fields:
value = ()
lookup_field = field
if type(field) == list:
# For composite keys, make the lookup based on all fields
# Make it a tuple since it can be hashed and used in dictionary lookups
lookup_field = tuple(field)
for sub_field in field:
lookup_result = lookup_es_key(document, sub_field)
if not lookup_result:
value = None
break
value += (lookup_result,)
else:
value = lookup_es_key(document, field)
if not value and self.rules.get('alert_on_missing_field'):
document['missing_field'] = lookup_field
self.add_match(copy.deepcopy(document))
elif value:
if value not in self.seen_values[lookup_field]:
document['new_field'] = lookup_field
self.add_match(copy.deepcopy(document))
self.seen_values[lookup_field].append(value)
def add_terms_data(self, terms):
# With terms query, len(self.fields) is always 1 and the 0'th entry is always a string
field = self.fields[0]
for timestamp, buckets in terms.iteritems():
for bucket in buckets:
if bucket['doc_count']:
if bucket['key'] not in self.seen_values[field]:
match = {field: bucket['key'],
self.rules['timestamp_field']: timestamp,
'new_field': field}
self.add_match(match)
self.seen_values[field].append(bucket['key'])
def is_five_or_above(self):
version = self.es.info()['version']['number']
return int(version[0]) >= 5
class CardinalityRule(RuleType):
""" A rule that matches if cardinality of a field is above or below a threshold within a timeframe """
required_options = frozenset(['timeframe', 'cardinality_field'])
def __init__(self, *args):
super(CardinalityRule, self).__init__(*args)
if 'max_cardinality' not in self.rules and 'min_cardinality' not in self.rules:
raise EAException("CardinalityRule must have one of either max_cardinality or min_cardinality")
self.ts_field = self.rules.get('timestamp_field', '@timestamp')
self.cardinality_field = self.rules['cardinality_field']
self.cardinality_cache = {}
self.first_event = {}
self.timeframe = self.rules['timeframe']
def add_data(self, data):
qk = self.rules.get('query_key')
for event in data:
if qk:
key = hashable(lookup_es_key(event, qk))
else:
# If no query_key, we use the key 'all' for all events
key = 'all'
self.cardinality_cache.setdefault(key, {})
self.first_event.setdefault(key, event[self.ts_field])
value = hashable(lookup_es_key(event, self.cardinality_field))
if value is not None:
                # Store this timestamp as the most recent occurrence of the term
self.cardinality_cache[key][value] = event[self.ts_field]
self.check_for_match(key, event)
def check_for_match(self, key, event, gc=True):
# Check to see if we are past max/min_cardinality for a given key
timeframe_elapsed = event[self.ts_field] - self.first_event.get(key, event[self.ts_field]) > self.timeframe
if (len(self.cardinality_cache[key]) > self.rules.get('max_cardinality', float('inf')) or
(len(self.cardinality_cache[key]) < self.rules.get('min_cardinality', float('-inf')) and timeframe_elapsed)):
# If there might be a match, run garbage collect first, as outdated terms are only removed in GC
# Only run it if there might be a match so it doesn't impact performance
if gc:
self.garbage_collect(event[self.ts_field])
self.check_for_match(key, event, False)
else:
self.first_event.pop(key, None)
self.add_match(event)
def garbage_collect(self, timestamp):
""" Remove all occurrence data that is beyond the timeframe away """
for qk, terms in self.cardinality_cache.items():
            for term, last_occurrence in terms.items():
                if timestamp - last_occurrence > self.rules['timeframe']:
self.cardinality_cache[qk].pop(term)
            # Create a placeholder event in case a min_cardinality match occurred
if 'min_cardinality' in self.rules:
event = {self.ts_field: timestamp}
if 'query_key' in self.rules:
event.update({self.rules['query_key']: qk})
self.check_for_match(qk, event, False)
def get_match_str(self, match):
lt = self.rules.get('use_local_time')
starttime = pretty_ts(dt_to_ts(ts_to_dt(match[self.ts_field]) - self.rules['timeframe']), lt)
endtime = pretty_ts(match[self.ts_field], lt)
if 'max_cardinality' in self.rules:
message = ('A maximum of %d unique %s(s) occurred since last alert or between %s and %s\n\n' % (self.rules['max_cardinality'],
self.rules['cardinality_field'],
starttime, endtime))
else:
message = ('Less than %d unique %s(s) occurred since last alert or between %s and %s\n\n' % (self.rules['min_cardinality'],
self.rules['cardinality_field'],
starttime, endtime))
return message
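# Editor's illustrative sketch, not part of ElastAlert: a CardinalityRule that
# fires when more than two distinct source IPs appear within the timeframe.
# The rule keys, field names and sample events are assumptions.
def _example_cardinality_rule():
    rule = {
        'timeframe': datetime.timedelta(minutes=10),
        'cardinality_field': 'source_ip',
        'max_cardinality': 2,
        'timestamp_field': '@timestamp',
    }
    card_rule = CardinalityRule(rule)
    base = ts_to_dt('2020-01-01T00:00:00Z')
    events = [{'@timestamp': base + datetime.timedelta(minutes=m),
               'source_ip': '10.0.0.%d' % m} for m in range(4)]
    card_rule.add_data(events)
    return card_rule.matches  # matches are added once more than 2 unique IPs are cached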
class BaseAggregationRule(RuleType):
def __init__(self, *args):
super(BaseAggregationRule, self).__init__(*args)
bucket_interval = self.rules.get('bucket_interval')
if bucket_interval:
if 'seconds' in bucket_interval:
self.rules['bucket_interval_period'] = str(bucket_interval['seconds']) + 's'
elif 'minutes' in bucket_interval:
self.rules['bucket_interval_period'] = str(bucket_interval['minutes']) + 'm'
elif 'hours' in bucket_interval:
self.rules['bucket_interval_period'] = str(bucket_interval['hours']) + 'h'
elif 'days' in bucket_interval:
self.rules['bucket_interval_period'] = str(bucket_interval['days']) + 'd'
elif 'weeks' in bucket_interval:
self.rules['bucket_interval_period'] = str(bucket_interval['weeks']) + 'w'
else:
raise EAException("Unsupported window size")
if self.rules.get('use_run_every_query_size'):
if total_seconds(self.rules['run_every']) % total_seconds(self.rules['bucket_interval_timedelta']) != 0:
raise EAException("run_every must be evenly divisible by bucket_interval if specified")
else:
if total_seconds(self.rules['buffer_time']) % total_seconds(self.rules['bucket_interval_timedelta']) != 0:
raise EAException("Buffer_time must be evenly divisible by bucket_interval if specified")
def generate_aggregation_query(self):
raise NotImplementedError()
def add_aggregation_data(self, payload):
for timestamp, payload_data in payload.iteritems():
if 'interval_aggs' in payload_data:
self.unwrap_interval_buckets(timestamp, None, payload_data['interval_aggs']['buckets'])
elif 'bucket_aggs' in payload_data:
self.unwrap_term_buckets(timestamp, payload_data['bucket_aggs']['buckets'])
else:
self.check_matches(timestamp, None, payload_data)
def unwrap_interval_buckets(self, timestamp, query_key, interval_buckets):
for interval_data in interval_buckets:
# Use bucket key here instead of start_time for more accurate match timestamp
self.check_matches(ts_to_dt(interval_data['key_as_string']), query_key, interval_data)
def unwrap_term_buckets(self, timestamp, term_buckets):
for term_data in term_buckets:
if 'interval_aggs' in term_data:
self.unwrap_interval_buckets(timestamp, term_data['key'], term_data['interval_aggs']['buckets'])
else:
self.check_matches(timestamp, term_data['key'], term_data)
def check_matches(self, timestamp, query_key, aggregation_data):
raise NotImplementedError()
class MetricAggregationRule(BaseAggregationRule):
""" A rule that matches when there is a low number of events given a timeframe. """
required_options = frozenset(['metric_agg_key', 'metric_agg_type', 'doc_type'])
allowed_aggregations = frozenset(['min', 'max', 'avg', 'sum', 'cardinality', 'value_count'])
def __init__(self, *args):
super(MetricAggregationRule, self).__init__(*args)
self.ts_field = self.rules.get('timestamp_field', '@timestamp')
if 'max_threshold' not in self.rules and 'min_threshold' not in self.rules:
raise EAException("MetricAggregationRule must have at least one of either max_threshold or min_threshold")
self.metric_key = self.rules['metric_agg_key'] + '_' + self.rules['metric_agg_type']
        if self.rules['metric_agg_type'] not in self.allowed_aggregations:
raise EAException("metric_agg_type must be one of %s" % (str(self.allowed_aggregations)))
self.rules['aggregation_query_element'] = self.generate_aggregation_query()
def get_match_str(self, match):
message = 'Threshold violation, %s:%s %s (min: %s max : %s) \n\n' % (
self.rules['metric_agg_type'],
self.rules['metric_agg_key'],
match[self.metric_key],
self.rules.get('min_threshold'),
self.rules.get('max_threshold')
)
return message
def generate_aggregation_query(self):
return {self.metric_key: {self.rules['metric_agg_type']: {'field': self.rules['metric_agg_key']}}}
def check_matches(self, timestamp, query_key, aggregation_data):
if "compound_query_key" in self.rules:
self.check_matches_recursive(timestamp, query_key, aggregation_data, self.rules['compound_query_key'], dict())
else:
metric_val = aggregation_data[self.metric_key]['value']
if self.crossed_thresholds(metric_val):
match = {self.rules['timestamp_field']: timestamp,
self.metric_key: metric_val}
if query_key is not None:
match[self.rules['query_key']] = query_key
self.add_match(match)
def check_matches_recursive(self, timestamp, query_key, aggregation_data, compound_keys, match_data):
if len(compound_keys) < 1:
# shouldn't get to this point, but checking for safety
return
match_data[compound_keys[0]] = aggregation_data['key']
if 'bucket_aggs' in aggregation_data:
for result in aggregation_data['bucket_aggs']['buckets']:
self.check_matches_recursive(timestamp,
query_key,
result,
compound_keys[1:],
match_data)
else:
metric_val = aggregation_data[self.metric_key]['value']
if self.crossed_thresholds(metric_val):
match_data[self.rules['timestamp_field']] = timestamp
match_data[self.metric_key] = metric_val
                # add compound key to payload to allow alerts to trigger for every unique occurrence
compound_value = [match_data[key] for key in self.rules['compound_query_key']]
match_data[self.rules['query_key']] = ",".join(compound_value)
self.add_match(match_data)
def crossed_thresholds(self, metric_value):
if metric_value is None:
return False
if 'max_threshold' in self.rules and metric_value > self.rules['max_threshold']:
return True
if 'min_threshold' in self.rules and metric_value < self.rules['min_threshold']:
return True
return False
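# Editor's illustrative sketch, not part of ElastAlert: checking a metric
# aggregation result against max_threshold without touching Elasticsearch.
# The rule keys, field name and payload shape below are assumptions.
def _example_metric_aggregation_rule():
    rule = {
        'metric_agg_key': 'latency_ms',
        'metric_agg_type': 'avg',
        'max_threshold': 250,
        'timestamp_field': '@timestamp',
    }
    metric_rule = MetricAggregationRule(rule)
    # generate_aggregation_query() produced {'latency_ms_avg': {'avg': {'field': 'latency_ms'}}}
    payload = {'latency_ms_avg': {'value': 300}}
    metric_rule.check_matches(ts_to_dt('2020-01-01T00:00:00Z'), None, payload)
    return metric_rule.matches  # one match, because 300 exceeds max_threshold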
class PercentageMatchRule(BaseAggregationRule):
required_options = frozenset(['match_bucket_filter'])
def __init__(self, *args):
super(PercentageMatchRule, self).__init__(*args)
self.ts_field = self.rules.get('timestamp_field', '@timestamp')
if 'max_percentage' not in self.rules and 'min_percentage' not in self.rules:
raise EAException("PercentageMatchRule must have at least one of either min_percentage or max_percentage")
self.match_bucket_filter = self.rules['match_bucket_filter']
self.rules['aggregation_query_element'] = self.generate_aggregation_query()
def get_match_str(self, match):
percentage_format_string = self.rules.get('percentage_format_string', None)
message = 'Percentage violation, value: %s (min: %s max : %s) of %s items\n\n' % (
percentage_format_string % (match['percentage']) if percentage_format_string else match['percentage'],
self.rules.get('min_percentage'),
self.rules.get('max_percentage'),
match['denominator']
)
return message
def generate_aggregation_query(self):
return {
'percentage_match_aggs': {
'filters': {
'other_bucket': True,
'filters': {
'match_bucket': {
'bool': {
'must': self.match_bucket_filter
}
}
}
}
}
}
def check_matches(self, timestamp, query_key, aggregation_data):
match_bucket_count = aggregation_data['percentage_match_aggs']['buckets']['match_bucket']['doc_count']
other_bucket_count = aggregation_data['percentage_match_aggs']['buckets']['_other_']['doc_count']
if match_bucket_count is None or other_bucket_count is None:
return
else:
total_count = other_bucket_count + match_bucket_count
if total_count == 0:
return
else:
match_percentage = (match_bucket_count * 1.0) / (total_count * 1.0) * 100
if self.percentage_violation(match_percentage):
match = {self.rules['timestamp_field']: timestamp, 'percentage': match_percentage, 'denominator': total_count}
if query_key is not None:
match[self.rules['query_key']] = query_key
self.add_match(match)
def percentage_violation(self, match_percentage):
if 'max_percentage' in self.rules and match_percentage > self.rules['max_percentage']:
return True
if 'min_percentage' in self.rules and match_percentage < self.rules['min_percentage']:
return True
return False
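# Editor's illustrative sketch, not part of ElastAlert: evaluating a
# PercentageMatchRule against a hand-built filters-aggregation result.
# The filter, threshold and document counts are assumptions.
def _example_percentage_match_rule():
    rule = {
        'match_bucket_filter': [{'term': {'status': 'error'}}],
        'max_percentage': 5,
        'timestamp_field': '@timestamp',
    }
    pct_rule = PercentageMatchRule(rule)
    payload = {'percentage_match_aggs': {'buckets': {
        'match_bucket': {'doc_count': 8}, '_other_': {'doc_count': 92}}}}
    pct_rule.check_matches(ts_to_dt('2020-01-01T00:00:00Z'), None, payload)
    return pct_rule.matches  # one match: 8% of 100 documents is above max_percentage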
|
py | 1a53fe4b18497b22c41e941b4d8a29e621cb8245 | from itertools import count
from typing import Iterable, Iterator
def as_range(iterable: Iterable[int]) -> str:
"""From https://codereview.stackexchange.com/q/5196
If {iterable} has at least two elements, return '{first}-{last}', otherwise '{first}'.
"""
items = list(iterable)
if len(items) > 1:
return f"{items[0]}-{items[-1]}"
else:
return f"{items[0]}"
def groupby_range(x: int, c: Iterator[int] = count()) -> int:
return next(c) - x
def groupby_inverse_range(x: int, c: Iterator[int] = count()) -> int:
return x - next(c)
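# Editor's illustrative usage sketch (sample data is made up): group a sorted
# list of integers into consecutive runs and render each run as "start-end".
if __name__ == "__main__":
    from itertools import groupby
    pages = [1, 2, 3, 5, 7, 8, 9]
    # Within a run of consecutive values, next(c) - x stays constant, so
    # groupby() keeps each run together.
    labels = [as_range(group) for _, group in groupby(pages, key=groupby_range)]
    print(", ".join(labels))  # expected output: 1-3, 5, 7-9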
|
py | 1a53feac28ae612fb77eac6768f26106fb8cb8c7 | # coding: utf-8
import sys
sys.path.append('..')
from Natural_Language_Processing.common.util import preprocess, create_co_matrix, most_similar
text = 'You say goodbye and I say hello.'
corpus, word_to_id, id_to_word = preprocess(text)
vocab_size = len(word_to_id)
C = create_co_matrix(corpus, vocab_size)
most_similar('you', word_to_id, id_to_word, C, top=5)
|
py | 1a53ff4ebccae2605ff80c44fe3d46992b755a20 | #!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
import tf
import cv2
import yaml
from scipy.spatial import KDTree
STATE_COUNT_THRESHOLD = 3
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.camera_image = None
self.lights = []
self.waypoints_2d = None
self.waypoint_tree = None
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
config_string = rospy.get_param("/traffic_light_config")
self.config = yaml.load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.bridge = CvBridge()
self.light_classifier = TLClassifier()
self.listener = tf.TransformListener()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.lights = msg.lights
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
self.has_image = True
self.camera_image = msg
light_wp, state = self.process_traffic_lights()
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
def get_closest_waypoint(self, x, y):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
pose (Pose): position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
# closest_idx = None
# if self.waypoint_tree:
# closest_idx = self.waypoint_tree.query([x,y], 1)[1]
# return closest_idx
return self.waypoint_tree.query([x, y], 1)[1]
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# if(not self.has_image):
# self.prev_light_loc = None
# return False
# cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, "bgr8")
#Get classification
# return self.light_classifier.get_classification(cv_image)
return light.state
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
int: index of waypoint closes to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
closest_light = None
line_wp_idx = None
# List of positions that correspond to the line to stop in front of for a given intersection
stop_line_positions = self.config['stop_line_positions']
if self.pose and self.waypoints:
#car_position = self.get_closest_waypoint(self.pose.pose)
car_wp_idx = self.get_closest_waypoint(self.pose.pose.position.x, self.pose.pose.position.y)
diff = len(self.waypoints.waypoints) # we have 8 intersections
for i, light in enumerate(self.lights):
# Get stop line waypoint index
line = stop_line_positions[i]
temp_wp_idx = self.get_closest_waypoint(line[0], line[1])
# Find closest stop line waypoint index
d = temp_wp_idx - car_wp_idx
if d >= 0 and d < diff:
diff = d
closest_light = light
line_wp_idx = temp_wp_idx
if closest_light:
state = self.get_light_state(closest_light)
return line_wp_idx, state
return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
|
py | 1a54013fb4e8d26e34990c3b26fa1413bb9a2ab5 | import pytest
from wemake_python_styleguide.violations.best_practices import (
ProtectedModuleMemberViolation,
ProtectedModuleViolation,
)
from wemake_python_styleguide.visitors.ast.imports import WrongImportVisitor
import_public = 'import public'
import_protected = 'import _protected'
import_from_protected = 'from _protected import something'
import_from_protected_path = 'from path._protected import something'
import_protected_from = 'from some.path import _protected'
import_from_public = 'from public import something'
import_from_public_path = 'from public.path import something'
import_protected_as_alias = 'from some.path import _protected as not_protected'
@pytest.mark.parametrize('code', [
import_public,
import_from_public,
import_from_public_path,
])
def test_correct_import(
assert_errors,
parse_ast_tree,
code,
default_options,
):
"""Testing that correct imports are allowed."""
tree = parse_ast_tree(code)
visitor = WrongImportVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [])
@pytest.mark.parametrize('code', [
import_protected,
import_from_protected,
import_from_protected_path,
])
def test_incorrect_modules_import(
assert_errors,
assert_error_text,
parse_ast_tree,
code,
default_options,
):
"""Testing that imports from protected modules are restricted."""
tree = parse_ast_tree(code)
visitor = WrongImportVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ProtectedModuleViolation])
assert_error_text(visitor, '_protected')
@pytest.mark.parametrize('code', [
import_protected_from,
import_protected_as_alias,
])
def test_incorrect_module_members_import(
assert_errors,
assert_error_text,
parse_ast_tree,
code,
default_options,
):
"""Testing that importing of protected objects is restricted."""
tree = parse_ast_tree(code)
visitor = WrongImportVisitor(default_options, tree=tree)
visitor.run()
assert_errors(visitor, [ProtectedModuleMemberViolation])
assert_error_text(visitor, '_protected')
|
py | 1a5401cfb0d8a521a68c89fbeb5bc6f8d03a7a95 | # First Party
from metadamage.cli import cli_main
cli_main()
|
py | 1a540200b73a837eda08d487631abbf9a109f4a5 | class CylindricalSurface(Surface,IDisposable):
""" A cylindrical surface. """
@staticmethod
def Create(frameOfReference,radius):
"""
Create(frameOfReference: Frame,radius: float) -> CylindricalSurface
Construct a cylindrical surface defined by a local coordinate system and a
radius.
frameOfReference: frameOfReference is an orthonormal frame that defines a local coordinate system
for the cylinder.
Frame.Origin is a point on the cylinder's axis.
Frame.BasisZ points along the axis,while Frame.BasisX and Frame.BasisY are
orthogonal to the axis. The frame may be either left-handed or right-handed
(see Frame.IsRightHanded). Note that
the "handedness" of the frame does
not,by itself,determine the surface's orientation.
radius: Radius of the circle that defines the base of the cylindrical surface.
Returns: The created CylindricalSurface.
"""
pass
def Dispose(self):
""" Dispose(self: Surface,A_0: bool) """
pass
def GetFrameOfReference(self):
"""
GetFrameOfReference(self: CylindricalSurface) -> Frame
Returns frame of reference associated with this CylindricalSurface.
Returns: Frame of reference associated with this CylindricalSurface.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: Surface,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
Axis=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Axis of the cylinder. This is the Z axis of the local coordinate system associated with this cylinder.
Get: Axis(self: CylindricalSurface) -> XYZ
"""
Origin=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Center of the circle that defines the base of the cylinder. This is the origin of the local coordinate system associated with this cylinder.
Get: Origin(self: CylindricalSurface) -> XYZ
"""
Radius=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Radius of the circle that defines the base of this cylinder.
Get: Radius(self: CylindricalSurface) -> float
"""
XDir=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""X axis of the local coordinate system associated with this cylinder.
Get: XDir(self: CylindricalSurface) -> XYZ
"""
YDir=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""X axis of the local coordinate system associated with this cylinder.
Get: YDir(self: CylindricalSurface) -> XYZ
"""
|
py | 1a54022758c61352dd8dcf4268657c38269e034b | # -*- coding: utf-8 -*-
# Copyright 2019 Cohesity Inc.
import cohesity_management_sdk.models.run_job_snapshot_target
import cohesity_management_sdk.models.run_now_parameters
class RunProtectionJobParam(object):
"""Implementation of the 'RunProtectionJobParam' model.
Specify the parameters to run a protection job.
Attributes:
copy_run_targets (list of RunJobSnapshotTarget): Optional parameter to
be set if you want specific replication or archival associated
with the policy to run.
run_now_parameters (list of RunNowParameters): Optional parameters of
a Run Now operation.
run_type (RunTypeRunProtectionJobParamEnum): Specifies the type of
backup. If not specified, 'kRegular' is assumed. 'kRegular'
indicates a incremental (CBT) backup. Incremental backups
utilizing CBT (if supported) are captured of the target protection
objects. The first run of a kRegular schedule captures all the
blocks. 'kFull' indicates a full (no CBT) backup. A complete
backup (all blocks) of the target protection objects are always
captured and Change Block Tracking (CBT) is not utilized. 'kLog'
indicates a Database Log backup. Capture the database transaction
logs to allow rolling back to a specific point in time. 'kSystem'
indicates a system backup. System backups are used to do bare
metal recovery of the system to a specific point in time.
source_ids (list of long|int): Optional parameter if you want to back
up only a subset of sources that are protected by the job in this
run. If a Run Now operation is to be performed then the source ids
should only be provided in the runNowParameters along with the
database Ids.
"""
# Create a mapping from Model property names to API property names
_names = {
"copy_run_targets":'copyRunTargets',
"run_now_parameters":'runNowParameters',
"run_type":'runType',
"source_ids":'sourceIds'
}
def __init__(self,
copy_run_targets=None,
run_now_parameters=None,
run_type=None,
source_ids=None):
"""Constructor for the RunProtectionJobParam class"""
# Initialize members of the class
self.copy_run_targets = copy_run_targets
self.run_now_parameters = run_now_parameters
self.run_type = run_type
self.source_ids = source_ids
@classmethod
def from_dictionary(cls,
dictionary):
"""Creates an instance of this model from a dictionary
Args:
dictionary (dictionary): A dictionary representation of the object as
obtained from the deserialization of the server's response. The keys
MUST match property names in the API description.
Returns:
object: An instance of this structure class.
"""
if dictionary is None:
return None
# Extract variables from the dictionary
copy_run_targets = None
if dictionary.get('copyRunTargets') != None:
copy_run_targets = list()
for structure in dictionary.get('copyRunTargets'):
copy_run_targets.append(cohesity_management_sdk.models.run_job_snapshot_target.RunJobSnapshotTarget.from_dictionary(structure))
run_now_parameters = None
if dictionary.get('runNowParameters') != None:
run_now_parameters = list()
for structure in dictionary.get('runNowParameters'):
run_now_parameters.append(cohesity_management_sdk.models.run_now_parameters.RunNowParameters.from_dictionary(structure))
run_type = dictionary.get('runType')
source_ids = dictionary.get('sourceIds')
# Return an object of this model
return cls(copy_run_targets,
run_now_parameters,
run_type,
source_ids)
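# Editor's illustrative usage sketch (field values are made up): build the
# model from an API-style dictionary and read the mapped attributes back.
def _example_run_protection_job_param():
    payload = {'runType': 'kRegular', 'sourceIds': [101, 102]}
    param = RunProtectionJobParam.from_dictionary(payload)
    return param.run_type, param.source_ids  # ('kRegular', [101, 102])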
|
py | 1a54024eb011568ef2d0e16a2b77666f37a5465c | #!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from locust import HttpUser, task, TaskSet, between
username = "username"
password = "password"
products = [
'0PUK6V6EV0',
'1YMWWN1N4O',
'2ZYFJ3GM2N',
'66VCHSJNUP',
'6E92ZMYYFZ',
'9SIQT8TOJO',
'L9ECAV7KIM',
'LS4PSXUNUM',
'OLJCESPC7Z']
#class UserBehavior(TaskSet):
#
# def on_start(self):
# index(self)
#
# tasks = {index: 1,
# setCurrency: 2,
# browseProduct: 10,
# addToCart: 2,
# viewCart: 3,
# checkout: 1}
class WebsiteUser(HttpUser):
wait_time = between(0.1,1)
#task_set = UserBehavior
#min_wait = 1000
#max_wait = 10000
@task(1)
def index(l):
l.client.get("/")
@task(2)
def setCurrency(l):
currencies = ['EUR', 'USD', 'JPY', 'CAD']
l.client.post("/setCurrency",
{'currency_code': random.choice(currencies)})
@task(10)
def browseProduct(l):
l.client.get("/product/" + random.choice(products))
@task(3)
def viewCart(l):
l.client.get("/cart")
@task(2)
def addToCart(l):
product = random.choice(products)
l.client.get("/product/" + product)
l.client.post("/cart", {
'product_id': product,
'quantity': random.choice([1,2,3,4,5,10])})
@task(1)
def checkout(l):
product = random.choice(products)
l.client.get("/product/" + product)
l.client.post("/cart", {
'product_id': product,
'quantity': random.choice([1,2,3,4,5,10])})
l.client.post("/cart/checkout", {
'email': '[email protected]',
'street_address': '1600 Amphitheatre Parkway',
'zip_code': '94043',
'city': 'Mountain View',
'state': 'CA',
'country': 'United States',
'credit_card_number': '4432-8015-6152-0454',
'credit_card_expiration_month': '1',
'credit_card_expiration_year': '2039',
'credit_card_cvv': '672',
})
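# Usage note (assumed, not from the original file): a locustfile like this is
# normally launched through the locust CLI with the frontend under test as the
# host, e.g. `locust -f locustfile.py --host="http://frontend:80"`. The @task
# weights above (1/2/10/3/2/1) set the relative frequency of each user action.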
|
py | 1a54025f327306a6502bf22bdc5696fd382003b7 | import pandas as pd
import numpy as np
# Define functions for model
def confirmed_to_onset(confirmed, p_delay, col_name='num_cases', min_onset_date=None):
min_onset_date = pd.to_datetime(min_onset_date)
# Reverse cases so that we convolve into the past
convolved = np.convolve(np.squeeze(confirmed.iloc[::-1].values), p_delay)
# Calculate the new date range
dr = pd.date_range(end=confirmed.index[-1],
periods=len(convolved))
# Flip the values and assign the date range
onset = pd.Series(np.flip(convolved), index=dr, name=col_name)
if min_onset_date:
onset = np.round(onset.loc[min_onset_date:])
else:
onset = np.round(onset.iloc[onset.values>=1])
onset.index.name = 'date'
return pd.DataFrame(onset)
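# Rough usage sketch (assumed inputs, not from the original script): `confirmed`
# is a Series of daily confirmed counts indexed by date and `p_delay` a 1-D
# array of confirmation-delay probabilities for 0, 1, 2, ... days.
#   confirmed = pd.Series([1, 3, 5, 8],
#                         index=pd.date_range('2020-03-01', periods=4))
#   p_delay = np.array([0.1, 0.5, 0.3, 0.1])
#   onset = confirmed_to_onset(confirmed, p_delay)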
# Smooths cases using a rolling window and gaussian sampling
def prepare_cases(daily_cases, col='num_cases', out_col=None, cutoff=0):
if not out_col:
out_col = 'smoothed_'+str(col)
daily_cases[out_col] = daily_cases[col].rolling(7,
win_type='gaussian',
min_periods=1,
center=True).mean(std=2).round()
idx_start = np.searchsorted(daily_cases[out_col], cutoff)
daily_cases[out_col] = daily_cases[out_col].iloc[idx_start:]
return daily_cases
# Smooths cases using a rolling window and gaussian sampling
def smooth_1d(signal, col='num_cases', out_col=None, cutoff=0):
if not out_col:
out_col = 'smoothed_'+str(col)
signal[out_col] = signal[col].rolling(7,
win_type='gaussian',
min_periods=1,
center=True).mean(std=2)
idx_start = np.searchsorted(signal[out_col], cutoff)
signal[out_col] = signal[out_col].iloc[idx_start:]
return signal |
py | 1a540336ba2d97bb51868c41ea7df7cae579159e | from . import *
class AWS_CodeStar_GitHubRepository_S3(CloudFormationProperty):
def write(self, w):
with w.block("s3"):
self.property(w, "ObjectVersion", "object_version", StringValueConverter())
self.property(w, "Bucket", "bucket", StringValueConverter())
self.property(w, "Key", "key", StringValueConverter())
class AWS_CodeStar_GitHubRepository_Code(CloudFormationProperty):
def write(self, w):
with w.block("code"):
self.block(w, "S3", AWS_CodeStar_GitHubRepository_S3)
class AWS_CodeStar_GitHubRepository(CloudFormationResource):
cfn_type = "AWS::CodeStar::GitHubRepository"
tf_type = "aws_code_star_git_hub_repository" # TODO: Most likely not working
ref = "arn"
attrs = {}
def write(self, w):
with self.resource_block(w):
self.property(w, "EnableIssues", "enable_issues", BasicValueConverter())
self.property(w, "RepositoryName", "repository_name", StringValueConverter())
self.property(w, "RepositoryAccessToken", "repository_access_token", StringValueConverter())
self.property(w, "RepositoryOwner", "repository_owner", StringValueConverter())
self.property(w, "IsPrivate", "is_private", BasicValueConverter())
self.block(w, "Code", AWS_CodeStar_GitHubRepository_Code)
self.property(w, "RepositoryDescription", "repository_description", StringValueConverter())
|
py | 1a54035dfe1a1917eb2340d65bf471eb022666ad | #!/usr/bin/env python
##############################################################################
# Copyright (c) 2015 Huawei Technologies Co.,Ltd and others.
#
# All rights reserved. This program and the accompanying materials
# are made available under the terms of the Apache License, Version 2.0
# which accompanies this distribution, and is available at
# http://www.apache.org/licenses/LICENSE-2.0
##############################################################################
# Unittest for yardstick.benchmark.contexts.dummy
from __future__ import absolute_import
import unittest
from yardstick.benchmark.contexts import dummy
class DummyContextTestCase(unittest.TestCase):
def setUp(self):
self.test_context = dummy.DummyContext()
def test__get_server(self):
self.test_context.init(None)
self.test_context.deploy()
result = self.test_context._get_server(None)
self.assertEqual(result, None)
self.test_context.undeploy()
|
py | 1a540365e44233e4a1c814e8194ac3104486a116 | # -*- coding: utf-8 -*-
"""
meraki_sdk
This file was automatically generated for meraki by APIMATIC v2.0 ( https://apimatic.io ).
"""
class GetHelpSubtabEnum(object):
"""Implementation of the 'GetHelpSubtab' enum.
The 'Help -> Get Help' subtab on which Cisco Meraki KB, Product Manuals,
and Support/Case Information are displayed. Note
that if this subtab is hidden, branding customizations for the KB on
'Get help', Cisco Meraki product documentation,
and support contact info will not be visible. Can be one of 'default
or inherit', 'hide' or 'show'.
Attributes:
        ENUM_DEFAULT_OR_INHERIT: TODO: type description here.
HIDE: TODO: type description here.
SHOW: TODO: type description here.
"""
ENUM_DEFAULT_OR_INHERIT = 'default or inherit'
HIDE = 'hide'
SHOW = 'show'
|
py | 1a540557a8f0b56cd624877296fd14d1e52515ef | """empty message
Revision ID: 5e78cc772642
Revises: ec21bd75ea92
Create Date: 2020-07-22 22:44:45.754328
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5e78cc772642'
down_revision = 'ec21bd75ea92'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('recipe',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('recipe_name', sa.Text(), nullable=True),
sa.Column('recipe_link', sa.Text(), nullable=True),
sa.Column('image_link', sa.Text(), nullable=True),
sa.Column('instructions', sa.Text(), nullable=True),
sa.Column('servings', sa.Text(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('username', sa.String(length=64), nullable=True),
sa.Column('email', sa.String(length=120), nullable=True),
sa.Column('password_hash', sa.String(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)
op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)
op.drop_index('ix_users_email', table_name='users')
op.drop_index('ix_users_username', table_name='users')
op.drop_table('users')
op.drop_table('recipes')
op.drop_constraint(None, 'ingredients', type_='foreignkey')
op.create_foreign_key(None, 'ingredients', 'recipe', ['recipe_id'], ['id'])
op.drop_constraint(None, 'saved_recipes', type_='foreignkey')
op.drop_constraint(None, 'saved_recipes', type_='foreignkey')
op.create_foreign_key(None, 'saved_recipes', 'user', ['user_id'], ['id'])
op.create_foreign_key(None, 'saved_recipes', 'recipe', ['recipe_id'], ['id'])
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint(None, 'saved_recipes', type_='foreignkey')
op.drop_constraint(None, 'saved_recipes', type_='foreignkey')
op.create_foreign_key(None, 'saved_recipes', 'users', ['user_id'], ['id'])
op.create_foreign_key(None, 'saved_recipes', 'recipes', ['recipe_id'], ['id'])
op.drop_constraint(None, 'ingredients', type_='foreignkey')
op.create_foreign_key(None, 'ingredients', 'recipes', ['recipe_id'], ['id'])
op.create_table('recipes',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('recipe_name', sa.TEXT(), nullable=True),
sa.Column('recipe_link', sa.TEXT(), nullable=True),
sa.Column('image_link', sa.TEXT(), nullable=True),
sa.Column('instructions', sa.TEXT(), nullable=True),
sa.Column('servings', sa.TEXT(), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_table('users',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('username', sa.VARCHAR(length=64), nullable=True),
sa.Column('email', sa.VARCHAR(length=120), nullable=True),
sa.Column('password_hash', sa.VARCHAR(length=128), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index('ix_users_username', 'users', ['username'], unique=1)
op.create_index('ix_users_email', 'users', ['email'], unique=1)
op.drop_index(op.f('ix_user_username'), table_name='user')
op.drop_index(op.f('ix_user_email'), table_name='user')
op.drop_table('user')
op.drop_table('recipe')
# ### end Alembic commands ###
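# Operational note (not part of the autogenerated migration): this revision is
# applied or reverted with the standard Alembic commands, e.g.
#   alembic upgrade 5e78cc772642     # runs upgrade() above
#   alembic downgrade ec21bd75ea92   # runs downgrade() back to the parent revision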
|
py | 1a5406576678d4a59c4c97fec925d29bbc85fa0a | import numpy as np
def validate_points(points: np.array) -> np.array:
# If the user is tracking only a single point, reformat it slightly.
if points.shape == (2,):
points = points[np.newaxis, ...]
elif len(points.shape) == 1:
raise_detection_error(points)
else:
if points.shape[1] != 2 or len(points.shape) > 2:
raise_detection_error(points)
return points
def raise_detection_error(points):
message = (
f"Each `Detection` object should have a property `points` of shape (num_of_points_to_track, 2), not {points.shape}. "
"Check your `Detection` list creation code. "
"You can read the documentation for the `Detection` class here: "
"https://github.com/tryolabs/norfair/tree/master/docs#detection\n"
)
raise ValueError(message)
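# Illustrative checks (assumed, not in the original module): validate_points
# promotes a single (x, y) pair to shape (1, 2) and rejects anything that is
# not an (N, 2) array.
#   validate_points(np.array([10.0, 20.0])).shape       # -> (1, 2)
#   validate_points(np.array([[1, 2], [3, 4]])).shape   # -> (2, 2)
#   validate_points(np.array([[1, 2, 3]]))              # raises ValueError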
|
py | 1a5406add8ede70229ed0b2ce91679f2285ba782 | """Visualize learned representation."""
import os
import argparse
import importlib
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.pylab as pylab
params = {'legend.fontsize': 'large',
'axes.labelsize': 'x-large',
'axes.titlesize':'x-large',
'xtick.labelsize':'large',
'ytick.labelsize':'large'}
pylab.rcParams.update(params)
parser = argparse.ArgumentParser()
parser.add_argument('--log_base_dir', type=str,
default=os.path.join(os.getcwd(), 'log'))
parser.add_argument('--output_sub_dir', type=str,
default='learning_curves')
FLAGS = parser.parse_args()
def main():
# setup log directories
output_dir = os.path.join(FLAGS.log_base_dir, FLAGS.output_sub_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
envs = ['OneRoom', 'TwoRoom', 'HardMaze']
r_modes = ['sparse', 'mix', 'l2', 'rawmix']
colors = ['royalblue', 'darkorange', 'seagreen', 'tomato']
linestyles = ['--', '-', '-.', ':']
linewidth = 3
for env_id in envs:
loaded_results = {}
for r_mode in r_modes:
log_dir = os.path.join(
FLAGS.log_base_dir, 'dqn_repr', env_id, r_mode)
results_file = os.path.join(log_dir, 'results.csv')
results = np.loadtxt(results_file, delimiter=',')
loaded_results[r_mode] = results
# plot
handles = []
for r_mode, c, ls in zip(r_modes, colors, linestyles):
x = loaded_results[r_mode][:, 0]
y = loaded_results[r_mode][:, 1]
h, = plt.plot(x, y, color=c, linestyle=ls, linewidth=linewidth,
label=r_mode)
handles.append(h)
plt.title(env_id)
plt.legend(handles=handles)
plt.xlabel('train steps')
plt.ylabel('episodic returns')
figfile = os.path.join(output_dir, '{}.png'.format(env_id))
plt.savefig(figfile, bbox_inches='tight')
plt.clf()
print('Plot saved at {}.'.format(figfile))
if __name__ == '__main__':
main()
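# Expected data layout (inferred from np.loadtxt and the axis labels above, not
# documented in the original): each log/dqn_repr/<env>/<r_mode>/results.csv is a
# two-column CSV of (train step, episodic return), e.g.
#   10000,12.5
#   20000,31.0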
|
py | 1a5409a3c4bc073414db2807b6eaf080fd1ef031 | import abcd
@abcd.s
class A(object):
a = abcd.ib()
A(a="test") |
py | 1a540a866326ac64662207cbb2ea43a6b25187fa | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/workspace/CarNd-Capstone/ros/devel/include".split(';') if "/home/workspace/CarNd-Capstone/ros/devel/include" != "" else []
PROJECT_CATKIN_DEPENDS = "message_runtime;std_msgs;geometry_msgs".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "dbw_mkz_msgs"
PROJECT_SPACE_DIR = "/home/workspace/CarNd-Capstone/ros/devel"
PROJECT_VERSION = "1.0.12"
|
py | 1a540b9c6ca90a9f75ab0e5dd802a80ba1bc47e8 | import bblfsh_sonar_checks.utils as utils
import bblfsh
def check(uast):
findings = []
methods = utils.get_methods(uast)
for m in methods:
# Should look at the roles to filter by Boolean but there is a bug in the
# Java driver https://github.com/bblf../../java-driver/issues/83 so we check the token
if m.return_ and m.return_.type_name == 'boolean':
if any(list(bblfsh.filter(m.body, "//*[@roleReturn]//*[@roleNull]"))):
findings.append({"msg": "Don't return Null on Boolean-return methods"})
return findings
if __name__ == '__main__': utils.run_default_fixture(__file__, check)
|
py | 1a540c50f858592d98c05be848f7f4fdc39dead2 | # import tweepy
import json
import time
import gunicorn
import tweepy
from text_to_image import generate_media
import datetime
def update_index():
"Updates samples index from which tweets are being generated"
with open('counter.json', 'r+') as f:
data = json.load(f)
data['last_index'] = data['last_index'] + 1 # <--- add `id` value.
f.seek(0) # <--- should reset file position to the beginning.
json.dump(data, f, indent=4)
f.truncate() # remove remaining part
def get_start_index():
"Gets the current index of generated samples"
with open('counter.json', 'r') as f:
data = json.load(f)
index = data['last_index'] # <--- add `id` value.
return index
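# Assumed shape of counter.json (inferred from the two helpers above): a single
# JSON object tracking the index of the last tweeted sample, e.g.
#   {"last_index": 0}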
with open('data/generated/samples.json','r') as f:
scripts = json.load(f)
with open("env/auth.json", "r") as f:
data = json.load(f)
auth = tweepy.OAuthHandler(data['api_key'], data['api_secret'])
auth.set_access_token(data['access_token'], data['access_token_secret'])
starttime = time.time()
print("Bot Online")
while True:
index = get_start_index()
update_index()
try:
generate_media(scripts[index])
print("I get here")
api = tweepy.API(auth)
media = api.media_upload('images/script_output.jpeg')
api.update_status(status=f"{index}:{scripts[index][:100].strip()}...", media_ids=[media.media_id])
print("Updated twitter with sample of index ", index)
except BaseException as e:
print('Something went wrong')
print(e)
print('working still')
time.sleep(4_600 - time.time() % 4_600)
|
py | 1a540d00da88cbf39e5b98152b3c3beb2a9ae940 | """
Tests for DatetimeIndex methods behaving like their Timestamp counterparts
"""
from datetime import datetime
import numpy as np
import pytest
from pandas._libs.tslibs import OutOfBoundsDatetime, to_offset
from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG
import pandas as pd
from pandas import DatetimeIndex, Timestamp, date_range
import pandas._testing as tm
class TestDatetimeIndexOps:
def test_dti_time(self):
rng = date_range("1/1/2000", freq="12min", periods=10)
result = pd.Index(rng).time
expected = [t.time() for t in rng]
assert (result == expected).all()
def test_dti_date(self):
rng = date_range("1/1/2000", freq="12H", periods=10)
result = pd.Index(rng).date
expected = [t.date() for t in rng]
assert (result == expected).all()
@pytest.mark.parametrize("data", [["1400-01-01"], [datetime(1400, 1, 1)]])
def test_dti_date_out_of_range(self, data):
# GH#1475
msg = "Out of bounds nanosecond timestamp: 1400-01-01 00:00:00"
with pytest.raises(OutOfBoundsDatetime, match=msg):
DatetimeIndex(data)
@pytest.mark.parametrize(
"field",
[
"dayofweek",
"day_of_week",
"dayofyear",
"day_of_year",
"quarter",
"days_in_month",
"is_month_start",
"is_month_end",
"is_quarter_start",
"is_quarter_end",
"is_year_start",
"is_year_end",
],
)
def test_dti_timestamp_fields(self, field):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
expected = getattr(idx, field)[-1]
result = getattr(Timestamp(idx[-1]), field)
assert result == expected
def test_dti_timestamp_isocalendar_fields(self):
idx = tm.makeDateIndex(100)
expected = tuple(idx.isocalendar().iloc[-1].to_list())
result = idx[-1].isocalendar()
assert result == expected
def test_dti_timestamp_freq_fields(self):
# extra fields from DatetimeIndex like quarter and week
idx = tm.makeDateIndex(100)
assert idx.freq == Timestamp(idx[-1], idx.freq).freq
assert idx.freqstr == Timestamp(idx[-1], idx.freq).freqstr
# ----------------------------------------------------------------
# DatetimeIndex.round
def test_round_daily(self):
dti = date_range("20130101 09:10:11", periods=5)
result = dti.round("D")
expected = date_range("20130101", periods=5)
tm.assert_index_equal(result, expected)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
result = dti.round("D")
expected = date_range("20130101", periods=5).tz_localize("US/Eastern")
tm.assert_index_equal(result, expected)
result = dti.round("s")
tm.assert_index_equal(result, dti)
@pytest.mark.parametrize(
"freq, error_msg",
[
("Y", "<YearEnd: month=12> is a non-fixed frequency"),
("M", "<MonthEnd> is a non-fixed frequency"),
("foobar", "Invalid frequency: foobar"),
],
)
def test_round_invalid(self, freq, error_msg):
dti = date_range("20130101 09:10:11", periods=5)
dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
with pytest.raises(ValueError, match=error_msg):
dti.round(freq)
def test_round(self, tz_naive_fixture):
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="30Min", tz=tz)
elt = rng[1]
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 00:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 01:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 02:00:00", tz=tz, freq="30T"),
Timestamp("2016-01-01 02:00:00", tz=tz, freq="30T"),
]
)
expected_elt = expected_rng[1]
tm.assert_index_equal(rng.round(freq="H"), expected_rng)
assert elt.round(freq="H") == expected_elt
msg = INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
rng.round(freq="foo")
with pytest.raises(ValueError, match=msg):
elt.round(freq="foo")
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
rng.round(freq="M")
with pytest.raises(ValueError, match=msg):
elt.round(freq="M")
# GH#14440 & GH#15578
index = DatetimeIndex(["2016-10-17 12:00:00.0015"], tz=tz)
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.002000"], tz=tz)
tm.assert_index_equal(result, expected)
for freq in ["us", "ns"]:
tm.assert_index_equal(index, index.round(freq))
index = DatetimeIndex(["2016-10-17 12:00:00.00149"], tz=tz)
result = index.round("ms")
expected = DatetimeIndex(["2016-10-17 12:00:00.001000"], tz=tz)
tm.assert_index_equal(result, expected)
index = DatetimeIndex(["2016-10-17 12:00:00.001501031"])
result = index.round("10ns")
expected = DatetimeIndex(["2016-10-17 12:00:00.001501030"])
tm.assert_index_equal(result, expected)
with tm.assert_produces_warning(False):
ts = "2016-10-17 12:00:00.001501031"
DatetimeIndex([ts]).round("1010ns")
def test_no_rounding_occurs(self, tz_naive_fixture):
# GH 21262
tz = tz_naive_fixture
rng = date_range(start="2016-01-01", periods=5, freq="2Min", tz=tz)
expected_rng = DatetimeIndex(
[
Timestamp("2016-01-01 00:00:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:02:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:04:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:06:00", tz=tz, freq="2T"),
Timestamp("2016-01-01 00:08:00", tz=tz, freq="2T"),
]
)
tm.assert_index_equal(rng.round(freq="2T"), expected_rng)
@pytest.mark.parametrize(
"test_input, rounder, freq, expected",
[
(["2117-01-01 00:00:45"], "floor", "15s", ["2117-01-01 00:00:45"]),
(["2117-01-01 00:00:45"], "ceil", "15s", ["2117-01-01 00:00:45"]),
(
["2117-01-01 00:00:45.000000012"],
"floor",
"10ns",
["2117-01-01 00:00:45.000000010"],
),
(
["1823-01-01 00:00:01.000000012"],
"ceil",
"10ns",
["1823-01-01 00:00:01.000000020"],
),
(["1823-01-01 00:00:01"], "floor", "1s", ["1823-01-01 00:00:01"]),
(["1823-01-01 00:00:01"], "ceil", "1s", ["1823-01-01 00:00:01"]),
(["2018-01-01 00:15:00"], "ceil", "15T", ["2018-01-01 00:15:00"]),
(["2018-01-01 00:15:00"], "floor", "15T", ["2018-01-01 00:15:00"]),
(["1823-01-01 03:00:00"], "ceil", "3H", ["1823-01-01 03:00:00"]),
(["1823-01-01 03:00:00"], "floor", "3H", ["1823-01-01 03:00:00"]),
(
("NaT", "1823-01-01 00:00:01"),
"floor",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
(
("NaT", "1823-01-01 00:00:01"),
"ceil",
"1s",
("NaT", "1823-01-01 00:00:01"),
),
],
)
def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
dt = DatetimeIndex(list(test_input))
func = getattr(dt, rounder)
result = func(freq)
expected = DatetimeIndex(list(expected))
assert expected.equals(result)
@pytest.mark.parametrize(
"start, index_freq, periods",
[("2018-01-01", "12H", 25), ("2018-01-01 0:0:0.124999", "1ns", 1000)],
)
@pytest.mark.parametrize(
"round_freq",
[
"2ns",
"3ns",
"4ns",
"5ns",
"6ns",
"7ns",
"250ns",
"500ns",
"750ns",
"1us",
"19us",
"250us",
"500us",
"750us",
"1s",
"2s",
"3s",
"12H",
"1D",
],
)
def test_round_int64(self, start, index_freq, periods, round_freq):
dt = date_range(start=start, freq=index_freq, periods=periods)
unit = to_offset(round_freq).nanos
# test floor
result = dt.floor(round_freq)
diff = dt.asi8 - result.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), f"floor not a {round_freq} multiple"
assert (0 <= diff).all() and (diff < unit).all(), "floor error"
# test ceil
result = dt.ceil(round_freq)
diff = result.asi8 - dt.asi8
mod = result.asi8 % unit
assert (mod == 0).all(), f"ceil not a {round_freq} multiple"
assert (0 <= diff).all() and (diff < unit).all(), "ceil error"
# test round
result = dt.round(round_freq)
diff = abs(result.asi8 - dt.asi8)
mod = result.asi8 % unit
assert (mod == 0).all(), f"round not a {round_freq} multiple"
assert (diff <= unit // 2).all(), "round error"
if unit % 2 == 0:
assert (
result.asi8[diff == unit // 2] % 2 == 0
).all(), "round half to even error"
# ----------------------------------------------------------------
# DatetimeIndex.normalize
def test_normalize(self):
rng = date_range("1/1/2000 9:30", periods=10, freq="D")
result = rng.normalize()
expected = date_range("1/1/2000", periods=10, freq="D")
tm.assert_index_equal(result, expected)
arr_ns = np.array([1380585623454345752, 1380585612343234312]).astype(
"datetime64[ns]"
)
rng_ns = DatetimeIndex(arr_ns)
rng_ns_normalized = rng_ns.normalize()
arr_ns = np.array([1380585600000000000, 1380585600000000000]).astype(
"datetime64[ns]"
)
expected = DatetimeIndex(arr_ns)
tm.assert_index_equal(rng_ns_normalized, expected)
assert result.is_normalized
assert not rng.is_normalized
def test_normalize_nat(self):
dti = DatetimeIndex([pd.NaT, Timestamp("2018-01-01 01:00:00")])
result = dti.normalize()
expected = DatetimeIndex([pd.NaT, Timestamp("2018-01-01")])
tm.assert_index_equal(result, expected)
class TestDateTimeIndexToJulianDate:
def test_1700(self):
dr = date_range(start=Timestamp("1710-10-01"), periods=5, freq="D")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_2000(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="D")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_hour(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="H")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_minute(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="T")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
def test_second(self):
dr = date_range(start=Timestamp("2000-02-27"), periods=5, freq="S")
r1 = pd.Index([x.to_julian_date() for x in dr])
r2 = dr.to_julian_date()
assert isinstance(r2, pd.Float64Index)
tm.assert_index_equal(r1, r2)
|
py | 1a540d517cf75b7f6513261eef272f38ec43f3d3 | # This file is part of QuTiP: Quantum Toolbox in Python.
#
# Copyright (c) 2011 and later, All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the QuTiP: Quantum Toolbox in Python nor the names
# of its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
###############################################################################
import numbers
import numpy as np
import scipy.linalg as la
from .cy.interpolate import interp, arr_interp, zinterp, arr_zinterp
__all__ = ['Cubic_Spline']
class Cubic_Spline:
"""
Calculates coefficients for a cubic spline
interpolation of a given data set.
This function assumes that the data is sampled
uniformly over a given interval.
Parameters
----------
a : float
Lower bound of the interval.
b : float
Upper bound of the interval.
y : ndarray
Function values at interval points.
alpha : float
Second-order derivative at a. Default is 0.
beta : float
Second-order derivative at b. Default is 0.
Attributes
----------
a : float
Lower bound of the interval.
b : float
Upper bound of the interval.
coeffs : ndarray
Array of coeffcients defining cubic spline.
Notes
-----
This object can be called like a normal function with a
single or array of input points at which to evaluate
the interplating function.
Habermann & Kindermann, "Multidimensional Spline Interpolation:
Theory and Applications", Comput Econ 30, 153 (2007).
"""
def __init__(self, a, b, y, alpha=0, beta=0):
y = np.asarray(y)
n = y.shape[0] - 1
h = (b - a)/n
coeff = np.zeros(n + 3, dtype=y.dtype)
# Solutions to boundary coeffcients of spline
coeff[1] = 1/6 * (y[0] - (alpha*h*h)/6) # C2 in paper
coeff[n + 1] = 1/6 * (y[n] - (beta*h*h)/6) # cn+2 in paper
# Compressed tridiagonal matrix
ab = np.ones((3, n - 1), dtype=float)
ab[0, 0] = 0 # Because top row is upper diag with one less elem
ab[1, :] = 4
ab[-1, -1] = 0 # Because bottom row is lower diag with one less elem
B = y[1:-1].copy() # grabs elements y[1] - > y[n-2] for reduced array
B[0] -= coeff[1]
B[-1] -= coeff[n + 1]
coeff[2:-2] = la.solve_banded((1, 1), ab, B,
overwrite_ab=True,
overwrite_b=True,
check_finite=False)
coeff[0] = alpha*h*h/6. + 2*coeff[1] - coeff[2]
coeff[-1] = beta*h*h/6. + 2*coeff[-2] - coeff[-3]
self.a = a # Lower-bound of domain
self.b = b # Uppser-bound of domain
self.coeffs = coeff # Spline coefficients
self.is_complex = y.dtype == complex # Tells which dtype solver to use
self.array = y
self.bounds = (alpha, beta)
def __call__(self, pnts, *args):
# If requesting a single return value
if isinstance(pnts, numbers.Number):
if self.is_complex:
return zinterp(pnts, self.a, self.b, self.coeffs)
else:
return interp(pnts, self.a, self.b, self.coeffs)
# If requesting multiple return values from array_like
elif isinstance(pnts, (np.ndarray, list)):
pnts = np.asarray(pnts)
if self.is_complex:
return arr_zinterp(pnts, self.a, self.b, self.coeffs)
return arr_interp(pnts, self.a, self.b, self.coeffs)
raise TypeError
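# Minimal usage sketch (not part of QuTiP; sample values are illustrative):
# sample a function on a uniform grid over [a, b], build the spline, then call
# it like a plain function as described in the class docstring.
#   xs = np.linspace(0, 2 * np.pi, 33)
#   spline = Cubic_Spline(xs[0], xs[-1], np.sin(xs))
#   spline(1.234)                   # single point -> scalar
#   spline(np.linspace(0, 6, 50))   # array of points -> ndarray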
|
py | 1a540e8d050b4c621923140a81a1cbce0129a5cb | import requests
import json
import os
from github import Github
BASE = """---
id: default_repositories
title: Default repositories
description: "Default repositories in HACS"
---
<!-- The content of this file is autogenerated during build with script/generate_default_repositories.py -->
"""
github = Github(os.environ['TOKEN'])
integration_org = github.get_organization("custom-components")
plugin_org = github.get_organization("custom-cards")
theme_org = github.get_organization("home-assistant-community-themes")
blacklist = requests.get('https://raw.githubusercontent.com/hacs/default/master/blacklist')
blacklist = json.loads(blacklist.text.lower())
for category in ["integration", "plugin", "appdaemon", "python_script", "theme"]:
response = requests.get(f'https://raw.githubusercontent.com/hacs/default/master/{category}')
repos = json.loads(response.text.lower())
if category == "integration":
for repo in list(integration_org.get_repos()):
repos.append(repo.full_name.lower())
elif category == "plugin":
for repo in list(plugin_org.get_repos()):
repos.append(repo.full_name.lower())
elif category == "theme":
for repo in list(theme_org.get_repos()):
repos.append(repo.full_name.lower())
for repo in repos:
if repo in blacklist:
repos.remove(repo)
title = category.replace("_", " ").title() + 's' if category != 'appdaemon' else 'AppDaemon Apps'
BASE += f"\n## {title}\n\n"
BASE += f"_{len(repos)} Repositories in total._\n\n"
for repo in sorted(repos):
BASE += f"<p className='defaultrepo'><a href='https://github.com/{repo}' target='_blank'>{repo}</a></p>\n"
with open("documentation/default_repositories.md", "w") as mdfile:
mdfile.write(BASE)
|
py | 1a540f585219cfb3e0c97d84f0c6b4c9d4c0efdf | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'GetChannelResult',
'AwaitableGetChannelResult',
'get_channel',
]
@pulumi.output_type
class GetChannelResult:
"""
Bot channel resource definition
"""
def __init__(__self__, etag=None, id=None, kind=None, location=None, name=None, properties=None, sku=None, tags=None, type=None):
if etag and not isinstance(etag, str):
raise TypeError("Expected argument 'etag' to be a str")
pulumi.set(__self__, "etag", etag)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if kind and not isinstance(kind, str):
raise TypeError("Expected argument 'kind' to be a str")
pulumi.set(__self__, "kind", kind)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if properties and not isinstance(properties, dict):
raise TypeError("Expected argument 'properties' to be a dict")
pulumi.set(__self__, "properties", properties)
if sku and not isinstance(sku, dict):
raise TypeError("Expected argument 'sku' to be a dict")
pulumi.set(__self__, "sku", sku)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
@property
@pulumi.getter
def etag(self) -> Optional[str]:
"""
Entity Tag
"""
return pulumi.get(self, "etag")
@property
@pulumi.getter
def id(self) -> str:
"""
Specifies the resource ID.
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def kind(self) -> Optional[str]:
"""
Required. Gets or sets the Kind of the resource.
"""
return pulumi.get(self, "kind")
@property
@pulumi.getter
def location(self) -> Optional[str]:
"""
Specifies the location of the resource.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> str:
"""
Specifies the name of the resource.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> Any:
"""
The set of properties specific to bot channel resource
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def sku(self) -> Optional['outputs.SkuResponse']:
"""
Gets or sets the SKU of the resource.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Contains resource tags defined as key/value pairs.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Specifies the type of the resource.
"""
return pulumi.get(self, "type")
class AwaitableGetChannelResult(GetChannelResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetChannelResult(
etag=self.etag,
id=self.id,
kind=self.kind,
location=self.location,
name=self.name,
properties=self.properties,
sku=self.sku,
tags=self.tags,
type=self.type)
def get_channel(channel_name: Optional[str] = None,
resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetChannelResult:
"""
Bot channel resource definition
:param str channel_name: The name of the Bot resource.
:param str resource_group_name: The name of the Bot resource group in the user subscription.
:param str resource_name: The name of the Bot resource.
"""
__args__ = dict()
__args__['channelName'] = channel_name
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:botservice/v20200602:getChannel', __args__, opts=opts, typ=GetChannelResult).value
return AwaitableGetChannelResult(
etag=__ret__.etag,
id=__ret__.id,
kind=__ret__.kind,
location=__ret__.location,
name=__ret__.name,
properties=__ret__.properties,
sku=__ret__.sku,
tags=__ret__.tags,
type=__ret__.type)
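# Illustrative call (resource names below are placeholders, not real resources):
#   channel = get_channel(channel_name='MyChannel',
#                         resource_group_name='my-rg',
#                         resource_name='my-bot')
#   # channel.properties then carries the channel-specific settings.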
|
py | 1a540fbf49e695510ad259c1c1b1198c0f14c139 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
sum = 0
def sumRootToLeaf(self, root: TreeNode) -> int:
def dfs(root, path_sum):
if not root:
return
path_sum = (path_sum << 1) + root.val
if not root.left and not root.right:
self.sum += path_sum
return
dfs(root.left, path_sum)
dfs(root.right, path_sum)
dfs(root, 0)
return self.sum
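# Worked example (tree chosen for illustration): for the complete tree
#   1 -> (0 -> (0, 1), 1 -> (0, 1))
# the root-to-leaf binary numbers are 100, 101, 110, 111 = 4 + 5 + 6 + 7,
# so sumRootToLeaf returns 22.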
|
py | 1a541057a612a89d2ed638d6321cda5739b6a7e1 | # Copyright (c) 2018, NVIDIA CORPORATION.
"""
Test method that apply GPU kernel to a frame.
"""
import numpy as np
import pytest
from numba import cuda
from cudf import DataFrame
from cudf.tests.utils import assert_eq
@pytest.mark.parametrize("nelem", [1, 2, 64, 128, 129])
def test_df_apply_rows(nelem):
def kernel(in1, in2, in3, out1, out2, extra1, extra2):
for i, (x, y, z) in enumerate(zip(in1, in2, in3)):
out1[i] = extra2 * x - extra1 * y
out2[i] = y - extra1 * z
df = DataFrame()
df["in1"] = in1 = np.arange(nelem)
df["in2"] = in2 = np.arange(nelem)
df["in3"] = in3 = np.arange(nelem)
extra1 = 2.3
extra2 = 3.4
expect_out1 = extra2 * in1 - extra1 * in2
expect_out2 = in2 - extra1 * in3
outdf = df.apply_rows(
kernel,
incols=["in1", "in2", "in3"],
outcols=dict(out1=np.float64, out2=np.float64),
kwargs=dict(extra1=extra1, extra2=extra2),
)
got_out1 = outdf["out1"].to_array()
got_out2 = outdf["out2"].to_array()
np.testing.assert_array_almost_equal(got_out1, expect_out1)
np.testing.assert_array_almost_equal(got_out2, expect_out2)
@pytest.mark.parametrize("nelem", [1, 2, 64, 128, 129])
@pytest.mark.parametrize("chunksize", [1, 2, 3, 4, 23])
def test_df_apply_chunks(nelem, chunksize):
def kernel(in1, in2, in3, out1, out2, extra1, extra2):
for i, (x, y, z) in enumerate(zip(in1, in2, in3)):
out1[i] = extra2 * x - extra1 * y + z
out2[i] = i
df = DataFrame()
df["in1"] = in1 = np.arange(nelem)
df["in2"] = in2 = np.arange(nelem)
df["in3"] = in3 = np.arange(nelem)
extra1 = 2.3
extra2 = 3.4
expect_out1 = extra2 * in1 - extra1 * in2 + in3
expect_out2 = np.arange(len(df)) % chunksize
outdf = df.apply_chunks(
kernel,
incols=["in1", "in2", "in3"],
outcols=dict(out1=np.float64, out2=np.int32),
kwargs=dict(extra1=extra1, extra2=extra2),
chunks=chunksize,
)
got_out1 = outdf["out1"]
got_out2 = outdf["out2"]
np.testing.assert_array_almost_equal(got_out1, expect_out1)
np.testing.assert_array_almost_equal(got_out2, expect_out2)
@pytest.mark.parametrize("nelem", [1, 15, 30, 64, 128, 129])
def test_df_apply_custom_chunks(nelem):
def kernel(in1, in2, in3, out1, out2, extra1, extra2):
for i, (x, y, z) in enumerate(zip(in1, in2, in3)):
out1[i] = extra2 * x - extra1 * y + z
out2[i] = i
df = DataFrame()
df["in1"] = in1 = np.arange(nelem)
df["in2"] = in2 = np.arange(nelem)
df["in3"] = in3 = np.arange(nelem)
chunks = [0, 7, 11, 29, 101, 777]
chunks = [c for c in chunks if c < nelem]
extra1 = 2.3
extra2 = 3.4
expect_out1 = extra2 * in1 - extra1 * in2 + in3
expect_out2 = np.hstack(
np.arange((e - s)) for s, e in zip(chunks, chunks[1:] + [len(df)])
)
outdf = df.apply_chunks(
kernel,
incols=["in1", "in2", "in3"],
outcols=dict(out1=np.float64, out2=np.int32),
kwargs=dict(extra1=extra1, extra2=extra2),
chunks=chunks,
)
got_out1 = outdf["out1"]
got_out2 = outdf["out2"]
np.testing.assert_array_almost_equal(got_out1, expect_out1)
np.testing.assert_array_almost_equal(got_out2, expect_out2)
@pytest.mark.parametrize("nelem", [1, 15, 30, 64, 128, 129])
@pytest.mark.parametrize("blkct", [None, 1, 8])
@pytest.mark.parametrize("tpb", [1, 8, 64])
def test_df_apply_custom_chunks_blkct_tpb(nelem, blkct, tpb):
def kernel(in1, in2, in3, out1, out2, extra1, extra2):
for i in range(cuda.threadIdx.x, in1.size, cuda.blockDim.x):
x = in1[i]
y = in2[i]
z = in3[i]
out1[i] = extra2 * x - extra1 * y + z
out2[i] = i * cuda.blockDim.x
df = DataFrame()
df["in1"] = in1 = np.arange(nelem)
df["in2"] = in2 = np.arange(nelem)
df["in3"] = in3 = np.arange(nelem)
chunks = [0, 7, 11, 29, 101, 777]
chunks = [c for c in chunks if c < nelem]
extra1 = 2.3
extra2 = 3.4
expect_out1 = extra2 * in1 - extra1 * in2 + in3
expect_out2 = np.hstack(
tpb * np.arange((e - s))
for s, e in zip(chunks, chunks[1:] + [len(df)])
)
outdf = df.apply_chunks(
kernel,
incols=["in1", "in2", "in3"],
outcols=dict(out1=np.float64, out2=np.int32),
kwargs=dict(extra1=extra1, extra2=extra2),
chunks=chunks,
blkct=blkct,
tpb=tpb,
)
got_out1 = outdf["out1"]
got_out2 = outdf["out2"]
np.testing.assert_array_almost_equal(got_out1, expect_out1)
np.testing.assert_array_almost_equal(got_out2, expect_out2)
@pytest.mark.parametrize("nelem", [1, 2, 64, 128, 1000, 5000])
def test_df_apply_rows_incols_mapping(nelem):
def kernel(x, y, z, out1, out2, extra1, extra2):
for i, (a, b, c) in enumerate(zip(x, y, z)):
out1[i] = extra2 * a - extra1 * b
out2[i] = b - extra1 * c
df = DataFrame()
df["in1"] = in1 = np.arange(nelem)
df["in2"] = in2 = np.arange(nelem)
df["in3"] = in3 = np.arange(nelem)
extra1 = 2.3
extra2 = 3.4
expected_out = DataFrame()
expected_out["out1"] = extra2 * in1 - extra1 * in2
expected_out["out2"] = in2 - extra1 * in3
outdf = df.apply_rows(
kernel,
incols={"in1": "x", "in2": "y", "in3": "z"},
outcols=dict(out1=np.float64, out2=np.float64),
kwargs=dict(extra1=extra1, extra2=extra2),
)
assert_eq(outdf[["out1", "out2"]], expected_out)
@pytest.mark.parametrize("nelem", [1, 2, 64, 128, 129])
@pytest.mark.parametrize("chunksize", [1, 2, 3, 4, 23])
def test_df_apply_chunks_incols_mapping(nelem, chunksize):
def kernel(q, p, r, out1, out2, extra1, extra2):
for i, (a, b, c) in enumerate(zip(q, p, r)):
out1[i] = extra2 * a - extra1 * b + c
out2[i] = i
df = DataFrame()
df["in1"] = in1 = np.arange(nelem)
df["in2"] = in2 = np.arange(nelem)
df["in3"] = in3 = np.arange(nelem)
extra1 = 2.3
extra2 = 3.4
expected_out = DataFrame()
expected_out["out1"] = extra2 * in1 - extra1 * in2 + in3
expected_out["out2"] = np.arange(len(df)) % chunksize
outdf = df.apply_chunks(
kernel,
incols={"in1": "q", "in2": "p", "in3": "r"},
outcols=dict(out1=np.float64, out2=np.int64),
kwargs=dict(extra1=extra1, extra2=extra2),
chunks=chunksize,
)
assert_eq(outdf[["out1", "out2"]], expected_out)
|
py | 1a54108328f18c305d47f88a20d9e51ca80a9df1 | def Main(a: int) -> int:
return # compiler error - expecting int value
|
py | 1a541149ef61dbe8a06ae6f3bf8cb6dc492da103 | """Balkan URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('',include('home.urls'))
]
|
py | 1a54132c1d08425fbf13a0b286a5de5f660debae | exp = str(input('Digite uma expressão: '))
pilha = []
for simb in exp:
if simb == '(':
pilha.append('(')
elif simb == ')':
if len(pilha) > 0:
pilha.pop()
else:
pilha.append(')')
break
if len(pilha) == 0:
print('Sua expressão está válida!')
else:
print('Sua expressão está inválida!')
|
py | 1a541334b795adfcd0539265b4d2661ea83f20d7 | # -*- coding: utf-8 -*-
"""
Class definition of YOLO_v3 style detection model on image and video
"""
import colorsys
import os
from timeit import default_timer as timer
import numpy as np
import tensorflow.compat.v1.keras.backend as K
from tensorflow.compat.v1.keras.backend import get_session
from tensorflow.keras.models import load_model
from tensorflow.keras.layers import Input
from PIL import Image, ImageFont, ImageDraw
from yolo3.model import yolo_eval, yolo_body, tiny_yolo_body
from yolo3.utils import letterbox_image
import os
from tensorflow.python.keras.utils.multi_gpu_utils import multi_gpu_model
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
class YOLO(object):
_defaults = {
"model_path": 'model_data/test.h5',
"anchors_path": 'model_data/yolo_anchors.txt',
"classes_path": 'model_data/coco_classes.txt',
"score" : 0.3,
"iou" : 0.45,
"model_image_size" : (416, 416),
"gpu_num" : 1,
}
@classmethod
def get_defaults(cls, n):
if n in cls._defaults:
return cls._defaults[n]
else:
return "Unrecognized attribute name '" + n + "'"
def __init__(self, **kwargs):
self.__dict__.update(self._defaults) # set up default values
self.__dict__.update(kwargs) # and update with user overrides
self.class_names = self._get_class()
self.anchors = self._get_anchors()
self.sess = get_session()
self.boxes, self.scores, self.classes = self.generate()
def _get_class(self):
classes_path = os.path.expanduser(self.classes_path)
with open(classes_path) as f:
class_names = f.readlines()
class_names = [c.strip() for c in class_names]
return class_names
def _get_anchors(self):
anchors_path = os.path.expanduser(self.anchors_path)
with open(anchors_path) as f:
anchors = f.readline()
anchors = [float(x) for x in anchors.split(',')]
return np.array(anchors).reshape(-1, 2)
def generate(self):
model_path = os.path.expanduser(self.model_path)
assert model_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
# Load model, or construct model and load weights.
num_anchors = len(self.anchors)
num_classes = len(self.class_names)
is_tiny_version = num_anchors==6 # default setting
try:
self.yolo_model = load_model(model_path, compile=False)
except:
self.yolo_model = tiny_yolo_body(Input(shape=(None,None,3)), num_anchors//2, num_classes) \
if is_tiny_version else yolo_body(Input(shape=(None,None,3)), num_anchors//3, num_classes)
self.yolo_model.load_weights(self.model_path) # make sure model, anchors and classes match
else:
assert self.yolo_model.layers[-1].output_shape[-1] == \
num_anchors/len(self.yolo_model.output) * (num_classes + 5), \
'Mismatch between model and given anchor and class sizes'
print('{} model, anchors, and classes loaded.'.format(model_path))
# Generate colors for drawing bounding boxes.
hsv_tuples = [(x / len(self.class_names), 1., 1.)
for x in range(len(self.class_names))]
self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
self.colors = list(
map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
self.colors))
np.random.seed(10101) # Fixed seed for consistent colors across runs.
np.random.shuffle(self.colors) # Shuffle colors to decorrelate adjacent classes.
np.random.seed(None) # Reset seed to default.
# Generate output tensor targets for filtered bounding boxes.
self.input_image_shape = K.placeholder(shape=(2, ))
if self.gpu_num>=2:
self.yolo_model = multi_gpu_model(self.yolo_model, gpus=self.gpu_num)
boxes, scores, classes = yolo_eval(self.yolo_model.output, self.anchors,
len(self.class_names), self.input_image_shape,
score_threshold=self.score, iou_threshold=self.iou)
return boxes, scores, classes
def detect_image(self, image):
start = timer()
if self.model_image_size != (None, None):
assert self.model_image_size[0]%32 == 0, 'Multiples of 32 required'
assert self.model_image_size[1]%32 == 0, 'Multiples of 32 required'
boxed_image = letterbox_image(image, tuple(reversed(self.model_image_size)))
else:
new_image_size = (image.width - (image.width % 32),
image.height - (image.height % 32))
boxed_image = letterbox_image(image, new_image_size)
image_data = np.array(boxed_image, dtype='float32')
print(image_data.shape)
image_data /= 255.
image_data = np.expand_dims(image_data, 0) # Add batch dimension.
out_boxes, out_scores, out_classes = self.sess.run(
[self.boxes, self.scores, self.classes],
feed_dict={
self.yolo_model.input: image_data,
self.input_image_shape: [image.size[1], image.size[0]],
K.learning_phase(): 0
})
print('Found {} boxes for {}'.format(len(out_boxes), 'img'))
font = ImageFont.truetype(font='font/FiraMono-Medium.otf',
size=np.floor(3e-2 * image.size[1] + 0.5).astype('int32'))
thickness = (image.size[0] + image.size[1]) // 300
for i, c in reversed(list(enumerate(out_classes))):
predicted_class = self.class_names[c]
box = out_boxes[i]
score = out_scores[i]
label = '{} {:.2f}'.format(predicted_class, score)
draw = ImageDraw.Draw(image)
label_size = draw.textsize(label, font)
top, left, bottom, right = box
top = max(0, np.floor(top + 0.5).astype('int32'))
left = max(0, np.floor(left + 0.5).astype('int32'))
bottom = min(image.size[1], np.floor(bottom + 0.5).astype('int32'))
right = min(image.size[0], np.floor(right + 0.5).astype('int32'))
print(label, (left, top), (right, bottom))
if top - label_size[1] >= 0:
text_origin = np.array([left, top - label_size[1]])
else:
text_origin = np.array([left, top + 1])
# My kingdom for a good redistributable image drawing library.
for i in range(thickness):
draw.rectangle(
[left + i, top + i, right - i, bottom - i],
outline=self.colors[c])
draw.rectangle(
[tuple(text_origin), tuple(text_origin + label_size)],
fill=self.colors[c])
draw.text(text_origin, label, fill=(0, 0, 0), font=font)
del draw
end = timer()
print(end - start)
return image
def close_session(self):
self.sess.close()
def detect_video(yolo, video_path, output_path=""):
import cv2
vid = cv2.VideoCapture(video_path)
if not vid.isOpened():
raise IOError("Couldn't open webcam or video")
video_FourCC = int(vid.get(cv2.CAP_PROP_FOURCC))
video_fps = vid.get(cv2.CAP_PROP_FPS)
video_size = (int(vid.get(cv2.CAP_PROP_FRAME_WIDTH)),
int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT)))
isOutput = True if output_path != "" else False
if isOutput:
print("!!! TYPE:", type(output_path), type(video_FourCC), type(video_fps), type(video_size))
out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
accum_time = 0
curr_fps = 0
fps = "FPS: ??"
prev_time = timer()
while True:
return_value, frame = vid.read()
image = Image.fromarray(frame)
image = yolo.detect_image(image)
result = np.asarray(image)
curr_time = timer()
exec_time = curr_time - prev_time
prev_time = curr_time
accum_time = accum_time + exec_time
curr_fps = curr_fps + 1
if accum_time > 1:
accum_time = accum_time - 1
fps = "FPS: " + str(curr_fps)
curr_fps = 0
cv2.putText(result, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
fontScale=0.50, color=(255, 0, 0), thickness=2)
cv2.namedWindow("result", cv2.WINDOW_NORMAL)
cv2.imshow("result", result)
if isOutput:
out.write(result)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
yolo.close_session()
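# Example usage (assumed entry point, not present in the original module):
#   yolo = YOLO()                    # picks up the _defaults above
#   img = Image.open('test.jpg')     # hypothetical input image
#   result = yolo.detect_image(img)
#   result.show()
#   yolo.close_session()
# or, for a video file: detect_video(YOLO(), 'input.mp4', output_path='out.avi')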
|
py | 1a5413fe61e96a68407c79ec0f1c37171bf91d8f | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'DjangoRESTAPI.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
|
py | 1a541422b03a96e0bbcc2d8aedb5734a04d35a92 | # to run this script you need a DL1 debug files of hipeRTA and a DL1 file from lstchain from the same run
import tables
import numpy as np
import matplotlib.pyplot as plt
from ctapipe.visualization import CameraDisplay
from ctapipe.instrument import CameraGeometry
from ctapipe.image import tailcuts_clean
from lstchain.io.io import dl1_images_lstcam_key, dl1_params_lstcam_key
from astropy.table import Table, join, Column, hstack
from ctapipe.io.containers import HillasParametersContainer
from matplotlib.backends.backend_pdf import PdfPages
from datetime import date
import astropy.units as u
from astropy.coordinates import Angle
import argparse
def tailcuts_clean_teltype(image, camera_name='LSTCam', **kwargs):
return tailcuts_clean(geom, image, **kwargs)
def get_hillas_container(row):
h = HillasParametersContainer()
h.x = row['x'] * 28 * u.m
h.y = row['y'] * 28 * u.m
h.r = row['r'] * 28 * u.m
h.phi = Angle(row['phi'] * u.rad)
h.width = row['width'] * u.m
h.length = row['length'] * u.m
h.psi = Angle(row['psi'] * u.rad)
h.skewness = row['skewness']
h.kurtosis = row['kurtosis']
return h
dl1_hipecta_filename = '/fefs/aswg/workspace/thomas.vuillaume/mchdf5/run1/dl1_6_3_2_gamma_20deg_180deg_run1___cta-prod3-demo-2147m-LaPalma-baseline-mono_off0.4.h5'
dl1_lstchain_filename = '/fefs/aswg/workspace/thomas.vuillaume/mchdf5/run1/lstchain/GlobalPeakIntegrator/dl1_gamma_20deg_180deg_run1___cta-prod3-demo-2147m-LaPalma-baseline-mono_off0.4.simtel.h5'
# Camera geometry is used both by tailcuts_clean_teltype above and inside main(),
# so it is defined once at module level.
geom = CameraGeometry.from_name('LSTCam')
def main(dl1_hipecta_filename, dl1_lstchain_filename):
dl1_hipecta = tables.open_file(dl1_hipecta_filename)
dl1_lstchain = tables.open_file(dl1_lstchain_filename)
with tables.open_file(dl1_hipecta_filename) as dl1_hipecta:
hipecta_images = Table(dl1_hipecta.root.dl1.Tel_1.calib_pic.read())
hipecta_parameters = Table(dl1_hipecta.root.dl1.Tel_1.parameters.read())
with tables.open_file(dl1_lstchain_filename) as dl1_lstchain:
simu_table = Table(dl1_lstchain.root.dl1.event.simulation.LST_LSTCam.read())
lstchain_images = Table(dl1_lstchain.root[dl1_images_lstcam_key].read())
hipecta = join(hipecta_images, hipecta_parameters, keys='event_id')
lstchain_table = hstack([lstchain_images, simu_table], join_type='exact')
lstchain_table.rename_column('tel_id_1', 'tel_id')
lstchain_table.remove_column('tel_id_2')
mega_table = join(lstchain_table[lstchain_table['tel_id']==1],
hipecta,
uniq_col_name='{table_name}_{col_name}',
table_names = ['lstchain', 'hipecta'],
keys='event_id'
)
selected_table = mega_table[:30]
params_cleaning = dict(picture_thresh=6,
boundary_thresh=3,
keep_isolated_pixels=False,
min_number_picture_neighbors=2)
    # Apply the ctapipe tailcuts cleaning to each event image (axis=1 runs the helper per row).
    lstchain_cleaning = np.apply_along_axis(tailcuts_clean_teltype, 1, selected_table['image'], **params_cleaning)
selected_table.add_column(Column(lstchain_cleaning, dtype=int), name='lstchain_clean_mask')
with PdfPages(f'compare_lstchain_hipecta_images_{date.today()}.pdf') as pp:
for ii, row in enumerate(selected_table[:10]):
print(f"{ii}. event id : {row['event_id']}")
# print(row)
h = get_hillas_container(row)
image_lstchain = row['image']
image_hipecta = row['signal']
clean_mask_ctapipe_on_lstchain = row['lstchain_clean_mask']
clean_mask_ctapipe_on_hipecta = tailcuts_clean(geom, image_hipecta, **params_cleaning)
clean_mask_hipecta = row['clean_mask'].astype(bool)
fig, axes = plt.subplots(2,3, figsize=(12,6))
# axes[0,2].remove()
display = CameraDisplay(geom, image_lstchain, ax=axes[0,0])
display.add_colorbar(ax=axes[0,0])
axes[0,0].set_title('lstchain image')
display = CameraDisplay(geom, clean_mask_ctapipe_on_lstchain, ax=axes[0,1])
# display.add_colorbar(ax=axes[0,1])
display.highlight_pixels(clean_mask_ctapipe_on_lstchain.astype(bool), color='red')
axes[0,1].set_title('lstchain clean mask')
display = CameraDisplay(geom, image_hipecta, ax=axes[1,0])
display.add_colorbar(ax=axes[1,0])
axes[1,0].set_title('hipecta image')
display = CameraDisplay(geom, clean_mask_hipecta, ax=axes[1,1])
# display.add_colorbar(ax=axes[1,1])
display.highlight_pixels(clean_mask_ctapipe_on_hipecta, color='red')
axes[1,1].set_title('hipecta clean mask')
axes[1,1].text(0.88,0.88,s='cleaning mask\nfrom ctapipe',color='red')
axes[1,1].text(-1.5, 0.88, s=f'n_islands={row["n_islands"]}', color='black')
display.overlay_moments(h)
display = CameraDisplay(geom, row['photo_electron_image'], ax=axes[0,2])
display.add_colorbar(ax=axes[0,2])
axes[0,2].set_title('true pe image')
display.highlight_pixels(clean_mask_ctapipe_on_lstchain.astype(bool), color='red')
axes[0,2].text(0.88, 0.88, s='cleaning mask\nfrom ctapipe', color='red')
display = CameraDisplay(geom, row['photo_electron_image'], ax=axes[1,2])
display.add_colorbar(ax=axes[1,2])
axes[1,2].set_title('true pe image')
display.highlight_pixels(clean_mask_hipecta, color='red')
axes[1,2].text(0.88,0.88,s='cleaning mask\nfrom hipecta',color='red')
plt.tight_layout()
pp.savefig(dpi=100)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Reconstruct events")
parser.add_argument('--dl1_lstchain', '-lst',
type=str,
dest='dl1_lstchain_filename',
help='path to the lstchain DL1 file',
default=dl1_lstchain_filename)
parser.add_argument('--dl1_hipecta', '-hipe',
type=str,
dest='dl1_hipecta_filename',
help='path to the hiperta debug DL1 file',
default=dl1_hipecta_filename)
args = parser.parse_args()
main(args.dl1_hipecta_filename, args.dl1_lstchain_filename)
|
py | 1a541580f43b3f9586f3280b34acfd4350d1dc3e | # Write a program where the user can
# enter _seven numeric values_ and
# store them in a _single list_ that keeps
# the _even_ and _odd_ values separated.
# At the end, show the even and odd
# values in ascending order.
print('Me de sete valores por favor')
valores = [[], []]
for num in range(7):
valor = int(input(f'Me de valor {num + 1}: '))
if valor % 2 == 0:
valores[0].append(valor)
else:
valores[1].append(valor)
print(f'Beleza na Veneza? Os pares são: {sorted(valores[0])}')
print(f'Os impares são: {sorted(valores[1])}')
|
py | 1a5416b7e3b30f517e04a0213800fb118efe7f90 | from enum import Enum
from spectroscope.model.update import Action
from spectroscope.model.database import RaiseUpdateKeys
from spectroscope.module import ConfigOption, Plugin
from spectroscope.constants import enums
import spectroscope
from typing import List
from pymongo import MongoClient, UpdateOne, DeleteOne
from pymongo.results import BulkWriteResult
from pymongo.errors import ConnectionFailure
log = spectroscope.log()
class Mongodb(Plugin):
_consumed_types = [RaiseUpdateKeys]
config_options = [
ConfigOption(
name="uri_endpoint",
param_type=str,
description="Endpoint to database server",
),
ConfigOption(
name="rs_name",
param_type=str,
description="replica set name of the mongodb",
),
ConfigOption(
name="db_name",
param_type=str,
description="Name of database",
),
ConfigOption(
name="col_name",
param_type=str,
description="Name of collection",
),
]
def __init__(self, uri_endpoint: str, rs_name: str, db_name: str, col_name: str):
try:
self._client = MongoClient(uri_endpoint, replicaset=rs_name)
self._database = self._client[db_name]
self._collection = self._database[col_name]
except ConnectionFailure as e:
log.error("failed to connect to {}. {}".format(self.uri_endpoint, e))
raise e
self._handlers = {RaiseUpdateKeys: self._action}
@classmethod
def register(cls, **kwargs):
return cls(
uri_endpoint=kwargs["uri_endpoint"],
rs_name=kwargs["rs_name"],
db_name=kwargs.get("db_name", "spectroscope"),
col_name=kwargs.get("col_name", "validators"),
)
def _create_updates(self, validator_keys: List[str], status: int):
request = []
for key in validator_keys:
request.append(
UpdateOne(
{"_id": key},
{"$setOnInsert": {"_id": key, "status": status}},
upsert=True,
)
)
return request
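    # Illustrative note (not in the original source): for validator_keys=["0xabc"]
    # and status=1 this yields a single
    #   UpdateOne({"_id": "0xabc"}, {"$setOnInsert": {"_id": "0xabc", "status": 1}}, upsert=True)
    # so an existing document is left untouched and a missing one is inserted.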
def _create_deletions(self, validator_keys: List[str]):
request = []
for key in validator_keys:
request.append(DeleteOne({"_id": key}))
return request
def _add(self, validator_keys: List[str], status: int):
result = self._collection.bulk_write(
self._create_updates(validator_keys, status), ordered=False
)
if not result.acknowledged:
return []
return result.upserted_count
def _up(self, validator_keys: List[str], status: int):
result = self._collection.bulk_write(
self._create_updates(validator_keys, status), ordered=False
)
if not result.acknowledged:
return []
return result.modified_count
def _del(self, validator_keys: List[str], status: int):
result = self._collection.bulk_write(
self._create_deletions(validator_keys), ordered=False
)
if not result.acknowledged:
return []
return result.deleted_count
def _get(self, validator_keys: List[str], status: int):
validators = []
if not validator_keys:
validators = self._collection.find({}, {"validator_key": 1})
else:
validators = self._collection.find(
{"validator_key": {"$in": validator_keys}}, {"validator_key": 1}
)
return [x["validator_key"] for x in validators]
def _action(
self, validator_keys: List[str], status: int, update_type: int, **kwargs
):
if enums.RequestTypes.ADD.value == update_type:
return self._add(validator_keys, status)
elif enums.RequestTypes.UP.value == update_type:
return self._up(validator_keys, status)
elif enums.RequestTypes.DEL.value == update_type:
return self._del(validator_keys, status)
elif enums.RequestTypes.GET.value == update_type:
return self._get(validator_keys, status)
def consume(self, events: List[Action]):
result = []
for event in events:
result.append(self._handlers[type(event)](**event.update.get_dict()))
return result
|
py | 1a54177c182ef465365a79f7f2fbb1c19191b954 |
from setuptools import setup, find_packages
version = '5.3.4'
setup(
name="alerta-hipchat",
version=version,
description='Alerta plugin for HipChat',
url='https://github.com/alerta/alerta-contrib',
license='MIT',
author='Nick Satterly',
author_email='[email protected]',
packages=find_packages(),
py_modules=['alerta_hipchat'],
install_requires=[
'requests',
'jinja2'
],
include_package_data=True,
zip_safe=True,
entry_points={
'alerta.plugins': [
'hipchat = alerta_hipchat:SendRoomNotification'
]
}
)
|
py | 1a5417b2f6c4a233b4985beeba261e65fbb41faf | from django.shortcuts import render
# Create your views here.
from django.views import View
from goods.models import GoodsChannel, GoodsCategory
from .models import ContentCategory
from .utils import get_categories
class IndexView(View):
def get(self, request):
"""提供首页广告界面"""
        # Query the goods channels and categories
categories = get_categories()
        # Advertisement data
contents = {}
content_categories = ContentCategory.objects.all()
for cat in content_categories:
contents[cat.key] = cat.content_set.filter(status=True).order_by('sequence')
        # Context for rendering the template
context = {
'categories': categories,
'contents': contents
}
return render(request, 'index.html', context) |
py | 1a54180568baa827540e6a02ebf98cd98f196d22 | from rest_framework import serializers
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
# User Serializer
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email')
# Register serializer
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'password')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
user = User.objects.create_user(
validated_data['username'],
validated_data['email'],
validated_data['password'])
return user
# Login serializer
class LoginSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, data):
print(2)
user = authenticate(**data)
if user and user.is_active:
return user
raise serializers.ValidationError("Incorrect Credentials")
|
py | 1a54189671ab14c85829f401e87a33e19abf742e | # coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: release-1.16
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1ClientIPConfig(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'timeout_seconds': 'int'
}
attribute_map = {
'timeout_seconds': 'timeoutSeconds'
}
def __init__(self, timeout_seconds=None, local_vars_configuration=None): # noqa: E501
"""V1ClientIPConfig - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._timeout_seconds = None
self.discriminator = None
if timeout_seconds is not None:
self.timeout_seconds = timeout_seconds
@property
def timeout_seconds(self):
"""Gets the timeout_seconds of this V1ClientIPConfig. # noqa: E501
timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours). # noqa: E501
:return: The timeout_seconds of this V1ClientIPConfig. # noqa: E501
:rtype: int
"""
return self._timeout_seconds
@timeout_seconds.setter
def timeout_seconds(self, timeout_seconds):
"""Sets the timeout_seconds of this V1ClientIPConfig.
timeoutSeconds specifies the seconds of ClientIP type session sticky time. The value must be >0 && <=86400(for 1 day) if ServiceAffinity == \"ClientIP\". Default value is 10800(for 3 hours). # noqa: E501
:param timeout_seconds: The timeout_seconds of this V1ClientIPConfig. # noqa: E501
:type: int
"""
self._timeout_seconds = timeout_seconds
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1ClientIPConfig):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1ClientIPConfig):
return True
return self.to_dict() != other.to_dict()
|
py | 1a5419295ff8a448eecc4200b2abcbb7eefd5507 | from django.test import TestCase
from django.contrib.auth import get_user_model
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new user with an email is successful"""
email = 'test@circle_instruments.com'
password = 'fake_password'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test the email for a new user is normalized"""
email = 'test@UPPER_CASE.COM'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
"""Test creating user with no email raises error"""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_superuser(self):
"""Test creating a new superuser"""
user = get_user_model().objects.create_superuser(
'[email protected]',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
|
py | 1a5419ab1ef78e75f2b4e99566a754b1f0a4b80c | import re
import requests
from webpage import WebPage
from seocheck import SEOCheckExist, SEOCheckNotExist, SEOCheckLength, SEOCheckLengthBetween
from seocheckmanager import SEOCheckManager
from outputprinter import StandardPrinter, PDFPrinter
from test import Test
def get_key_or_default(dictionary, key, default=None):
try:
return dictionary[key]
except KeyError:
return default
class WebSite:
def __init__(self, start_url):
"""
Initialise an instance of WebSite
"""
self.start_url = start_url
m = re.match(r'^(http[s]?://[a-zA-Z0-9\.-]+/)', self.start_url)
if not m:
raise ValueError("'start_url' must be an URL starting by http:// or https://")
self.root_url = m.group(0) # has / at the end
self.seocheckmanager = SEOCheckManager()
# HTML
self.seocheckmanager.append(SEOCheckExist("html", "lang", "Missing LANG attribute for <HTML/>", "Setting this value can help you to get a better ranking based on the localization of the user who is using a search engine and the subdomain in use for this search engine. Results and positioning in google.com and google.co.uk are not the same"), 2)
# HEAD / LINK
self.seocheckmanager.append(SEOCheckExist("html > head > link[rel~=icon]", "href", "Missing FAVICON"), 3)
# HEAD / TITLE
self.seocheckmanager.append(SEOCheckExist("html > head > title", None, "Missing <TITLE/>"), 0)
self.seocheckmanager.append(SEOCheckLength("html > head > title", None, ">", 70, "Too long <TITLE/>"), 1)
# HEAD / META[description]
self.seocheckmanager.append(SEOCheckExist("html > head > meta[name=description]", "content", "Missing META for description"), 0)
self.seocheckmanager.append(SEOCheckLength("html > head > meta[name=description]", "content", "<", 50, "Too short META for description"), 1)
self.seocheckmanager.append(SEOCheckLength("html > head > meta[name=description]", "content", ">", 160, "Too long META for description"), 1)
self.seocheckmanager.append(SEOCheckLengthBetween("html > head > meta[name=description]", "content", 150, 160, "Recommended META for description: between 150 and 160"), 3)
# HEAD / META[robots]
self.seocheckmanager.append(SEOCheckExist("html > head > meta[name=robots]", "content", "Missing META for robots"), 1)
# H1
self.seocheckmanager.append(SEOCheckExist("h1", None, "Missing <H1/>"), 0)
# IMG
self.seocheckmanager.append(SEOCheckNotExist("img", "src", "(.+)", "Missing SRC attribute for <IMG/>", "src attribute should be specified on every <img/>"), 0)
self.seocheckmanager.append(SEOCheckNotExist("img", "alt", "(.+)", "Missing ALT attribute for <IMG/>"), 0)
self.seocheckmanager.append(SEOCheckLength("img", "alt", ">", 80, "Too long ALT attribute for <IMG/>"), 2)
# A
self.seocheckmanager.append(SEOCheckNotExist("a", "href", "(.+)", "Missing HREF attribute for <A/>", "<a/> links are used to create hyperlinks. href should be specified on every <a/> link"), 0)
self.seocheckmanager.append(SEOCheckNotExist("a[href^='/'] , a[href^='%s']" % self.root_url, None, "(.+)", "Missing visible/anchor text of <A/> (internal link)", "Anchor text of <a/> links is useful because it helps bots to understand what kind of page is targetted. It gives bots keywords that could be attributed to the webpage"), 2)
self.seocheckmanager.append(SEOCheckNotExist("a[href='#']", None, None, "Use of <a href='#' />", "For my part, I prefer using <a href='javascript:void(0);' onclick='...' /> instead of <a href='#' onclick='...' />. <a href='#' /> makes the page scrolling up when clicked"), 3)
# APPLET / IFRAME
self.seocheckmanager.append(SEOCheckNotExist("applet", "code", "(.+)", "Missing CODE attribute for <APPLET/>"), 3)
self.seocheckmanager.append(SEOCheckNotExist("iframe", "src", "(.+)", "Missing SRC attribute for <IFRAME/>"), 3)
# I / B
self.seocheckmanager.append(SEOCheckNotExist("i , b", "class", "(^| )(glyphicon)($| )", "Recommended: use <strong/> and <em/> instead of <i/> and <b/>"), 2)
def append(self, webpage):
"""
Append a webpage to the list of currently in use WebPages
/!\ Does not check if the page is already in the list
"""
webpage.id = len(self.webpages)
self.webpages.append(webpage)
self.url_to_id[webpage.url] = webpage.id
return webpage.id
def retrieve_webpage(self, from_wp, url, as_ressource=False):
"""
Return the corresponding WebPage object
Create it if it was not defined before
"""
extended = False
if url.startswith('//'):
url = "http:" + url
elif url.startswith('/'):
url = self.root_url + url[1:]
extended = True
elif not url.startswith("http"):
without_slashes = from_wp.url.split('?')[0].split('/')
url = '/'.join(without_slashes[:-1]) + '/' + url
try:
wp_id = self.url_to_id[url]
except KeyError:
wp = WebPage(url, from_wp.depth +1, extended | url.startswith(self.root_url))
wp_id = self.append(wp)
wp = self.webpages[wp_id]
if as_ressource:
wp.add_ressource_used_by(from_wp)
else:
wp.add_link_used_by(from_wp)
return wp
def scan(self, parameters):
"""
Scan the WebSite in order to report abnormal or non-optimal
coding choices
"""
email_address = get_key_or_default(parameters, "email")
nofollow = get_key_or_default(parameters, "nofollow", False)
noindex = get_key_or_default(parameters, "noindex", False)
deep = get_key_or_default(parameters, "deep", False)
num_retry = get_key_or_default(parameters, "num-retry", 0)
max_depth = get_key_or_default(parameters, "max-depth", 5)
color = get_key_or_default(parameters, "color", False)
# webpages contains the list of known pages
# pages that have been or should be seen during scan
# for max_depth=+infinity
self.webpages = list()
# faster lookup
self.url_to_id = dict()
self.append(WebPage(self.start_url))
# BFS parameters
cursor_webpages_pos = 0
# while we do not reach the maximal allowed depth or visit everything
while cursor_webpages_pos < len(self.webpages) and self.webpages[cursor_webpages_pos].depth <= max_depth:
# remove and return the head of the queue
webpage = self.webpages[cursor_webpages_pos]
webpage.scan(self, self.seocheckmanager, noindex, nofollow, deep, num_retry)
cursor_webpages_pos += 1
# TEST
tests = list()
# Robots.txt / Sitemaps
t_robots = Test("Missing robots.txt", "'robots.txt' file tells search engines whether they can access and therefore crawl parts of your site", 0)
error_robots = False
try:
robots_webpage = requests.get(self.root_url + "robots.txt", timeout=10)
except requests.ConnectionError:
error_robots = True
except requests.exceptions.Timeout:
error_robots = True
except requests.exceptions.InvalidSchema:
error_robots = True
if error_robots or robots_webpage.status_code != 200:
t_robots.set_passed(False)
tests.append(t_robots)
# Broken links check
# Good balance between internal/external links
t_brokenlinks = Test("Broken links", "Broken links affects your ranking", 0)
t_brokenlinks_in = Test("Broken links in", "Broken links have been detected in the following webpages", 0)
        t_brokenressources_in = Test("Broken resources in", "Broken resources (image sources, JS scripts, CSS stylesheets) have been detected in the following webpages", 0)
t_duplicated_title = Test("Duplicated Title (on pages to be indexed)", "Webpages with identical titles are very harmful for the ranking", 1)
        t_duplicated_description = Test("Duplicated Description (on pages to be indexed)", "Webpages with identical descriptions are very harmful for the ranking", 1)
t_internal_external_links = Test("Too many external links", "Some people believe that the number of external links should be inferior to the number of internal links. Choose your links properly in order to avoid becoming a directory for websites. You can also use rel='nofollow' attribute in order do remove their effects on your ranking", 3)
for webpage in self.webpages:
if webpage.status not in (200, 301, 302):
t_brokenlinks.append(webpage)
if noindex and webpage.noindex:
continue
if webpage.has_brokenlinks:
t_brokenlinks_in.append(webpage)
if webpage.has_brokenressources:
t_brokenressources_in.append(webpage)
if webpage.duplicated_title:
t_duplicated_title.append(webpage)
if webpage.duplicated_description:
t_duplicated_description.append(webpage)
if webpage.link_towards_ext and webpage.link_towards_int and len(webpage.link_towards_ext) > len(webpage.link_towards_int):
t_internal_external_links.append(webpage)
tests.append(t_brokenlinks)
tests.append(t_brokenlinks_in)
tests.append(t_brokenressources_in)
tests.append(t_duplicated_title)
tests.append(t_duplicated_description)
tests.append(t_internal_external_links)
# SEOCheck - local checks
seochecks_and_levels = self.seocheckmanager.get_check_list()
for check_and_level in seochecks_and_levels:
check = check_and_level[0]
level = check_and_level[1]
t_check = Test(check.get_title(), check.get_description(), level)
for webpage in self.webpages:
if noindex and webpage.noindex:
continue
check_dict = webpage.get_check_dict()
if not check_dict:
continue
if not check.check(check_dict):
t_check.append(webpage)
tests.append(t_check)
# Display results
failed_tests = list()
passed_tests = list()
for t in tests:
if t.get_passed():
passed_tests.append(t)
else:
failed_tests.append(t)
print ""
sprinter = StandardPrinter(color)
sprinter.render(self.webpages, failed_tests, passed_tests)
pdfprinter = PDFPrinter(self.root_url, "pdf.pdf", email_address)
pdfprinter.render(self.webpages, failed_tests, passed_tests)
|
py | 1a541ab5aa2b0e1c7712c890dc8a1225f15dfbfa | #!/usr/bin/env python3
import sqlite3
import sys
import pandas as pd
print("\n\nSTARTING DATABASE EXPORT...\n\n")
db = sqlite3.connect(sys.argv[1])
cursor1 = db.cursor()
cursor2 = db.cursor()
cursor1.execute("select * from proteins")
names = list(map(lambda x: x[0], cursor1.description))
samples = [i for i in names if i.startswith('SAF ')]
safdict={}
for s in samples:
cursor1.execute("select SUM(`{}`) from proteins;".format(s))
for row in cursor1.fetchall():
safdict[s] = row[0]
try:
cmd = "alter table proteins add column '{}' REAL".format('N' + s)
cursor1.execute(cmd)
db.commit()
except:
pass
try:
cmd = "alter table proteins add column Summed_NSAF REAL"
cursor1.execute(cmd)
db.commit()
except:
pass
scans = set()
try:
cursor1.execute('CREATE INDEX accession ON proteins(Accession);')
db.commit()
except:
pass
cursor1.execute('SELECT * FROM proteins')
names = list(map(lambda x: x[0], cursor1.description))
for row in cursor1:
cols = {}
for i in range(len(row)):
col = names[i]
val = row[i]
cols[col] = val
acc = cols['Accession']
summed_nsaf = 0
for col in cols:
val = cols[col]
if val is not None:
if col in safdict:
nsaf = val / safdict[col]
summed_nsaf += nsaf
nsaf_col = 'N' + col
cmd = "UPDATE proteins set `{}`={} where Accession = '{}';".format(nsaf_col, nsaf, acc)
cursor2.execute(cmd)
elif col == 'Scans':
scans.update([ i for i in val.split('\n') if i != ''])
cmd = "UPDATE proteins set Summed_NSAF={} where Accession = '{}';".format(summed_nsaf, acc)
cursor2.execute(cmd)
db.commit()
try:
cmd = "alter table proteins add column Protein_Prob REAL"
cursor1.execute(cmd)
db.commit()
except:
pass
try:
cmd = "alter table proteins add column Organism_Prob REAL"
cursor1.execute(cmd)
db.commit()
except:
pass
try:
cmd = "alter table proteins add column MSMS_Percent REAL"
cursor1.execute(cmd)
db.commit()
except:
pass
try:
cmd = "alter table proteins add column Combined_Prob REAL"
cursor1.execute(cmd)
db.commit()
except:
pass
mapped_scans = len(scans)
scans = set()
cursor1.execute('SELECT * FROM proteins ORDER BY Summed_NSAF DESC')
names = list(map(lambda x: x[0], cursor1.description))
prev = 0
accs = set()
for row in cursor1:
cols = {}
for i in range(len(row)):
col = names[i]
val = row[i]
cols[col] = val
acc = cols['Accession']
summed_nsaf = 0
for col in cols:
val = cols[col]
if col == 'Scans':
scans.update([ i for i in val.split('\n') if i != ''])
percent = len(scans)/float(mapped_scans) * 100
if percent != prev:
prev= percent
accs.add(acc)
db.commit()
passed_accs = list(accs)
query='SELECT * FROM proteins WHERE Accession in ("{0}")'.format('", "'.join(passed_accs))
df = pd.read_sql_query(sql=query, con=db)
filt = df.groupby(df.Organism).agg({"Summed_NSAF": sum}).sort_values('Summed_NSAF', ascending=False).reset_index()
filt['OrganismProb'] = filt['Summed_NSAF'] / filt['Summed_NSAF'].sum()
filt= filt.set_index('Organism')
org_series = pd.Series(filt.OrganismProb)
org_dict = org_series.to_dict()
cursor1.execute("select SUM(`Summed_NSAF`) from proteins;")
for row in cursor1.fetchall():
summed_summed_nsaf = row[0]
# Add the organism probability
cursor1.execute('SELECT * FROM proteins ORDER BY Summed_NSAF DESC')
names = list(map(lambda x: x[0], cursor1.description))
for row in cursor1:
cols = {}
for i in range(len(row)):
col = names[i]
val = row[i]
cols[col] = val
acc = cols['Accession']
org = cols['Organism']
prot_prob = cols['Summed_NSAF'] / summed_summed_nsaf
try:
org_prob = org_dict[org]
except:
org_prob = 0
combined_prob = prot_prob * org_prob
cmd = "UPDATE proteins set Organism_Prob={} where Accession = '{}';".format(org_prob, acc)
cursor2.execute(cmd)
cmd = "UPDATE proteins set Combined_Prob={} where Accession = '{}';".format(combined_prob, acc)
cursor2.execute(cmd)
cmd = "UPDATE proteins set Protein_Prob={} where Accession = '{}';".format(prot_prob, acc)
cursor2.execute(cmd)
db.commit()
scans = set()
cursor1.execute('SELECT * FROM proteins ORDER BY Combined_Prob DESC, Protein_Prob DESC')
names = list(map(lambda x: x[0], cursor1.description))
prev = 0
accs = set()
for row in cursor1:
cols = {}
for i in range(len(row)):
col = names[i]
val = row[i]
cols[col] = val
acc = cols['Accession']
summed_nsaf = 0
for col in cols:
val = cols[col]
if col == 'Scans':
scans.update([ i for i in val.split('\n') if i != ''])
percent = len(scans)/float(mapped_scans) * 100
if percent != prev:
prev= percent
accs.add(acc)
passed_accs = list(accs)
query='SELECT * FROM proteins WHERE Accession in ("{0}") ORDER BY Combined_Prob DESC, Protein_Prob DESC'.format('", "'.join(passed_accs))
df = pd.read_sql_query(sql=query, con=db)
output = sys.argv[2]
recs = ''.join(df['Record'].tolist())
w = open( output + '/metanovo.fasta' , 'w' )
w.write(recs)
w.close()
df.to_csv(output + '/metanovo.csv')
|
py | 1a541ac987e556801f193f7efb16d77fc8fc7faa | #!/usr/bin/python
import sys
import ipaddress
import json
input = sys.argv[1]
addr = ipaddress.ip_address(input)
output = json.dumps({
'to_ipv4': str(addr.ipv4_mapped) if addr.version == 6 else "<unsupported>",
'to_ipv6': "<unsupported>",
'is_unspecified': addr.is_unspecified,
'is_loopback': addr.is_loopback,
'is_reserved': addr.is_reserved,
'is_benchmarking': "<unsupported>",
'is_documentation': "<unsupported>",
'is_global': addr.is_global,
'is_ietf_protocol_assignment': "<unsupported>",
'is_shared': "<unsupported>",
'is_unicast_link_local': addr.is_link_local,
'is_unicast_site_local': addr.is_site_local if addr.version == 6 else "<unsupported>",
'is_unique_local': "<unsupported>",
'mc_scope_admin_local': "<unsupported>",
'mc_scope_global': "<unsupported>",
'mc_scope_iface_local': "<unsupported>",
'mc_scope_link_local': "<unsupported>",
'mc_scope_org_local': "<unsupported>",
'mc_scope_realm_local': "<unsupported>",
'mc_scope_reserved': "<unsupported>",
'mc_scope_unassigned': "<unsupported>",
})
# normalize output
output = output.replace("\"None\"", "null")
print(output)
|
py | 1a541d8b1c9b4de1db6c63a218c191b8b3fdfc05 | from collections import Counter
|
py | 1a541df2bf87e4d33308ec252774fe9a0f8fe48a | # Define the function for calculating the Eye Aspect Ratio(EAR)
from scipy.spatial import distance as dist
def eye_aspect_ratio(eye):
# Vertical eye landmarks
A = dist.euclidean(eye[1], eye[5])
B = dist.euclidean(eye[2], eye[4])
# Horizontal eye landmarks
C = dist.euclidean(eye[0], eye[3])
# The EAR Equation
EAR = (A + B) / (2.0 * C)
return EAR
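# Illustrative sketch (not part of the original file): EAR for a hypothetical set of
# six (x, y) eye landmarks ordered as in dlib's 68-point model. The ratio stays
# roughly constant while the eye is open and drops sharply when it closes.
#   example_eye = [(0, 2), (2, 4), (5, 4), (7, 2), (5, 0), (2, 0)]
#   eye_aspect_ratio(example_eye)  # (4 + 4) / (2 * 7) ~ 0.57 for this exaggerated open eye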
def mouth_aspect_ratio(mouth):
A = dist.euclidean(mouth[13], mouth[19])
B = dist.euclidean(mouth[14], mouth[18])
C = dist.euclidean(mouth[15], mouth[17])
MAR = (A + B + C) / 3.0
return MAR |
py | 1a541f23d168c6b4285b6dec82e14891dfabb6a4 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import logging
import math
from typing import List
import torch
import torch.nn as nn
import torch.nn.init as init
from reagent.models.base import ModelBase
logger = logging.getLogger(__name__)
def gaussian_fill_w_gain(tensor, activation, dim_in, min_std=0.0) -> None:
""" Gaussian initialization with gain."""
gain = math.sqrt(2) if (activation == "relu" or activation == "leaky_relu") else 1
init.normal_(tensor, mean=0, std=max(gain * math.sqrt(1 / dim_in), min_std))
ACTIVATION_MAP = {
"tanh": nn.Tanh,
"relu": nn.ReLU,
"leaky_relu": nn.LeakyReLU,
"linear": nn.Identity,
"sigmoid": nn.Sigmoid,
}
class FullyConnectedNetwork(ModelBase):
def __init__(
self,
layers,
activations,
*,
use_batch_norm=False,
min_std=0.0,
dropout_ratio=0.0,
use_layer_norm=False,
normalize_output=False,
) -> None:
super().__init__()
self.input_dim = layers[0]
modules: List[nn.Module] = []
assert len(layers) == len(activations) + 1
for i, ((in_dim, out_dim), activation) in enumerate(
zip(zip(layers, layers[1:]), activations)
):
# Add BatchNorm1d
if use_batch_norm:
modules.append(nn.BatchNorm1d(in_dim))
# Add Linear
linear = nn.Linear(in_dim, out_dim)
gaussian_fill_w_gain(linear.weight, activation, in_dim, min_std=min_std)
init.constant_(linear.bias, 0) # type: ignore
modules.append(linear)
# Add LayerNorm
if use_layer_norm and (normalize_output or i < len(activations) - 1):
modules.append(nn.LayerNorm(out_dim)) # type: ignore
# Add activation
if activation in ACTIVATION_MAP:
modules.append(ACTIVATION_MAP[activation]())
else:
# See if it matches any of the nn modules
modules.append(getattr(nn, activation)())
# Add Dropout
if dropout_ratio > 0.0 and (normalize_output or i < len(activations) - 1):
modules.append(nn.Dropout(p=dropout_ratio))
self.dnn = nn.Sequential(*modules) # type: ignore
def input_prototype(self):
return torch.randn(1, self.input_dim)
def forward(self, input: torch.Tensor) -> torch.Tensor:
"""Forward pass for generic feed-forward DNNs. Assumes activation names
are valid pytorch activation names.
:param input tensor
"""
return self.dnn(input)
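if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): a small MLP with
    # hypothetical layer sizes; activation names must be keys of ACTIVATION_MAP
    # or valid torch.nn module names.
    example_net = FullyConnectedNetwork(layers=[16, 32, 1], activations=["relu", "linear"])
    print(example_net(example_net.input_prototype()).shape)  # torch.Size([1, 1])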
|
py | 1a541f2ccd7c6cbe3c9cc0f362951db55c16f5e5 | # qubit number=2
# total number=10
import pyquil
from pyquil.api import local_forest_runtime, QVMConnection
from pyquil import Program, get_qc
from pyquil.gates import *
import numpy as np
conn = QVMConnection()
def make_circuit()-> Program:
prog = Program() # circuit begin
prog += H(0) # number=1
prog += Y(1) # number=2
prog += Y(1) # number=4
prog += Y(1) # number=3
prog += RX(2.0860175219836226,1) # number=7
prog += X(0) # number=5
prog += X(0) # number=6
prog += Y(0) # number=8
prog += Y(0) # number=9
# circuit end
return prog
def summrise_results(bitstrings) -> dict:
d = {}
for l in bitstrings:
if d.get(l) is None:
d[l] = 1
else:
d[l] = d[l] + 1
return d
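# Example (illustrative): summrise_results(['00', '01', '00']) -> {'00': 2, '01': 1}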
if __name__ == '__main__':
prog = make_circuit()
qvm = get_qc('1q-qvm')
results = qvm.run_and_measure(prog,1024)
bitstrings = np.vstack([results[i] for i in qvm.qubits()]).T
bitstrings = [''.join(map(str, l)) for l in bitstrings]
writefile = open("../data/startPyquil169.csv","w")
print(summrise_results(bitstrings),file=writefile)
writefile.close()
|
py | 1a54201f0edb9810214e640150f3b800790dc011 | from django.db.models import Q
from .models import Article
class FilterArticles:
def __init__(self):
""" Creates an instance of the FilterArticles class"""
self.filters = []
@classmethod
def by_request(cls, request):
""" Filters articles by request"""
return FilterArticles()._run_filters(request.GET)._results()
def _run_filters(self, keywords):
"""Runs all the filters available on the request class"""
for key in keywords:
self._trigger_filter(key, keywords.get(key))
return self
def _trigger_filter(self, filter, value):
""" Triggers and executes a give filter"""
filter_method = getattr(self, f"_filter_{filter}", None)
if filter_method and value:
filter_method(value)
def _results(self):
""" Returns the filtered results"""
return Article.objects.filter(*self.filters)
def _filter_title(self, value):
""" Filters articles by a given title"""
self.filters.append(Q(title__icontains=value))
def _filter_author(self, value):
""" Filters articles by an author"""
self.filters.append(
Q(author__username__icontains=value) |
Q(author__email__icontains=value)
)
def _filter_tag(self, value):
""" Filters articles by a given tag"""
self.filters.append(Q(tagList__name__icontains=value))
def _filter_description(self, value):
""" Filters articles by a related description"""
self.filters.append(Q(description__icontains=value))
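# Illustrative usage (hypothetical request): a GET such as /articles/?title=django&tag=python
# makes FilterArticles.by_request(request) equivalent to
#   Article.objects.filter(Q(title__icontains='django'), Q(tagList__name__icontains='python'))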
|
py | 1a542215dd692be2da3fdebbda5f26c1dbe6d58d | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Setup for GTalk Pyauto tests."""
import os
import sys
def _SetupPaths():
"""Setting path to find pyauto_functional.py."""
gtalk_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.append(gtalk_dir)
sys.path.append(os.path.normpath(os.path.join(gtalk_dir, os.pardir)))
_SetupPaths()
from pyauto_functional import Main
if __name__ == '__main__':
Main()
|
py | 1a5422e35fe81c124736f3b6b09ad0c8c2976840 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp
from model.queuestatus import QueueStatus
class PatchStatus(webapp.RequestHandler):
def get(self, queue_name, attachment_id):
statuses = QueueStatus.all().filter('queue_name =', queue_name).filter('active_patch_id =', int(attachment_id)).order('-date').fetch(1)
if not statuses:
self.error(404)
return
self.response.out.write(statuses[0].message)
|
py | 1a5423b4ae7870696e6713a3e6892c943ffa39d4 | def test_is_healthy(service):
assert service.is_healthy()
def test_install(empty_application_context):
from fractal.core.services import Service
assert next(Service.install(empty_application_context)).__class__ == Service
|
py | 1a54246788e342770af0b89d60a7cec20edd2ad7 | """Get info from gce metadata and put it into grains store."""
from __future__ import print_function
from __future__ import unicode_literals
import json
import six
def _decode_list(data):
"""Decode list items from unicode to normal strings."""
ret = []
for item in data:
if isinstance(item, six.text_type):
item = item.encode('utf-8')
elif isinstance(item, list):
item = _decode_list(item)
elif isinstance(item, dict):
item = _decode_dict(item)
ret.append(item)
return ret
def _decode_dict(data):
"""Decode dictionary keys and values from unicode to normal strings."""
ret = {}
for key, value in data.items():
if isinstance(key, six.text_type):
key = key.encode('utf-8')
if isinstance(value, six.text_type):
value = value.encode('utf-8')
if isinstance(key, six.binary_type):
key = key.decode('utf-8')
if isinstance(value, six.binary_type):
value = value.decode('utf-8')
elif isinstance(value, list):
value = _decode_list(value)
elif isinstance(value, dict):
value = _decode_dict(value)
ret[key] = value
return ret
def gce_metadata():
"""
Fetch all metadata from GCE.
Also fills in some legacy grain data
"""
ret = {}
http = six.moves.http_client.HTTPConnection('metadata.google.internal')
http.request('GET', '/computeMetadata/v1/instance/?recursive=true', None,
{'Metadata-Flavor': 'Google'})
resp = http.getresponse()
json_str = resp.read().decode('utf-8')
metadata = json.loads(json_str, object_hook=_decode_dict)
ipv4 = metadata['networkInterfaces'][0]['accessConfigs'][0]['externalIp']
ret['pub_fqdn_ipv4'] = ret['external_ip'] = ipv4
ret['tags'] = ret['roles'] = metadata['tags']
ret['zone'] = metadata['zone']
ret['gce'] = metadata
return ret
if __name__ == '__main__':
print(gce_metadata())
|
py | 1a5425790a1d5a97a4ca2859b9909c45f54da59d | #!/usr/bin/env python
# encoding: utf-8
import re
import datetime
def time_fix(time_string):
now_time = datetime.datetime.now()
if '分钟前' in time_string:
minutes = re.search(r'^(\d+)分钟', time_string).group(1)
created_at = now_time - datetime.timedelta(minutes=int(minutes))
return created_at.strftime('%Y-%m-%d %H:%M')
if '小时前' in time_string:
minutes = re.search(r'^(\d+)小时', time_string).group(1)
created_at = now_time - datetime.timedelta(hours=int(minutes))
return created_at.strftime('%Y-%m-%d %H:%M')
if '今天' in time_string:
return time_string.replace('今天', now_time.strftime('%Y-%m-%d'))
if '月' in time_string:
time_string = time_string.replace('月', '-').replace('日', '')
time_string = str(now_time.year) + '-' + time_string
return time_string
return time_string
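# Illustrative examples (hypothetical, assuming "now" is 2020-05-01 12:00):
#   time_fix('5分钟前')         -> '2020-05-01 11:55'
#   time_fix('今天 09:30')      -> '2020-05-01 09:30'
#   time_fix('04月28日 08:00')  -> '2020-04-28 08:00'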
keyword_re = re.compile('<span class="kt">|</span>|原图|<!-- 是否进行翻译 -->|<span class="cmt">|\[组图共.张\]')
emoji_re = re.compile('<img alt="|" src="//h5\.sinaimg(.*?)/>')
white_space_re = re.compile('<br />')
div_re = re.compile('</div>|<div>')
image_re = re.compile('<img(.*?)/>')
url_re = re.compile('<a href=(.*?)>|</a>')
def extract_weibo_content(weibo_html):
s = weibo_html
if 'class="ctt">' in s:
s = s.split('class="ctt">', maxsplit=1)[1]
s = emoji_re.sub('', s)
s = url_re.sub('', s)
s = div_re.sub('', s)
s = image_re.sub('', s)
if '<span class="ct">' in s:
s = s.split('<span class="ct">')[0]
splits = s.split('赞[')
if len(splits) == 2:
s = splits[0]
if len(splits) == 3:
origin_text = splits[0]
retweet_text = splits[1].split('转发理由:')[1]
s = origin_text + '转发理由:' + retweet_text
s = white_space_re.sub(' ', s)
s = keyword_re.sub('', s)
s = s.replace('\xa0', '')
s = s.strip(':')
s = s.strip()
return s
def extract_comment_content(comment_html):
s = comment_html
if 'class="ctt">' in s:
s = s.split('class="ctt">', maxsplit=1)[1]
s = s.split('举报', maxsplit=1)[0]
s = emoji_re.sub('', s)
s = keyword_re.sub('', s)
s = url_re.sub('', s)
s = div_re.sub('', s)
s = image_re.sub('', s)
s = white_space_re.sub(' ', s)
s = s.replace('\xa0', '')
s = s.strip(':')
s = s.strip()
return s |
py | 1a54258fbe88cb8de632a2e779cf4da9ec8afe42 | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import re
import glob
def file_to_df(file_path):
pattern = "genome=([a-zA-Z0-9]+)_.*_run=(\d+)"
genome, run = re.search(pattern, file_path).groups()
df = pd.read_csv(file_path)
df["fraction_stashed"] = df["nr_stashed"] / max(df["nr_qgrams"])
df["Time per access (μs)"] = (df["access_time (ns)"] / df["nr_qgrams"]) / (10**3)
df["Total Runtime (s)"] = df["total_time (ns)"] / (10**9)
df["Total Access Time (s)"] = df["access_time (ns)"] / (10**9)
df["h"] = [f"H={h}" for h in df.h]
df["run"] = int(run)
df["genome"] = genome
return df
def get_df():
dfs = []
for index_file in snakemake.input.stats:
dfs.append(file_to_df(index_file))
df = pd.concat(dfs)
df["genome"] = pd.Categorical(df["genome"], ["mxanthus", "pfalciparum", "hops", "hg38"])
df["h"] = pd.Categorical(df["h"], ["H=8", "H=16", "H=24"])
df.sort_values("genome")
return df
def plot():
df = get_df()
df = df.rename(
columns={
"h": "Hopscotch Neighborhood",
"total_time (ns)": "Total Runtime (ns)",
"genome": "Genome",
}
)
sns.set(
style="whitegrid",
font_scale=1.2,
)
sns.despine()
g = sns.catplot(
x="Hopscotch Neighborhood",
y="Time per access (μs)",
row="q",
col="Genome",
hue="hf",
kind="point",
size=5,
aspect=0.8,
data=df,
legend_out=False,
margin_titles=True,
dodge=True,
)
plt.savefig(snakemake.output.access_time_pdf)
if __name__ == "__main__":
plot()
|
py | 1a54263062ccf9e3f8c617fd41cff2b6ddb808b8 | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import logging
import json
from inspect import currentframe
class GGIoT:
# Constructor
def __init__(self, thing='default', prefix='sputnik', dummy=False):
self.thing = thing
self.prefix = prefix
self.topicPrefix = self.prefix + '/' + self.thing + '/'
self.topicLogger = self.topicPrefix + 'logger'
if dummy == False:
import greengrasssdk
ggsdk = greengrasssdk.client('iot-data')
def prepPublish(topic=self.topicLogger, payload={}):
ggsdk.publish(topic=topic, payload=json.dumps(payload))
def prepUpdateShadow(thing=self.thing, payload={}):
ggsdk.update_thing_shadow(thingName=thing, payload=json.dumps(payload))
def prepGetShadow(thingName=self.thing):
response = ggsdk.get_thing_shadow(thingName=thingName)
payloadDict = json.loads(response['payload'])
return payloadDict
self.publish = prepPublish
self.updateThingShadow = prepUpdateShadow
self.getThingShadow = prepGetShadow
else:
logging.warn("Setting up GGSDK in dummy mode")
def debug(topic=self.topicLogger, payload={}):
logging.debug(topic + ': ' + json.dumps(payload))
def debugUpdateShadow(thing=self.thing, payload={}):
logging.debug("ggsdk.updateThingShadow: " + thing + ": " + json.dumps(payload))
def debugGetShadow(thing=self.thing, payload={}):
logging.debug("ggsdk.getThingShadow: " + thing + ": {}")
return {}
self.publish = debug
self.updateThingShadow = debugUpdateShadow
self.getThingShadow = debugGetShadow
def info(self, data):
self.publish(topic=self.topicLogger, payload={
"type": "info",
"payload": data
})
def exception(self, err):
self.publish(topic=self.topicLogger, payload={
"type": "exception",
"line": currentframe().f_back.f_lineno,
"payload": err
})
def publish(self, topic, data):
self.publish(topic=topic, payload=data)
def updateThingShadow(self, data):
self.updateThingShadow(thing=self.thing, payload=data)
def getThingShadow(self):
return self.getThingShadow()
|
py | 1a5426cfcfa63091412c72078fab26ec7ffdaf79 | import logging
from pylons import request, response, session, tmpl_context as c, url
from pylons.controllers.util import abort, redirect
from pyfisheyes.lib.base import BaseController, render
log = logging.getLogger(__name__)
class SessionController(BaseController):
def _index(self):
# Return a rendered template
#return render('/session.mako')
# or, return a string
return 'Hello World'
def index(self):
name = session.get('name', 'NULL')
return 'session name=%s' % name
def setsession(self):
session['name'] = 'tony'
session.save()
return "save session ok"
|
py | 1a5427571756dcef9c3d95365934fb79c4de4e03 | def output():
print('\n'"Customer Code: ", a)
print("Beginning Meter Reading: ", b)
print("Ending Meter Reading: ", c)
print("Gallons of Water Used: ", gallons_used)
print("Amount Billed: $", bill,'\n')
while True:
a = input("Enter code:\n ")
a = a.lower() #Changing the customer code to lower case
if a == 'r' or a == 'c' or a == 'i': #To continue, the customer code should be either r or c or i
b = input("Enter Beginning meter reading:\n ") #Getting Customer's Beginning meter number
if len(b) <= 9: #Checking the length of the Beginning meter number
c = input("Enter Ending meter reading:\n ") #Getting the Custmer's Ending meter number
if len(c) <= 9: #Checking the lenth of Customer's Ending meter number
bmn = int(b) #Converting the beginning and ending meter numbers to integers
emn = int(c)
gallons = emn - bmn #Calculating for the gallons as the difference of the meter numbers
gallons_used = gallons * 0.1 #Determining the gallons used as tenths
gallons_used = round(gallons_used, 2)
if a == 'r': #Considering whether the customer's code was r for residents
amount = gallons_used * 0.0005 #Calculating the amount to be paid for all gallons used
amount_billed = 5 + amount #Totaling the amount to be billed to the customer adding the standard payment of $5.00
bill = float(round(amount_billed, 2))
output() #calling the output function to display the customer's particulars
continue #Using continue to tell the program, that if the customer code was not r, go to the next
elif a == 'c':
if gallons_used <= 4000000:
amount_billed = 1000.00
bill = float(round(amount_billed, 2))
else:
excess_gallons = gallons_used - 4000000
amount = excess_gallons * 0.00025
amount_billed = 1000.00 + amount
bill = float(round(amount_billed, 2))
output()
continue
elif a == 'i':
if gallons_used <= 4000000:
amount_billed = 1000.00
bill = float(round(amount_billed, 2))
elif gallons_used > 4000000 and gallons_used <= 10000000:
amount_billed = 2000.00
bill = float(round(amount_billed, 2))
elif gallons_used > 10000000:
excess_gallons = gallons_used - 10000000
amount = excess_gallons * 0.00025
amount_billed = 2000.00 + amount
bill = float(round(amount_billed, 2))
output()
else:
print("Invalid Ending Meter Reading")
continue
else:
print("Invalid Beginning Meter Number")
continue
else:
print("Invalid Customer Code")
continue |
py | 1a5428061e26d2150ffc9db0c5f7fc5eead758b9 | from django import template
import datetime
from website.models import *
register = template.Library()
# This tag is used in the shopping cart
@register.simple_tag(takes_context=True)
def get_image_product(context, id_product):
product = Product.objects.get(id=id_product)
if product.type_product == False:
id_origin_product = Link_Type.objects.get(product_id_id=id_product).parent_product
else:
id_origin_product = id_product
image = Product_Image.objects.filter(product_id_id=id_origin_product).order_by('image_id_id').first()
return '/product' + image.image_id.image_link.url
@register.simple_tag(takes_context=True)
def get_price_discount(context, price, discount):
return price * (100 - discount)/100
|
py | 1a542836721cd1282572276e6c9771e4a3370ce1 | '''
Author: alex
Created Time: 2020-08-20 (Thursday) 16:09:37
'''
import cv2
import numpy as np
def remove_watermark(image, thr=200, convol=3):
"""
简单粗暴去水印,可将将pdf或者扫描件中水印去除
使用卷积来优化计算
:param image: 输入图片,cv格式灰度图像
:param thr: 去除图片中像素阈值
:param convol: 卷积窗口的大小
:return: 返回np.array格式图片
"""
distance = int((convol - 1) / 2) # 为了执行卷积,对图像连缘进行像素扩充
# 使用白色来进行边缘像素扩充
image = cv2.copyMakeBorder(image, distance, distance, distance, distance,
cv2.BORDER_CONSTANT, value=255)
mask = (image < 200).astype(int)
# 单位矩阵卷积操作
mask = cv2.boxFilter(mask, -1, (convol, convol), normalize=False)
mask = (mask >= 1).astype(int) # 掩膜构建完成,>=1表示窗口内有黑点
image[np.where(mask == 0)] = 255 # 掩膜中为0的位置赋值为255,白色,达到去水印效果
h, w = image.shape[:2]
image = image[distance:h - distance, distance:w - distance]
return image
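# Illustrative usage sketch (hypothetical file name): load a scan as a grayscale
# OpenCV image and strip light watermarks with a 3x3 neighbourhood check.
#   gray = cv2.imread('scan.png', cv2.IMREAD_GRAYSCALE)
#   cleaned = remove_watermark(gray, thr=200, convol=3)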
def bak_remove_watermark(image, thr=200, distance=1):
"""
简单粗暴去水印,可将将pdf或者扫描件中水印去除
:param image: 输入图片,Image格式
:param thr: 去除图片中像素阈值
:param distance: 去除图片中像素距离
:return: 返回np.arrayg格式图片
"""
w, h = image.size
rgb_im = image.convert('RGB')
for x in range(0, w - 1):
for y in range(0, h - 1):
if not hasBlackAround(x, y, distance, rgb_im, thr=thr):
rgb_im.putpixel((x, y), (255, 255, 255))
return rgb_im
def hasBlackAround(x, y, distance, img, thr=200):
w, h = img.size
startX = max(0, x-distance)
startY = max(0, y-distance)
endX = min(w-1, x+distance)
endY = min(h-1, y+distance)
for j in range(startX, endX):
for k in range(startY, endY):
r, g, b = img.getpixel((j, k))
if r < thr and g < thr and b < thr:
                # this pixel qualifies as a dark (ink) pixel
return True
return False
if __name__ == '__main__':
from PIL import Image
debug = False
image_path = "gf-png/gf1.png"
img = Image.open(image_path)
    # the PIL-based variant matches these arguments (thr/distance) and the Image input
    res_img = bak_remove_watermark(img, thr=100, distance=1)
|
py | 1a5429791283ec7f15c5421a538d9a2df253c12f | """
ASGI config for My_shop project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'My_shop.settings')
application = get_asgi_application()
|
py | 1a542aa83d8601d5ee6ef67a7f140ff5348fbb33 | # Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
__version__ = '0.2.0'
|
py | 1a542c4228046acc8769c38e45e2b5b78aa1f357 | # 4-3. Counting to Twenty
for number in range(1, 21):
print(number)
|
py | 1a542db7029a6057ab5b480ed969bd5e3cfb57be | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""configure script to get build parameters from user."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import errno
import glob
import os
import platform
import re
import subprocess
import sys
# pylint: disable=g-import-not-at-top
try:
from shutil import which
except ImportError:
from distutils.spawn import find_executable as which
# pylint: enable=g-import-not-at-top
_DEFAULT_CUDA_VERSION = '10'
_DEFAULT_CUDNN_VERSION = '7'
_DEFAULT_CUDA_COMPUTE_CAPABILITIES = '3.5,7.0'
_DEFAULT_GCC_TOOLCHAIN_PATH = ''
_DEFAULT_GCC_TOOLCHAIN_TARGET = ''
_DEFAULT_PROMPT_ASK_ATTEMPTS = 10
_TF_BAZELRC_FILENAME = '.tf_plugin_configure.bazelrc'
_TF_WORKSPACE_ROOT = ''
_TF_BAZELRC = ''
_TF_CURRENT_BAZEL_VERSION = None
NCCL_LIB_PATHS = [
'lib64/', 'lib/powerpc64le-linux-gnu/', 'lib/x86_64-linux-gnu/', ''
]
class UserInputError(Exception):
pass
def is_windows():
return platform.system() == 'Windows'
def is_linux():
return platform.system() == 'Linux'
def is_macos():
return platform.system() == 'Darwin'
def is_ppc64le():
return platform.machine() == 'ppc64le'
def is_cygwin():
return platform.system().startswith('CYGWIN_NT')
def get_input(question):
try:
try:
answer = raw_input(question)
except NameError:
answer = input(question) # pylint: disable=bad-builtin
except EOFError:
answer = ''
return answer
def symlink_force(target, link_name):
"""Force symlink, equivalent of 'ln -sf'.
Args:
target: items to link to.
link_name: name of the link.
"""
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def sed_in_place(filename, old, new):
"""Replace old string with new string in file.
Args:
filename: string for filename.
old: string to replace.
new: new string to replace to.
"""
with open(filename, 'r') as f:
filedata = f.read()
newdata = filedata.replace(old, new)
with open(filename, 'w') as f:
f.write(newdata)
def write_to_bazelrc(line):
with open(_TF_BAZELRC, 'a') as f:
f.write(line + '\n')
def write_action_env_to_bazelrc(var_name, var):
write_to_bazelrc('build --action_env %s="%s"' % (var_name, str(var)))
def run_shell(cmd, allow_non_zero=False):
if allow_non_zero:
try:
output = subprocess.check_output(cmd)
except subprocess.CalledProcessError as e:
output = e.output
else:
output = subprocess.check_output(cmd)
return output.decode('UTF-8').strip()
def cygpath(path):
"""Convert path from posix to windows."""
return os.path.abspath(path).replace('\\', '/')
def get_python_path(environ_cp, python_bin_path):
"""Get the python site package paths."""
python_paths = []
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
try:
library_paths = run_shell([
python_bin_path, '-c',
'import site; print("\\n".join(site.getsitepackages()))'
]).split('\n')
except subprocess.CalledProcessError:
library_paths = [
run_shell([
python_bin_path, '-c',
'from distutils.sysconfig import get_python_lib;'
'print(get_python_lib())'
])
]
all_paths = set(python_paths + library_paths)
paths = []
for path in all_paths:
if os.path.isdir(path):
paths.append(path)
return paths
def get_python_major_version(python_bin_path):
"""Get the python major version."""
return run_shell([python_bin_path, '-c', 'import sys; print(sys.version[0])'])
def setup_python(environ_cp):
"""Setup python related env variables."""
# Get PYTHON_BIN_PATH, default is the current running python.
default_python_bin_path = sys.executable
ask_python_bin_path = ('Please specify the location of python. [Default is '
'%s]: ') % default_python_bin_path
while True:
python_bin_path = get_from_env_or_user_or_default(environ_cp,
'PYTHON_BIN_PATH',
ask_python_bin_path,
default_python_bin_path)
# Check if the path is valid
if os.path.isfile(python_bin_path) and os.access(python_bin_path, os.X_OK):
break
elif not os.path.exists(python_bin_path):
print('Invalid python path: %s cannot be found.' % python_bin_path)
else:
print('%s is not executable. Is it the python binary?' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = ''
# Convert python path to Windows style before checking lib and version
if is_windows() or is_cygwin():
python_bin_path = cygpath(python_bin_path)
# Get PYTHON_LIB_PATH
python_lib_path = environ_cp.get('PYTHON_LIB_PATH')
if not python_lib_path:
python_lib_paths = get_python_path(environ_cp, python_bin_path)
if environ_cp.get('USE_DEFAULT_PYTHON_LIB_PATH') == '1':
python_lib_path = python_lib_paths[0]
else:
print('Found possible Python library paths:\n %s' %
'\n '.join(python_lib_paths))
default_python_lib_path = python_lib_paths[0]
python_lib_path = get_input(
'Please input the desired Python library path to use. '
'Default is [%s]\n' % python_lib_paths[0])
if not python_lib_path:
python_lib_path = default_python_lib_path
environ_cp['PYTHON_LIB_PATH'] = python_lib_path
_ = get_python_major_version(python_bin_path)
# Convert python path to Windows style before writing into bazel.rc
if is_windows() or is_cygwin():
python_lib_path = cygpath(python_lib_path)
# Set-up env variables used by python_configure.bzl
write_action_env_to_bazelrc('PYTHON_BIN_PATH', python_bin_path)
write_action_env_to_bazelrc('PYTHON_LIB_PATH', python_lib_path)
write_to_bazelrc('build --python_path=\"%s"' % python_bin_path)
environ_cp['PYTHON_BIN_PATH'] = python_bin_path
  # If the chosen python_lib_path is from a path specified in the PYTHONPATH
  # variable, we need to tell bazel to include PYTHONPATH
if environ_cp.get('PYTHONPATH'):
python_paths = environ_cp.get('PYTHONPATH').split(':')
if python_lib_path in python_paths:
write_action_env_to_bazelrc('PYTHONPATH', environ_cp.get('PYTHONPATH'))
# Write tools/python_bin_path.sh
with open(
os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow_plugin', 'tools', 'python_bin_path.sh'),
'w') as f:
f.write('export PYTHON_BIN_PATH="%s"' % python_bin_path)
def get_python_lib_name(environ_cp):
python_bin_path = environ_cp['PYTHON_BIN_PATH']
path_list = python_bin_path.split(os.sep)[:-2]
path_list.append('lib')
py_lib_path = os.sep.join(path_list)
for _, _, files in os.walk(py_lib_path):
for name in files:
if str(name).startswith('libpython') and str(name).endswith('.so'):
# strip libxxx.so to get xxx
return str(name).strip()[3:-3]
def get_python_link_path(environ_cp):
# TODO(quintin): we need to link libpythonx.y.so for _pywrap_tensorflow_internal.so
# once google change CAPI symbols into libtensorflow.so, we don't need this
python_bin_path = environ_cp['PYTHON_BIN_PATH']
path_list = python_bin_path.split(os.sep)[:-2]
path_list.append('lib')
py_lib_path = os.sep.join(path_list)
return py_lib_path
def create_build_configuration(environ_cp):
tf_header_dir = environ_cp['PYTHON_LIB_PATH'] + "/tensorflow/include"
tf_shared_lib_dir = environ_cp['PYTHON_LIB_PATH'] + "/tensorflow/"
write_action_env_to_bazelrc("TF_HEADER_DIR", tf_header_dir)
write_action_env_to_bazelrc("TF_SHARED_LIBRARY_DIR", tf_shared_lib_dir)
write_action_env_to_bazelrc("TF_CXX11_ABI_FLAG", 1)
write_action_env_to_bazelrc("PYTHON_LINK_LIB_NAME", get_python_lib_name(environ_cp))
write_action_env_to_bazelrc("PYTHON_LINK_PATH", get_python_link_path(environ_cp))
def reset_tf_configure_bazelrc():
"""Reset file that contains customized config settings."""
open(_TF_BAZELRC, 'w').close()
def cleanup_makefile():
"""Delete any leftover BUILD files from the Makefile build.
These files could interfere with Bazel parsing.
"""
makefile_download_dir = os.path.join(_TF_WORKSPACE_ROOT, 'tensorflow',
'contrib', 'makefile', 'downloads')
if os.path.isdir(makefile_download_dir):
for root, _, filenames in os.walk(makefile_download_dir):
for f in filenames:
if f.endswith('BUILD'):
os.remove(os.path.join(root, f))
def get_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None):
"""Get boolean input from user.
If var_name is not set in env, ask user to enable query_item or not. If the
response is empty, use the default.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
Returns:
boolean value of the variable.
Raises:
UserInputError: if an environment variable is set, but it cannot be
interpreted as a boolean indicator, assume that the user has made a
scripting error, and will continue to provide invalid input.
Raise the error to avoid infinitely looping.
"""
if not question:
question = 'Do you wish to build TensorFlow plug-in with %s support?' % query_item
if not yes_reply:
yes_reply = '%s support will be enabled for TensorFlow plug-in.' % query_item
if not no_reply:
no_reply = 'No %s' % yes_reply
yes_reply += '\n'
no_reply += '\n'
if enabled_by_default:
question += ' [Y/n]: '
else:
question += ' [y/N]: '
var = environ_cp.get(var_name)
if var is not None:
var_content = var.strip().lower()
true_strings = ('1', 't', 'true', 'y', 'yes')
false_strings = ('0', 'f', 'false', 'n', 'no')
if var_content in true_strings:
var = True
elif var_content in false_strings:
var = False
else:
raise UserInputError(
'Environment variable %s must be set as a boolean indicator.\n'
'The following are accepted as TRUE : %s.\n'
'The following are accepted as FALSE: %s.\n'
'Current value is %s.' %
(var_name, ', '.join(true_strings), ', '.join(false_strings), var))
while var is None:
user_input_origin = get_input(question)
user_input = user_input_origin.strip().lower()
if user_input == 'y':
print(yes_reply)
var = True
elif user_input == 'n':
print(no_reply)
var = False
elif not user_input:
if enabled_by_default:
print(yes_reply)
var = True
else:
print(no_reply)
var = False
else:
print('Invalid selection: %s' % user_input_origin)
return var
def set_build_var(environ_cp,
var_name,
query_item,
option_name,
enabled_by_default,
bazel_config_name=None):
"""Set if query_item will be enabled for the build.
Ask user if query_item will be enabled. Default is used if no input is given.
Set subprocess environment variable and write to .bazelrc if enabled.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
option_name: string for option to define in .bazelrc.
enabled_by_default: boolean for default behavior.
bazel_config_name: Name for Bazel --config argument to enable build feature.
"""
var = str(int(get_var(environ_cp, var_name, query_item, enabled_by_default)))
environ_cp[var_name] = var
if var == '1':
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
write_to_bazelrc('build --config=%s' % bazel_config_name)
elif bazel_config_name is not None:
# TODO(mikecase): Migrate all users of configure.py to use --config Bazel
# options and not to set build configs through environment variables.
write_to_bazelrc('build:%s --define %s=true' %
(bazel_config_name, option_name))
def set_action_env_var(environ_cp,
var_name,
query_item,
enabled_by_default,
question=None,
yes_reply=None,
no_reply=None,
bazel_config_name=None):
"""Set boolean action_env variable.
Ask user if query_item will be enabled. Default is used if no input is given.
Set environment variable and write to .bazelrc.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
query_item: string for feature related to the variable, e.g. "CUDA for
Nvidia GPUs".
enabled_by_default: boolean for default behavior.
question: optional string for how to ask for user input.
yes_reply: optional string for reply when feature is enabled.
no_reply: optional string for reply when feature is disabled.
    bazel_config_name: if set, write a "build --config=<name>" line to .bazelrc
      instead of an action_env entry.
"""
var = int(
get_var(environ_cp, var_name, query_item, enabled_by_default, question,
yes_reply, no_reply))
if not bazel_config_name:
write_action_env_to_bazelrc(var_name, var)
elif var:
write_to_bazelrc('build --config=%s' % bazel_config_name)
environ_cp[var_name] = str(var)
def convert_version_to_int(version):
"""Convert a version number to a integer that can be used to compare.
Version strings of the form X.YZ and X.Y.Z-xxxxx are supported. The
'xxxxx' part, for instance 'homebrew' on OS/X, is ignored.
Args:
version: a version to be converted
Returns:
An integer if converted successfully, otherwise return None.
"""
version = version.split('-')[0]
version_segments = version.split('.')
# Treat "0.24" as "0.24.0"
if len(version_segments) == 2:
version_segments.append('0')
for seg in version_segments:
if not seg.isdigit():
return None
version_str = ''.join(['%03d' % int(seg) for seg in version_segments])
return int(version_str)
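# A small, hypothetical illustration (the name _demo_convert_version_to_int is
# not part of the original script) of the zero-padding scheme described above:
# each segment becomes three digits, so ordering the integers orders versions.
def _demo_convert_version_to_int():
  assert convert_version_to_int('3.7.2') == 3007002
  assert convert_version_to_int('0.24') == 24000  # treated as 0.24.0
  assert convert_version_to_int('1.0.0-homebrew') == 1000000  # suffix ignored
  assert convert_version_to_int('not-a-version') is None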
def check_bazel_version(min_version, max_version):
"""Check installed bazel version is between min_version and max_version.
Args:
min_version: string for minimum bazel version (must exist!).
max_version: string for maximum bazel version (must exist!).
Returns:
The bazel version detected.
"""
if os.path.exists("./.bazelversion"):
curr_version = run_shell(
['cat', '.bazelversion'])
  else:
if which('bazel') is None:
print('Cannot find bazel. Please install bazel.')
sys.exit(0)
curr_version = run_shell(
['bazel', '--batch', '--bazelrc=/dev/null', 'version'])
for line in curr_version.split('\n'):
if 'Build label: ' in line:
curr_version = line.split('Build label: ')[1]
break
min_version_int = convert_version_to_int(min_version)
curr_version_int = convert_version_to_int(curr_version)
max_version_int = convert_version_to_int(max_version)
# Check if current bazel version can be detected properly.
if not curr_version_int:
print('WARNING: current bazel installation is not a release version.')
print('Make sure you are running at least bazel %s' % min_version)
return curr_version
print('You have bazel %s installed.' % curr_version)
if curr_version_int < min_version_int:
print('Please upgrade your bazel installation to version %s or higher to '
'build TensorFlow!' % min_version)
sys.exit(1)
if (curr_version_int > max_version_int and
'TF_IGNORE_MAX_BAZEL_VERSION' not in os.environ):
print('Please downgrade your bazel installation to version %s or lower to '
'build TensorFlow! To downgrade: download the installer for the old '
'version (from https://github.com/bazelbuild/bazel/releases) then '
'run the installer.' % max_version)
sys.exit(1)
return curr_version
def set_cc_opt_flags(environ_cp):
"""Set up architecture-dependent optimization flags.
  Also append CC optimization flags to .bazelrc.
Args:
environ_cp: copy of the os.environ.
"""
if is_ppc64le():
# gcc on ppc64le does not support -march, use mcpu instead
default_cc_opt_flags = '-mcpu=native'
elif is_windows():
default_cc_opt_flags = '/arch:AVX'
else:
default_cc_opt_flags = '-march=native -Wno-sign-compare'
question = ('Please specify optimization flags to use during compilation when'
' bazel option "--config=opt" is specified [Default is %s]: '
) % default_cc_opt_flags
cc_opt_flags = get_from_env_or_user_or_default(environ_cp, 'CC_OPT_FLAGS',
question, default_cc_opt_flags)
for opt in cc_opt_flags.split():
write_to_bazelrc('build:opt --copt=%s' % opt)
  # Using -march=native for host code should be safe since we build and run
  # on the same host.
if not is_ppc64le() and not is_windows():
write_to_bazelrc('build:opt --host_copt=-march=native')
write_to_bazelrc('build:opt --define with_default_optimizations=true')
def set_tf_cuda_clang(environ_cp):
"""set TF_CUDA_CLANG action_env.
Args:
environ_cp: copy of the os.environ.
"""
question = 'Do you want to use clang as CUDA compiler?'
yes_reply = 'Clang will be used as CUDA compiler.'
no_reply = 'nvcc will be used as CUDA compiler.'
set_action_env_var(
environ_cp,
'TF_CUDA_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='cuda_clang')
def set_tf_download_clang(environ_cp):
"""Set TF_DOWNLOAD_CLANG action_env."""
question = 'Do you wish to download a fresh release of clang? (Experimental)'
yes_reply = 'Clang will be downloaded and used to compile tensorflow.'
no_reply = 'Clang will not be downloaded.'
set_action_env_var(
environ_cp,
'TF_DOWNLOAD_CLANG',
None,
False,
question=question,
yes_reply=yes_reply,
no_reply=no_reply,
bazel_config_name='download_clang')
def get_from_env_or_user_or_default(environ_cp, var_name, ask_for_var,
var_default):
"""Get var_name either from env, or user or default.
If var_name has been set as environment variable, use the preset value, else
ask for user input. If no input is provided, the default is used.
Args:
environ_cp: copy of the os.environ.
var_name: string for name of environment variable, e.g. "TF_NEED_CUDA".
ask_for_var: string for how to ask for user input.
var_default: default value string.
Returns:
string value for var_name
"""
var = environ_cp.get(var_name)
if not var:
var = get_input(ask_for_var)
print('\n')
if not var:
var = var_default
return var
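# A minimal, hypothetical usage sketch (the helper name and the CC_OPT_FLAGS
# value below are illustrative only): when the variable is already present in
# environ_cp, the preset value wins and the user is never prompted.
def _demo_get_from_env_or_user_or_default():
  environ_cp = {'CC_OPT_FLAGS': '-O2'}
  assert get_from_env_or_user_or_default(
      environ_cp, 'CC_OPT_FLAGS', 'Flags to use: ', '-march=native') == '-O2'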
def set_clang_cuda_compiler_path(environ_cp):
"""Set CLANG_CUDA_COMPILER_PATH."""
default_clang_path = which('clang') or ''
ask_clang_path = ('Please specify which clang should be used as device and '
'host compiler. [Default is %s]: ') % default_clang_path
while True:
clang_cuda_compiler_path = get_from_env_or_user_or_default(
environ_cp, 'CLANG_CUDA_COMPILER_PATH', ask_clang_path,
default_clang_path)
if os.path.exists(clang_cuda_compiler_path):
break
# Reset and retry
print('Invalid clang path: %s cannot be found.' % clang_cuda_compiler_path)
environ_cp['CLANG_CUDA_COMPILER_PATH'] = ''
# Set CLANG_CUDA_COMPILER_PATH
environ_cp['CLANG_CUDA_COMPILER_PATH'] = clang_cuda_compiler_path
write_action_env_to_bazelrc('CLANG_CUDA_COMPILER_PATH',
clang_cuda_compiler_path)
def prompt_loop_or_load_from_env(environ_cp,
var_name,
var_default,
ask_for_var,
check_success,
error_msg,
suppress_default_error=False,
n_ask_attempts=_DEFAULT_PROMPT_ASK_ATTEMPTS):
"""Loop over user prompts for an ENV param until receiving a valid response.
For the env param var_name, read from the environment or verify user input
until receiving valid input. When done, set var_name in the environ_cp to its
new value.
Args:
environ_cp: (Dict) copy of the os.environ.
var_name: (String) string for name of environment variable, e.g. "TF_MYVAR".
var_default: (String) default value string.
ask_for_var: (String) string for how to ask for user input.
check_success: (Function) function that takes one argument and returns a
boolean. Should return True if the value provided is considered valid. May
contain a complex error message if error_msg does not provide enough
information. In that case, set suppress_default_error to True.
error_msg: (String) String with one and only one '%s'. Formatted with each
invalid response upon check_success(input) failure.
suppress_default_error: (Bool) Suppress the above error message in favor of
one from the check_success function.
n_ask_attempts: (Integer) Number of times to query for valid input before
raising an error and quitting.
Returns:
[String] The value of var_name after querying for input.
Raises:
    UserInputError: if a query has been attempted n_ask_attempts times without
      success. In that case the user has most likely made a scripting error
      and would keep providing invalid input, so the error is raised to avoid
      looping forever.
"""
default = environ_cp.get(var_name) or var_default
full_query = '%s [Default is %s]: ' % (
ask_for_var,
default,
)
for _ in range(n_ask_attempts):
val = get_from_env_or_user_or_default(environ_cp, var_name, full_query,
default)
if check_success(val):
break
if not suppress_default_error:
print(error_msg % val)
environ_cp[var_name] = ''
else:
raise UserInputError('Invalid %s setting was provided %d times in a row. '
'Assuming to be a scripting mistake.' %
(var_name, n_ask_attempts))
environ_cp[var_name] = val
return val
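# A hypothetical, non-interactive sketch of prompt_loop_or_load_from_env (the
# helper name and TF_MYVAR value are invented for illustration): a preset env
# value that passes check_success is accepted without ever prompting the user.
def _demo_prompt_loop_or_load_from_env():
  environ_cp = {'TF_MYVAR': '/usr/bin'}
  val = prompt_loop_or_load_from_env(
      environ_cp,
      var_name='TF_MYVAR',
      var_default='/usr/local/bin',
      ask_for_var='Please specify a directory.',
      check_success=lambda _: True,  # stand-in validator; always accepts
      error_msg='Invalid value: %s.')
  assert val == '/usr/bin' and environ_cp['TF_MYVAR'] == '/usr/bin'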
def create_android_ndk_rule(environ_cp):
"""Set ANDROID_NDK_HOME and write Android NDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_ndk_path = cygpath('%s/Android/Sdk/ndk-bundle' %
environ_cp['APPDATA'])
elif is_macos():
default_ndk_path = '%s/library/Android/Sdk/ndk-bundle' % environ_cp['HOME']
else:
default_ndk_path = '%s/Android/Sdk/ndk-bundle' % environ_cp['HOME']
def valid_ndk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'source.properties')))
android_ndk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_HOME',
var_default=default_ndk_path,
ask_for_var='Please specify the home path of the Android NDK to use.',
check_success=valid_ndk_path,
error_msg=('The path %s or its child file "source.properties" '
'does not exist.'))
write_action_env_to_bazelrc('ANDROID_NDK_HOME', android_ndk_home_path)
write_action_env_to_bazelrc(
'ANDROID_NDK_API_LEVEL',
get_ndk_api_level(environ_cp, android_ndk_home_path))
def create_android_sdk_rule(environ_cp):
"""Set Android variables and write Android SDK WORKSPACE rule."""
if is_windows() or is_cygwin():
default_sdk_path = cygpath('%s/Android/Sdk' % environ_cp['APPDATA'])
elif is_macos():
default_sdk_path = '%s/library/Android/Sdk' % environ_cp['HOME']
else:
default_sdk_path = '%s/Android/Sdk' % environ_cp['HOME']
def valid_sdk_path(path):
return (os.path.exists(path) and
os.path.exists(os.path.join(path, 'platforms')) and
os.path.exists(os.path.join(path, 'build-tools')))
android_sdk_home_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_SDK_HOME',
var_default=default_sdk_path,
ask_for_var='Please specify the home path of the Android SDK to use.',
check_success=valid_sdk_path,
error_msg=('Either %s does not exist, or it does not contain the '
'subdirectories "platforms" and "build-tools".'))
platforms = os.path.join(android_sdk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [x.replace('android-', '') for x in api_levels]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_sdk_home_path, 'platforms',
'android-' + api_level))
android_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_API_LEVEL',
var_default=api_levels[-1],
ask_for_var=('Please specify the Android SDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the SDK path.')
build_tools = os.path.join(android_sdk_home_path, 'build-tools')
versions = sorted(os.listdir(build_tools))
def valid_build_tools(version):
return os.path.exists(
os.path.join(android_sdk_home_path, 'build-tools', version))
android_build_tools_version = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_BUILD_TOOLS_VERSION',
var_default=versions[-1],
ask_for_var=('Please specify an Android build tools version to use. '
'[Available versions: %s]') % versions,
check_success=valid_build_tools,
error_msg=('The selected SDK does not have build-tools version %s '
'available.'))
write_action_env_to_bazelrc('ANDROID_BUILD_TOOLS_VERSION',
android_build_tools_version)
write_action_env_to_bazelrc('ANDROID_SDK_API_LEVEL', android_api_level)
write_action_env_to_bazelrc('ANDROID_SDK_HOME', android_sdk_home_path)
def get_ndk_api_level(environ_cp, android_ndk_home_path):
"""Gets the appropriate NDK API level to use for the provided Android NDK path."""
# First check to see if we're using a blessed version of the NDK.
properties_path = '%s/source.properties' % android_ndk_home_path
if is_windows() or is_cygwin():
properties_path = cygpath(properties_path)
with open(properties_path, 'r') as f:
filedata = f.read()
revision = re.search(r'Pkg.Revision = (\d+)', filedata)
if revision:
ndk_version = revision.group(1)
else:
raise Exception('Unable to parse NDK revision.')
if int(ndk_version) not in _SUPPORTED_ANDROID_NDK_VERSIONS:
print('WARNING: The NDK version in %s is %s, which is not '
'supported by Bazel (officially supported versions: %s). Please use '
'another version. Compiling Android targets may result in confusing '
'errors.\n' % (android_ndk_home_path, ndk_version,
_SUPPORTED_ANDROID_NDK_VERSIONS))
# Now grab the NDK API level to use. Note that this is different from the
# SDK API level, as the NDK API level is effectively the *min* target SDK
# version.
platforms = os.path.join(android_ndk_home_path, 'platforms')
api_levels = sorted(os.listdir(platforms))
api_levels = [
x.replace('android-', '') for x in api_levels if 'android-' in x
]
def valid_api_level(api_level):
return os.path.exists(
os.path.join(android_ndk_home_path, 'platforms',
'android-' + api_level))
android_ndk_api_level = prompt_loop_or_load_from_env(
environ_cp,
var_name='ANDROID_NDK_API_LEVEL',
var_default='18', # 18 is required for GPU acceleration.
ask_for_var=('Please specify the (min) Android NDK API level to use. '
'[Available levels: %s]') % api_levels,
check_success=valid_api_level,
error_msg='Android-%s is not present in the NDK path.')
return android_ndk_api_level
def set_gcc_host_compiler_path(environ_cp):
"""Set GCC_HOST_COMPILER_PATH."""
default_gcc_host_compiler_path = which('gcc') or ''
cuda_bin_symlink = '%s/bin/gcc' % environ_cp.get('CUDA_TOOLKIT_PATH')
if os.path.islink(cuda_bin_symlink):
# os.readlink is only available in linux
default_gcc_host_compiler_path = os.path.realpath(cuda_bin_symlink)
gcc_host_compiler_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_HOST_COMPILER_PATH',
var_default=default_gcc_host_compiler_path,
ask_for_var='Please specify which gcc should be used by nvcc as the host compiler.',
check_success=os.path.exists,
error_msg='Invalid gcc path. %s cannot be found.',
)
write_action_env_to_bazelrc('GCC_HOST_COMPILER_PATH', gcc_host_compiler_path)
def reformat_version_sequence(version_str, sequence_count):
"""Reformat the version string to have the given number of sequences.
For example:
Given (7, 2) -> 7.0
(7.0.1, 2) -> 7.0
(5, 1) -> 5
(5.0.3.2, 1) -> 5
Args:
version_str: String, the version string.
    sequence_count: int, the number of version segments to keep.
Returns:
string, reformatted version string.
"""
v = version_str.split('.')
if len(v) < sequence_count:
v = v + (['0'] * (sequence_count - len(v)))
return '.'.join(v[:sequence_count])
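# A hypothetical doctest-style check (the name _demo_reformat_version_sequence
# is not part of the original script) restating the examples in the docstring.
def _demo_reformat_version_sequence():
  assert reformat_version_sequence('7', 2) == '7.0'
  assert reformat_version_sequence('7.0.1', 2) == '7.0'
  assert reformat_version_sequence('5', 1) == '5'
  assert reformat_version_sequence('5.0.3.2', 1) == '5'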
def set_tf_cuda_paths(environ_cp):
"""Set TF_CUDA_PATHS."""
ask_cuda_paths = (
'Please specify the comma-separated list of base paths to look for CUDA '
'libraries and headers. [Leave empty to use the default]: ')
tf_cuda_paths = get_from_env_or_user_or_default(environ_cp, 'TF_CUDA_PATHS',
ask_cuda_paths, '')
if tf_cuda_paths:
environ_cp['TF_CUDA_PATHS'] = tf_cuda_paths
def set_tf_cuda_version(environ_cp):
"""Set TF_CUDA_VERSION."""
ask_cuda_version = (
'Please specify the CUDA SDK version you want to use. '
'[Leave empty to default to CUDA %s]: ') % _DEFAULT_CUDA_VERSION
tf_cuda_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDA_VERSION',
ask_cuda_version,
_DEFAULT_CUDA_VERSION)
environ_cp['TF_CUDA_VERSION'] = tf_cuda_version
def set_tf_cudnn_version(environ_cp):
"""Set TF_CUDNN_VERSION."""
ask_cudnn_version = (
'Please specify the cuDNN version you want to use. '
'[Leave empty to default to cuDNN %s]: ') % _DEFAULT_CUDNN_VERSION
tf_cudnn_version = get_from_env_or_user_or_default(environ_cp,
'TF_CUDNN_VERSION',
ask_cudnn_version,
_DEFAULT_CUDNN_VERSION)
environ_cp['TF_CUDNN_VERSION'] = tf_cudnn_version
def is_cuda_compatible(lib, cuda_ver, cudnn_ver):
"""Check compatibility between given library and cudnn/cudart libraries."""
ldd_bin = which('ldd') or '/usr/bin/ldd'
ldd_out = run_shell([ldd_bin, lib], True)
ldd_out = ldd_out.split(os.linesep)
cudnn_pattern = re.compile('.*libcudnn.so\\.?(.*) =>.*$')
cuda_pattern = re.compile('.*libcudart.so\\.?(.*) =>.*$')
cudnn = None
cudart = None
cudnn_ok = True # assume no cudnn dependency by default
cuda_ok = True # assume no cuda dependency by default
for line in ldd_out:
if 'libcudnn.so' in line:
cudnn = cudnn_pattern.search(line)
cudnn_ok = False
elif 'libcudart.so' in line:
cudart = cuda_pattern.search(line)
cuda_ok = False
if cudnn and len(cudnn.group(1)):
cudnn = convert_version_to_int(cudnn.group(1))
if cudart and len(cudart.group(1)):
cudart = convert_version_to_int(cudart.group(1))
if cudnn is not None:
cudnn_ok = (cudnn == cudnn_ver)
if cudart is not None:
cuda_ok = (cudart == cuda_ver)
return cudnn_ok and cuda_ok
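# A hypothetical sample (the ldd line below is fabricated for illustration,
# not taken from a real system) showing the line shape the patterns above
# expect: the captured group is the version suffix after "libcudart.so." or
# "libcudnn.so.".
def _demo_cuda_ldd_patterns():
  sample = '\tlibcudart.so.10.1 => /usr/local/cuda/lib64/libcudart.so.10.1'
  match = re.compile('.*libcudart.so\\.?(.*) =>.*$').search(sample)
  assert match is not None and match.group(1) == '10.1'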
def get_native_cuda_compute_capabilities(environ_cp):
"""Get native cuda compute capabilities.
Args:
environ_cp: copy of the os.environ.
Returns:
string of native cuda compute capabilities, separated by comma.
"""
device_query_bin = os.path.join(
environ_cp.get('CUDA_TOOLKIT_PATH'), 'extras/demo_suite/deviceQuery')
if os.path.isfile(device_query_bin) and os.access(device_query_bin, os.X_OK):
try:
output = run_shell(device_query_bin).split('\n')
pattern = re.compile('[0-9]*\\.[0-9]*')
output = [pattern.search(x) for x in output if 'Capability' in x]
output = ','.join(x.group() for x in output if x is not None)
except subprocess.CalledProcessError:
output = ''
else:
output = ''
return output
def set_tf_cuda_compute_capabilities(environ_cp):
"""Set TF_CUDA_COMPUTE_CAPABILITIES."""
while True:
native_cuda_compute_capabilities = get_native_cuda_compute_capabilities(
environ_cp)
if not native_cuda_compute_capabilities:
default_cuda_compute_capabilities = _DEFAULT_CUDA_COMPUTE_CAPABILITIES
else:
default_cuda_compute_capabilities = native_cuda_compute_capabilities
ask_cuda_compute_capabilities = (
'Please specify a list of comma-separated CUDA compute capabilities '
'you want to build with.\nYou can find the compute capability of your '
'device at: https://developer.nvidia.com/cuda-gpus. Each capability '
'can be specified as "x.y" or "compute_xy" to include both virtual and'
' binary GPU code, or as "sm_xy" to only include the binary '
'code.\nPlease note that each additional compute capability '
'significantly increases your build time and binary size, and that '
'TensorFlow only supports compute capabilities >= 3.5 [Default is: '
'%s]: ' % default_cuda_compute_capabilities)
tf_cuda_compute_capabilities = get_from_env_or_user_or_default(
environ_cp, 'TF_CUDA_COMPUTE_CAPABILITIES',
ask_cuda_compute_capabilities, default_cuda_compute_capabilities)
    # Check whether all capabilities from the input are valid.
all_valid = True
    # Remove any whitespace characters that users may insert by accident
    # before splitting the string, as stray whitespace would cause an error.
tf_cuda_compute_capabilities = ''.join(tf_cuda_compute_capabilities.split())
for compute_capability in tf_cuda_compute_capabilities.split(','):
      m = re.match('[0-9]+\\.[0-9]+', compute_capability)
if not m:
# We now support sm_35,sm_50,sm_60,compute_70.
        sm_compute_match = re.match('(sm|compute)_?([0-9]+)',
                                    compute_capability)
if not sm_compute_match:
print('Invalid compute capability: %s' % compute_capability)
all_valid = False
else:
ver = int(sm_compute_match.group(2))
if ver < 30:
print(
                'ERROR: TensorFlow only supports CUDA compute capabilities'
                ' sm_30 and higher. Please re-specify the list of compute'
                ' capabilities excluding version %s.' % ver)
all_valid = False
if ver < 35:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than sm_35. Disable XLA when running on older GPUs.')
else:
ver = float(m.group(0))
if ver < 3.0:
print('ERROR: TensorFlow only supports CUDA compute capabilities 3.0 '
'and higher. Please re-specify the list of compute '
'capabilities excluding version %s.' % ver)
all_valid = False
if ver < 3.5:
print('WARNING: XLA does not support CUDA compute capabilities '
'lower than 3.5. Disable XLA when running on older GPUs.')
if all_valid:
break
# Reset and Retry
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = ''
# Set TF_CUDA_COMPUTE_CAPABILITIES
environ_cp['TF_CUDA_COMPUTE_CAPABILITIES'] = tf_cuda_compute_capabilities
write_action_env_to_bazelrc('TF_CUDA_COMPUTE_CAPABILITIES',
tf_cuda_compute_capabilities)
def set_other_cuda_vars(environ_cp):
"""Set other CUDA related variables."""
# If CUDA is enabled, always use GPU during build and test.
if environ_cp.get('TF_CUDA_CLANG') == '1':
write_to_bazelrc('build --config=cuda_clang')
else:
write_to_bazelrc('build --config=cuda')
def set_host_cxx_compiler(environ_cp):
"""Set HOST_CXX_COMPILER."""
default_cxx_host_compiler = which('g++') or ''
host_cxx_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_CXX_COMPILER',
var_default=default_cxx_host_compiler,
ask_for_var=('Please specify which C++ compiler should be used as the '
'host C++ compiler.'),
check_success=os.path.exists,
error_msg='Invalid C++ compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_CXX_COMPILER', host_cxx_compiler)
def set_host_c_compiler(environ_cp):
"""Set HOST_C_COMPILER."""
default_c_host_compiler = which('gcc') or ''
host_c_compiler = prompt_loop_or_load_from_env(
environ_cp,
var_name='HOST_C_COMPILER',
var_default=default_c_host_compiler,
ask_for_var=('Please specify which C compiler should be used as the host '
'C compiler.'),
check_success=os.path.exists,
error_msg='Invalid C compiler path. %s cannot be found.',
)
write_action_env_to_bazelrc('HOST_C_COMPILER', host_c_compiler)
def set_opencl_sdk_root(environ_cp):
"""Set OPENCL SDK ROOT"""
def toolkit_exists(toolkit_path):
"""Check if a CL header path is valid."""
if toolkit_path == '':
return True
if is_linux():
cl_header_path = 'opencl/SDK/include/CL/cl.h'
else:
cl_header_path = ''
cl_path_full = os.path.join(toolkit_path, cl_header_path)
exists = os.path.exists(cl_path_full)
if not exists:
print('Invalid OPENCL SDK ROOT path. %s cannot be found' %
(cl_path_full))
return exists
ocl_sdk_root = prompt_loop_or_load_from_env(
environ_cp,
var_name='OCL_SDK_ROOT',
var_default=_DEFAULT_OCL_SDK_ROOT,
ask_for_var=(
'Please specify the location of opencl SDK install path '
'for ocl headers and libOpenCL.so'),
check_success=toolkit_exists,
error_msg='Invalid OPENCL SDK ROOT path.',
suppress_default_error=True)
write_action_env_to_bazelrc('OCL_SDK_ROOT',
ocl_sdk_root)
def set_gcc_toolchain_path(environ_cp):
"""Set GCC_TOOLCHAIN_PATH."""
def no_check(arg):
return True
gcc_toolchain_path = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_TOOLCHAIN_PATH',
var_default=_DEFAULT_GCC_TOOLCHAIN_PATH,
ask_for_var=(
'Please specify the location of gcc toolchain used by the compiler'),
check_success=no_check,
error_msg='Invalid GCC_TOOLCHAIN path.',
suppress_default_error=True)
write_action_env_to_bazelrc('GCC_TOOLCHAIN_PATH',
gcc_toolchain_path)
return gcc_toolchain_path
def set_gcc_toolchain_target(environ_cp, gcc_toolchain_path):
"""Set GCC_TOOLCHAIN_TARGET."""
if gcc_toolchain_path == "":
return ""
def toolkit_exists(target):
"""Check if a gcc toolchain-target is valid."""
if is_linux():
if target == '':
gcc_bin_path = 'bin/gcc'
else:
gcc_bin_path = 'bin/' + target + '-gcc'
else:
gcc_bin_path = ''
gcc_bin_path_full = os.path.join(gcc_toolchain_path, gcc_bin_path)
exists = os.path.exists(gcc_bin_path_full)
if not exists:
print('Invalid GCC_TOOLCHAIN path and TARGET. %s cannot be found' %
(gcc_bin_path_full))
return exists
gcc_toolchain_target = prompt_loop_or_load_from_env(
environ_cp,
var_name='GCC_TOOLCHAIN_TARGET',
var_default=_DEFAULT_GCC_TOOLCHAIN_TARGET,
ask_for_var=(
'Please specify the target of gcc toolchain (e.g. x86_64-pc-linux) '
'the compiler will use.'),
check_success=toolkit_exists,
error_msg='Invalid GCC_TOOLCHAIN_TARGET',
suppress_default_error=True)
write_action_env_to_bazelrc('GCC_TOOLCHAIN_TARGET',
gcc_toolchain_target)
def set_mpi_home(environ_cp):
"""Set MPI_HOME."""
default_mpi_home = which('mpirun') or which('mpiexec') or ''
default_mpi_home = os.path.dirname(os.path.dirname(default_mpi_home))
def valid_mpi_path(mpi_home):
exists = (
os.path.exists(os.path.join(mpi_home, 'include')) and
(os.path.exists(os.path.join(mpi_home, 'lib')) or
os.path.exists(os.path.join(mpi_home, 'lib64')) or
os.path.exists(os.path.join(mpi_home, 'lib32'))))
if not exists:
print(
'Invalid path to the MPI Toolkit. %s or %s or %s or %s cannot be found'
% (os.path.join(mpi_home, 'include'),
             os.path.join(mpi_home, 'lib'),
             os.path.join(mpi_home, 'lib64'),
             os.path.join(mpi_home, 'lib32')))
return exists
_ = prompt_loop_or_load_from_env(
environ_cp,
var_name='MPI_HOME',
var_default=default_mpi_home,
ask_for_var='Please specify the MPI toolkit folder.',
check_success=valid_mpi_path,
error_msg='',
suppress_default_error=True)
def set_other_mpi_vars(environ_cp):
"""Set other MPI related variables."""
# Link the MPI header files
mpi_home = environ_cp.get('MPI_HOME')
symlink_force('%s/include/mpi.h' % mpi_home, 'third_party/mpi/mpi.h')
  # Determine if we use OpenMPI or MVAPICH; these require different header
  # files to be included here to keep the bazel dependency checker happy.
if os.path.exists(os.path.join(mpi_home, 'include/mpi_portable_platform.h')):
symlink_force(
os.path.join(mpi_home, 'include/mpi_portable_platform.h'),
'third_party/mpi/mpi_portable_platform.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=False',
'MPI_LIB_IS_OPENMPI=True')
else:
# MVAPICH / MPICH
symlink_force(
os.path.join(mpi_home, 'include/mpio.h'), 'third_party/mpi/mpio.h')
symlink_force(
os.path.join(mpi_home, 'include/mpicxx.h'), 'third_party/mpi/mpicxx.h')
# TODO(gunan): avoid editing files in configure
sed_in_place('third_party/mpi/mpi.bzl', 'MPI_LIB_IS_OPENMPI=True',
'MPI_LIB_IS_OPENMPI=False')
if os.path.exists(os.path.join(mpi_home, 'lib/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib/libmpi.so'), 'third_party/mpi/libmpi.so')
elif os.path.exists(os.path.join(mpi_home, 'lib64/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib64/libmpi.so'), 'third_party/mpi/libmpi.so')
elif os.path.exists(os.path.join(mpi_home, 'lib32/libmpi.so')):
symlink_force(
os.path.join(mpi_home, 'lib32/libmpi.so'), 'third_party/mpi/libmpi.so')
else:
raise ValueError(
'Cannot find the MPI library file in %s/lib or %s/lib64 or %s/lib32' %
(mpi_home, mpi_home, mpi_home))
def set_system_libs_flag(environ_cp):
syslibs = environ_cp.get('TF_SYSTEM_LIBS', '')
if syslibs:
if ',' in syslibs:
syslibs = ','.join(sorted(syslibs.split(',')))
else:
syslibs = ','.join(sorted(syslibs.split()))
write_action_env_to_bazelrc('TF_SYSTEM_LIBS', syslibs)
if 'PREFIX' in environ_cp:
write_to_bazelrc('build --define=PREFIX=%s' % environ_cp['PREFIX'])
if 'LIBDIR' in environ_cp:
write_to_bazelrc('build --define=LIBDIR=%s' % environ_cp['LIBDIR'])
if 'INCLUDEDIR' in environ_cp:
write_to_bazelrc('build --define=INCLUDEDIR=%s' % environ_cp['INCLUDEDIR'])
def set_windows_build_flags(environ_cp):
"""Set Windows specific build options."""
# The non-monolithic build is not supported yet
write_to_bazelrc('build --config monolithic')
# Suppress warning messages
write_to_bazelrc('build --copt=-w --host_copt=-w')
# Fix winsock2.h conflicts
write_to_bazelrc(
'build --copt=-DWIN32_LEAN_AND_MEAN --host_copt=-DWIN32_LEAN_AND_MEAN '
'--copt=-DNOGDI --host_copt=-DNOGDI')
# Output more verbose information when something goes wrong
write_to_bazelrc('build --verbose_failures')
  # The host and target platforms are the same in a Windows build, so there is
  # no need to distinguish them. This avoids building the same targets twice.
write_to_bazelrc('build --distinct_host_configuration=false')
if get_var(
environ_cp, 'TF_OVERRIDE_EIGEN_STRONG_INLINE', 'Eigen strong inline',
True, ('Would you like to override eigen strong inline for some C++ '
'compilation to reduce the compilation time?'),
'Eigen strong inline overridden.', 'Not overriding eigen strong inline, '
'some compilations could take more than 20 mins.'):
# Due to a known MSVC compiler issue
# https://github.com/tensorflow/tensorflow/issues/10521
# Overriding eigen strong inline speeds up the compiling of
# conv_grad_ops_3d.cc and conv_ops_3d.cc by 20 minutes,
# but this also hurts the performance. Let users decide what they want.
write_to_bazelrc('build --define=override_eigen_strong_inline=true')
def config_info_line(name, help_text):
"""Helper function to print formatted help text for Bazel config options."""
print('\t--config=%-12s\t# %s' % (name, help_text))
def validate_cuda_config(environ_cp):
"""Run find_cuda_config.py and return cuda_toolkit_path, or None."""
def maybe_encode_env(env):
"""Encodes unicode in env to str on Windows python 2.x."""
if not is_windows() or sys.version_info[0] != 2:
return env
for k, v in env.items():
if isinstance(k, unicode):
k = k.encode('ascii')
if isinstance(v, unicode):
v = v.encode('ascii')
env[k] = v
return env
cuda_libraries = ['cuda', 'cudnn']
if is_linux():
if int(environ_cp.get('TF_NEED_TENSORRT', False)):
cuda_libraries.append('tensorrt')
if environ_cp.get('TF_NCCL_VERSION', None):
cuda_libraries.append('nccl')
paths = glob.glob('**/third_party/gpus/find_cuda_config.py', recursive=True)
if not paths:
raise FileNotFoundError(
"Can't find 'find_cuda_config.py' script inside working directory")
proc = subprocess.Popen(
[environ_cp['PYTHON_BIN_PATH'], paths[0]] + cuda_libraries,
stdout=subprocess.PIPE,
env=maybe_encode_env(environ_cp))
if proc.wait():
# Errors from find_cuda_config.py were sent to stderr.
print('Asking for detailed CUDA configuration...\n')
return False
config = dict(
tuple(line.decode('ascii').rstrip().split(': ')) for line in proc.stdout)
print('Found CUDA %s in:' % config['cuda_version'])
print(' %s' % config['cuda_library_dir'])
print(' %s' % config['cuda_include_dir'])
print('Found cuDNN %s in:' % config['cudnn_version'])
print(' %s' % config['cudnn_library_dir'])
print(' %s' % config['cudnn_include_dir'])
if 'tensorrt_version' in config:
print('Found TensorRT %s in:' % config['tensorrt_version'])
print(' %s' % config['tensorrt_library_dir'])
print(' %s' % config['tensorrt_include_dir'])
if config.get('nccl_version', None):
print('Found NCCL %s in:' % config['nccl_version'])
print(' %s' % config['nccl_library_dir'])
print(' %s' % config['nccl_include_dir'])
print('\n')
environ_cp['CUDA_TOOLKIT_PATH'] = config['cuda_toolkit_path']
return True
def main():
global _TF_WORKSPACE_ROOT
global _TF_BAZELRC
global _TF_CURRENT_BAZEL_VERSION
parser = argparse.ArgumentParser()
parser.add_argument(
'--workspace',
type=str,
default=os.path.abspath(os.path.dirname(__file__)),
help='The absolute path to your active Bazel workspace.')
args = parser.parse_args()
_TF_WORKSPACE_ROOT = args.workspace
_TF_BAZELRC = os.path.join(_TF_WORKSPACE_ROOT, _TF_BAZELRC_FILENAME)
  # Make a copy of os.environ so it is clear when functions are getting and
  # setting environment variables.
environ_cp = dict(os.environ)
current_bazel_version = check_bazel_version('3.1.0', '3.7.0')
_TF_CURRENT_BAZEL_VERSION = convert_version_to_int(current_bazel_version)
reset_tf_configure_bazelrc()
cleanup_makefile()
setup_python(environ_cp)
create_build_configuration(environ_cp)
if is_windows():
environ_cp['TF_DOWNLOAD_CLANG'] = '0'
environ_cp['TF_NEED_MPI'] = '0'
# The numpy package on ppc64le uses OpenBLAS which has multi-threading
# issues that lead to incorrect answers. Set OMP_NUM_THREADS=1 at
# runtime to allow the Tensorflow testcases which compare numpy
# results to Tensorflow results to succeed.
if is_ppc64le():
write_action_env_to_bazelrc('OMP_NUM_THREADS', 1)
environ_cp['TF_NEED_CUDA'] = str(
int(get_var(environ_cp, 'TF_NEED_CUDA', 'CUDA', False)))
if (environ_cp.get('TF_NEED_CUDA') == '1' and
'TF_CUDA_CONFIG_REPO' not in environ_cp):
environ_save = dict(environ_cp)
for _ in range(_DEFAULT_PROMPT_ASK_ATTEMPTS):
if validate_cuda_config(environ_cp):
cuda_env_names = [
'TF_CUDA_VERSION',
'TF_CUDNN_VERSION',
'TF_CUDA_PATHS',
# Items below are for backwards compatibility when not using
# TF_CUDA_PATHS.
'CUDA_TOOLKIT_PATH',
'CUDNN_INSTALL_PATH',
]
# Note: set_action_env_var above already writes to bazelrc.
for name in cuda_env_names:
if name in environ_cp:
write_action_env_to_bazelrc(name, environ_cp[name])
break
# Restore settings changed below if CUDA config could not be validated.
environ_cp = dict(environ_save)
set_tf_cuda_version(environ_cp)
set_tf_cudnn_version(environ_cp)
if is_linux():
set_tf_tensorrt_version(environ_cp)
set_tf_nccl_version(environ_cp)
set_tf_cuda_paths(environ_cp)
else:
raise UserInputError(
          'Invalid CUDA settings were provided %d '
'times in a row. Assuming to be a scripting mistake.' %
_DEFAULT_PROMPT_ASK_ATTEMPTS)
set_tf_cuda_compute_capabilities(environ_cp)
if 'LD_LIBRARY_PATH' in environ_cp and environ_cp.get(
'LD_LIBRARY_PATH') != '1':
write_action_env_to_bazelrc('LD_LIBRARY_PATH',
environ_cp.get('LD_LIBRARY_PATH'))
set_tf_cuda_clang(environ_cp)
if environ_cp.get('TF_CUDA_CLANG') == '1':
# Ask whether we should download the clang toolchain.
set_tf_download_clang(environ_cp)
if environ_cp.get('TF_DOWNLOAD_CLANG') != '1':
# Set up which clang we should use as the cuda / host compiler.
set_clang_cuda_compiler_path(environ_cp)
else:
# Use downloaded LLD for linking.
write_to_bazelrc('build:cuda_clang --config=download_clang_use_lld')
else:
# Set up which gcc nvcc should use as the host compiler
# No need to set this on Windows
if not is_windows():
set_gcc_host_compiler_path(environ_cp)
set_other_cuda_vars(environ_cp)
else:
# CUDA not required. Ask whether we should download the clang toolchain and
# use it for the CPU build.
set_tf_download_clang(environ_cp)
# ROCm / CUDA are mutually exclusive.
# At most 1 GPU platform can be configured.
gpu_platform_count = 0
if environ_cp.get('TF_NEED_CUDA') == '1':
gpu_platform_count += 1
if gpu_platform_count >= 2:
    raise UserInputError('CUDA / ROCm are mutually exclusive. '
'At most 1 GPU platform can be configured.')
set_build_var(environ_cp, 'TF_NEED_MPI', 'MPI', 'with_mpi_support', False)
if environ_cp.get('TF_NEED_MPI') == '1':
set_mpi_home(environ_cp)
set_other_mpi_vars(environ_cp)
set_cc_opt_flags(environ_cp)
set_system_libs_flag(environ_cp)
if is_windows():
set_windows_build_flags(environ_cp)
if __name__ == '__main__':
main()
|
py | 1a542dcd87269604e135caed7c91d049c7e3bb27 | def arrayMaximalAdjacentDifference(inputArray):
difference = 0
for x in range(1, len(inputArray)):
tmp = inputArray[x] - inputArray[x - 1]
if tmp < 0:
if (tmp * -1) > difference:
difference = tmp * -1
else:
if tmp > difference:
difference = tmp
return difference
if __name__ == '__main__':
    print(arrayMaximalAdjacentDifference([2, 4, 1, 0])) |
py | 1a542e39819843eb63afaff5de992f92045e7545 | import os
import localstack_client.config
# LocalStack version
VERSION = '0.10.7'
# constant to represent the "local" region, i.e., local machine
REGION_LOCAL = 'local'
# dev environment
ENV_DEV = 'dev'
# backend service ports, for services that are behind a proxy (counting down from 4566)
DEFAULT_PORT_APIGATEWAY_BACKEND = 4566
DEFAULT_PORT_KINESIS_BACKEND = 4565
DEFAULT_PORT_DYNAMODB_BACKEND = 4564
DEFAULT_PORT_S3_BACKEND = 4563
DEFAULT_PORT_SNS_BACKEND = 4562
DEFAULT_PORT_SQS_BACKEND = 4561
DEFAULT_PORT_ELASTICSEARCH_BACKEND = 4560
DEFAULT_PORT_CLOUDFORMATION_BACKEND = 4559
DEFAULT_PORT_STEPFUNCTIONS_BACKEND = 4558
DEFAULT_PORT_IAM_BACKEND = 4557
DEFAULT_PORT_EC2_BACKEND = 4556
DEFAULT_PORT_KMS_BACKEND = 4555
DEFAULT_PORT_EVENTS_BACKEND = 4554
DEFAULT_PORT_LOGS_BACKEND = 4553
DEFAULT_PORT_WEB_UI = 8080
LOCALHOST = 'localhost'
# version of the Maven dependency with Java utility code
LOCALSTACK_MAVEN_VERSION = '0.2.0'
# map of default service APIs and ports to be spun up (fetch map from localstack_client)
DEFAULT_SERVICE_PORTS = localstack_client.config.get_service_ports()
# host to bind to when starting the services
BIND_HOST = '0.0.0.0'
# AWS user account ID used for tests
if 'TEST_AWS_ACCOUNT_ID' not in os.environ:
os.environ['TEST_AWS_ACCOUNT_ID'] = '000000000000'
TEST_AWS_ACCOUNT_ID = os.environ['TEST_AWS_ACCOUNT_ID']
# root code folder
LOCALSTACK_ROOT_FOLDER = os.path.realpath(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
# virtualenv folder
LOCALSTACK_VENV_FOLDER = os.path.join(LOCALSTACK_ROOT_FOLDER, '.venv')
if not os.path.isdir(LOCALSTACK_VENV_FOLDER):
# assuming this package lives here: <python>/lib/pythonX.X/site-packages/localstack/
LOCALSTACK_VENV_FOLDER = os.path.realpath(os.path.join(LOCALSTACK_ROOT_FOLDER, '..', '..', '..'))
# API Gateway path to indicate a user request sent to the gateway
PATH_USER_REQUEST = '_user_request_'
# name of LocalStack Docker image
DOCKER_IMAGE_NAME = 'localstack/localstack'
# backdoor API path used to retrieve or update config variables
CONFIG_UPDATE_PATH = '/?_config_'
# environment variable name to tag local test runs
ENV_INTERNAL_TEST_RUN = 'LOCALSTACK_INTERNAL_TEST_RUN'
# content types
APPLICATION_AMZ_JSON_1_0 = 'application/x-amz-json-1.0'
APPLICATION_AMZ_JSON_1_1 = 'application/x-amz-json-1.1'
APPLICATION_JSON = 'application/json'
APPLICATION_XML = 'application/xml'
APPLICATION_X_WWW_FORM_URLENCODED = 'application/x-www-form-urlencoded'
# strings to indicate truthy/falsy values
TRUE_STRINGS = ('1', 'true', 'True')
FALSE_STRINGS = ('0', 'false', 'False')
# Lambda defaults
LAMBDA_TEST_ROLE = 'arn:aws:iam::%s:role/lambda-test-role' % TEST_AWS_ACCOUNT_ID
# installation constants
ELASTICSEARCH_JAR_URL = 'https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.7.0.zip'
# See https://docs.aws.amazon.com/ja_jp/elasticsearch-service/latest/developerguide/aes-supported-plugins.html
ELASTICSEARCH_PLUGIN_LIST = ['analysis-icu', 'ingest-attachment', 'analysis-kuromoji',
'mapper-murmur3', 'mapper-size', 'analysis-phonetic', 'analysis-smartcn', 'analysis-stempel', 'analysis-ukrainian']
# Default ES modules to exclude (save apprx 66MB in the final image)
ELASTICSEARCH_DELETE_MODULES = ['ingest-geoip']
ELASTICMQ_JAR_URL = 'https://s3-eu-west-1.amazonaws.com/softwaremill-public/elasticmq-server-0.15.2.jar'
STS_JAR_URL = 'https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk-sts/1.11.14/aws-java-sdk-sts-1.11.14.jar'
STEPFUNCTIONS_ZIP_URL = 'https://s3.amazonaws.com/stepfunctionslocal/StepFunctionsLocal.zip'
KMS_URL_PATTERN = 'https://s3-eu-west-2.amazonaws.com/local-kms/localstack/v3/local-kms.<arch>.bin'
# TODO: Temporarily using a fixed version of DDB in Alpine, as we're hitting a SIGSEGV JVM crash with latest
DYNAMODB_JAR_URL_ALPINE = 'https://github.com/whummer/dynamodb-local/raw/master/etc/DynamoDBLocal.zip'
DYNAMODB_JAR_URL = 'https://s3-us-west-2.amazonaws.com/dynamodb-local/dynamodb_local_latest.zip'
# API endpoint for analytics events
API_ENDPOINT = os.environ.get('API_ENDPOINT') or 'https://api.localstack.cloud/v1'
# environment variable to indicates that this process is running the Web UI
LOCALSTACK_WEB_PROCESS = 'LOCALSTACK_WEB_PROCESS'
LOCALSTACK_INFRA_PROCESS = 'LOCALSTACK_INFRA_PROCESS'
# Hardcoded AWS account ID used by moto
MOTO_ACCOUNT_ID = '123456789012'
# Default lambda registry
DEFAULT_LAMBDA_CONTAINER_REGISTRY = 'lambci/lambda'
|
py | 1a542e6c30873f89fb6473bdbcca982b594d9d08 | """
Easily manage a complex pipeline with python.
============================================================================
AUTHOR: Michael D Dacre, [email protected]
ORGANIZATION: Stanford University
LICENSE: MIT License, property of Stanford, use as you wish
VERSION: 0.8.1
CREATED: 2016-14-15 16:01
Last modified: 2016-01-27 17:18
DESCRIPTION: Classes and functions to make running a pipeline easy.
Create a Pipeline object to hold your entire project, it
will auto-save to the specified file (with pickle). Then add
shell commands, shell scripts, functions, or sub-pipelines to
the Pipeline. Steps can be run simply or on a file list, which
can be generated on the fly using a regular expression.
USAGE EXAMPLE: import pipeline as pl
pipeline = get_pipeline(file) # file holds a pickled pipeline
pipeline.add('bed_to_vcf', ('bed_file', 'vcf_file'))
pipeline.add('cat bed_file | bed_to_vcf > vcf_file',
name='bed2vcf')
def my_test():
return True if os.path.isfile('vcf_file') else False
pipeline.add('cat bed_file | bed_to_vcf > vcf_file',
name='bed2vcf2', donetest=my_test)
def my_fun(no1, no2):
return no1 + no2
pipeline.add(my_fun, (1, 2))
pipeline.add(print, 'hi', # Only run print('hi') if
pretest=my_test) # my_test returns True
pipeline.run_all()
pipeline['my_fun'].out # Will return 3
TODO: Implement parallel running
============================================================================
"""
# Allow top-level import
from .pl import Pipeline
from .pl import Step
from .pl import Command
from .pl import Function
from .pl import get_pipeline
from .pl import run_cmd
from .pl import run_function
from . import tests
__all__ = ["Pipeline", "Step","Command", "Function", "get_pipeline", "pl",
"tests"]
|
py | 1a542f0aa60296051310f167cf7eaef27d3ea4e0 | import ImageDraw, math
# Coordinates for nice_font_x. There are 238 unique numbers.
nice_font_x = (\
0.16943, 0.11279, 0.08594, 0.19482, 0.19092, 0.08984, 0.12402, \
0.07031, 0.04590, 0.14600, 0.28467, 0.23193, 0.20801, 0.30811, \
0.05029, 0.09277, 0.01025, 0.10742, 0.18066, 0.21680, 0.14355, \
0.37061, 0.15820, 0.20068, 0.27393, 0.38525, 0.42773, 0.50146, \
0.33398, 0.45898, 0.54346, 0.44434, 0.40771, 0.39307, 0.35059, \
0.27734, 0.31934, 0.16602, 0.30078, 0.24902, 0.03564, 0.13818, \
0.07349, 0.39526, 0.11719, 0.42236, 0.49170, 0.40088, 0.36841, \
0.16382, 0.38867, 0.41895, 0.45557, 0.49536, 0.50928, 0.45117, \
0.10132, 0.05811, 0.09668, 0.36426, 0.21094, 0.26587, 0.28760, \
0.13477, 0.60303, 0.67432, 0.56421, 0.52100, 0.55957, 0.67188, \
0.78345, 0.82764, 0.72925, 0.75098, 0.61938, 0.59766, 0.47510, \
0.38037, 0.26953, 0.04297, 0.21338, 0.25879, 0.17236, 0.58496, \
0.14893, 0.18335, 0.31055, 0.24683, 0.22314, 0.29639, 0.43042, \
0.34180, 0.47021, 0.50439, 0.53223, 0.64404, 0.12061, 0.06641, \
0.06055, 0.16162, 0.23535, 0.03125, 0.05371, 0.22754, 0.33154, \
0.35400, 0.31641, 0.12744, 0.52832, 0.08301, 0.00000, 0.37598, \
0.44775, 0.48242, 0.40503, 0.17725, 0.13184, 0.23779, 0.15234, \
0.04053, 0.41211, 0.48486, 0.29102, 0.32324, 0.09912, 0.01270, \
0.33936, 0.29346, 0.51611, 0.43286, 0.49756, 0.28125, 0.39795, \
0.27173, 0.07593, 0.36011, 0.47778, 0.51221, 0.41455, 0.25342, \
0.42480, 0.32886, 0.25586, 0.24463, 0.56689, 0.55054, 0.57617, \
0.61328, 0.43506, 0.34619, 0.24023, 0.44116, 0.58032, 0.64111, \
0.65771, 0.74512, 0.66016, 0.69434, 0.76123, 0.89062, 0.97900, \
0.89355, 0.54688, 0.34839, 0.53809, 0.71655, 0.75562, 0.84106, \
0.90771, 0.88281, 0.95508, 0.85034, 0.85547, 0.60669, 0.66846, \
0.20508, 0.37305, 0.55420, 0.46753, 0.43823, 0.46533, 0.59253, \
0.46167, 0.58789, 0.68262, 0.19800, 0.53442, 0.57129, 0.64990, \
0.07910, 0.62256, 0.70459, 0.18799, 0.02881, 0.22070, 0.52344, \
0.66504, 0.62988, 0.69116, 0.73291, 0.68896, 0.63525, 0.35693, \
0.48730, 0.52588, 0.74121, 0.71338, 0.70068, 0.60034, 0.14062, \
0.68579, 0.70947, 0.59033, 0.20288, 0.02344, 0.61719, 0.00439, \
0.72510, 0.83740, 0.93262, 0.73633, 0.02002, 0.26172, 0.15576, \
0.02637, -0.01514, 0.38281, 0.10376, 0.06274, 0.33691, -0.04590, \
-0.02930, 0.76855, 0.17480, 0.01758, 0.00732, 0.30566, 0.03809)
# Coordinates for nice_font_y. There are 241 unique numbers.
nice_font_y = (\
0.17773, 0.55713, 0.71582, 0.00000, 0.10010, 0.46240, 0.59863, \
-0.01221, 0.19580, 0.26855, 0.44580, 0.51855, 0.72803, -0.10303, \
-0.01514, 0.01440, 0.08301, 0.19873, 0.21533, 0.11133, 0.05908, \
0.32666, 0.33887, 0.38086, 0.54004, 0.27466, 0.69189, 0.73975, \
0.78174, 0.69482, 0.56348, 0.54980, 0.62915, 0.66553, 0.62549, \
0.54688, 0.47168, 0.42334, 0.41211, 0.38623, 0.34229, 0.28076, \
0.10352, 0.20361, 0.05127, 0.39795, 0.54395, 0.67432, 0.67993, \
0.34961, 0.43848, 0.63940, 0.53516, -0.02637, 0.02222, 0.16797, \
0.29858, 0.35205, 0.30396, 0.16260, 0.03418, 0.06250, 0.26343, \
0.29150, 0.15918, 0.01196, 0.31714, 0.06543, 0.09009, 0.15381, \
0.50146, 0.56836, 0.65576, 0.63281, 0.57666, 0.45752, 0.68311, \
0.57275, 0.40088, 0.23730, 0.59619, -0.21045, 0.00439, 0.25928, \
0.48193, 0.58301, 0.44238, 0.60645, 0.46484, 0.52832, 0.11572, \
0.31201, 0.39404, 0.58887, -0.08911, -0.14160, -0.06812, 0.67676, \
0.71875, 0.52222, 0.14966, 0.03003, 0.11865, 0.64600, 0.56006, \
0.45410, 0.62305, 0.15625, 0.41626, 0.51172, 0.61011, 0.66162, \
0.43555, 0.22217, 0.13208, 0.04346, 0.18896, 0.09302, 0.30933, \
0.20947, 0.61426, 0.67017, 0.53271, 0.44824, 0.17139, 0.25195, \
0.37769, 0.70605, 0.42969, 0.47461, 0.40967, 0.23486, 0.24268, \
0.10791, 0.07471, 0.36914, 0.23193, 0.22705, 0.01855, 0.14502, \
0.06860, 0.33496, 0.63672, 0.32397, 0.20654, 0.07935, 0.16553, \
0.08691, 0.36304, 0.32031, 0.30713, 0.47705, 0.65234, 0.60156, \
0.21191, 0.37256, 0.12817, 0.41846, 0.50781, 0.42090, 0.28564, \
0.62061, 0.61694, 0.29395, 0.13574, 0.21753, 0.03760, 0.07080, \
0.38965, 0.49023, 0.51514, -0.09521, 0.00928, -0.13818, -0.17944, \
-0.18262, -0.09888, 0.45093, 0.60425, 0.65869, 0.09668, 0.50391, \
0.33057, 0.27710, 0.14722, 0.04736, 0.49756, 0.36523, 0.35449, \
0.70288, 0.36035, 0.42578, 0.24805, 0.35693, 0.34717, 0.70898, \
0.37549, -0.05566, 0.31445, 0.02759, 0.12280, 0.25586, 0.52588, \
0.64307, -0.19873, -0.12598, -0.13525, 0.18091, 0.13916, 0.45972, \
0.40430, 0.48755, 0.40723, -0.16870, -0.04297, -0.11328, 0.26562, \
0.07715, -0.20117, -0.11548, -0.16162, 0.30127, 0.46924, 0.28320, \
0.00684, 0.05371, 0.27148, 0.21973, 0.49414, -0.00684, 0.69971, \
-0.02246, -0.19019, 0.72314, -0.07422, -0.12061, -0.16626, -0.07666, \
-0.20508, -0.03320, 0.59277)
#
# This array gives the x indices into the x coordinate array nice_font_x.
# Every three numbers represents an index into the nice_font_x array that
# gives the actual coordinate.
nice_triangle_x_index = (\
# Character '!' X coordinate indices.
0, 1, 2, 0, 2, 2, 0, 2, 3, 3, 3, 0,\
4, 5, 5, 5, 4, 4,\
# Character '"' X coordinate indices.
6, 7, 8, 6, 8, 8, 6, 8, 9, 9, 9, 6,\
10, 11, 12, 10, 12, 12, 10, 12, 13, 13, 13, 10,\
# Character '#' X coordinate indices.
6, 14, 15, 15, 16, 16, 15, 16, 17, 6, 15, 17,\
6, 17, 18, 19, 18, 17, 19, 17, 20, 20, 16, 16,\
19, 20, 16, 21, 19, 16, 21, 16, 22, 22, 23, 24,\
22, 24, 11, 21, 22, 11, 21, 11, 25, 21, 25, 26,\
21, 26, 27, 28, 21, 27, 28, 27, 29, 29, 30, 30,\
29, 30, 31, 28, 29, 31, 28, 31, 32, 32, 30, 30,\
28, 32, 30, 18, 28, 30, 18, 30, 33, 33, 34, 35,\
33, 35, 36, 18, 33, 36, 18, 36, 37, 18, 37, 6,\
# Character '$' X coordinate indices.
38, 39, 39, 39, 20, 7, 39, 7, 40, 40, 6, 22,\
39, 40, 22, 39, 22, 39, 38, 39, 39, 38, 38, 39,\
38, 39, 39, 38, 39, 41, 38, 41, 42, 38, 42, 14,\
43, 38, 14, 14, 44, 39, 39, 39, 38, 14, 39, 38,\
14, 38, 38, 38, 45, 46, 46, 47, 48, 38, 46, 48,\
38, 48, 38, 38, 38, 39, 38, 39, 0, 14, 38, 0,\
14, 0, 41, 14, 41, 49, 43, 14, 49, 43, 49, 39,\
39, 39, 38, 39, 38, 38, 43, 39, 38, 43, 38, 50,\
51, 43, 50, 51, 50, 52, 51, 52, 53, 25, 51, 53,\
25, 53, 54, 25, 54, 55, 38, 25, 55, 39, 38, 55,\
39, 55, 38, 39, 38, 38,
# Character '%' X coordinate indices.
56, 57, 58, 58, 12, 36, 36, 59, 36, 36, 12, 60,\
36, 60, 61, 36, 36, 61, 36, 61, 62, 36, 62, 61,\
36, 61, 60, 58, 36, 60, 58, 60, 22, 56, 58, 22,\
56, 22, 63, 56, 63, 22, 56, 22, 60, 60, 12, 56,\
10, 60, 64, 64, 65, 10, 66, 67, 68, 68, 69, 70,\
70, 71, 70, 70, 65, 65, 70, 65, 72, 70, 70, 72,\
70, 72, 73, 70, 73, 72, 70, 72, 65, 68, 70, 65,\
68, 65, 74, 66, 68, 74, 66, 74, 75, 66, 75, 74,\
66, 74, 65, 65, 65, 66,
# Character '&' X coordinate indices.
76, 77, 78, 78, 58, 79, 79, 2, 80, 79, 80, 81,\
79, 81, 49, 79, 49, 41, 79, 41, 82, 78, 79, 82,\
78, 82, 78, 76, 78, 78, 76, 78, 34, 76, 34, 51,\
83, 76, 51, 83, 51, 81, 83, 81, 80, 83, 80, 84,\
83, 84, 63, 63, 85, 86, 86, 13, 87, 63, 86, 87,\
63, 87, 88, 63, 88, 11, 83, 63, 11, 83, 11, 81,\
83, 81, 89, 59, 13, 86, 59, 86, 90, 25, 59, 90,\
21, 25, 90, 21, 90, 76, 89, 21, 76, 89, 76, 91,\
83, 89, 91, 83, 91, 92, 92, 93, 75, 92, 75, 94,\
83, 92, 94, 94, 95, 83,
# Character ''' X coordinate indices.
96, 97, 8, 96, 8, 8, 96, 8, 9, 9, 9, 96,\
# Character '(' X coordinate indices.
89, 11, 1, 89, 1, 98, 98, 58, 11, 11, 89, 19,\
98, 11, 19, 98, 19, 82, 98, 82, 84, 98, 84, 89,\
# Character ')' X coordinate indices.
88, 56, 40, 88, 40, 18, 88, 18, 99, 44, 40, 56,\
99, 44, 56, 99, 56, 100, 88, 99, 100, 100, 24, 88,\
# Character '*' X coordinate indices.
84, 101, 102, 84, 102, 37, 37, 22, 103, 84, 37, 103,\
84, 103, 19, 84, 19, 104, 84, 104, 105, 84, 105, 11,\
84, 11, 106, 106, 81, 4, 84, 106, 4, 84, 4, 107,\
107, 7, 84,\
# Character '+' X coordinate indices.
28, 39, 39, 39, 102, 102, 39, 102, 39, 28, 39, 39,\
28, 39, 39, 39, 28, 28, 28, 108, 108, 28, 108, 28,\
39, 28, 28, 39, 28, 28,\
# Character ',' X coordinate indices.
41, 5, 5, 41, 5, 4, 41, 4, 4, 41, 4, 0,\
0, 17, 109, 0, 109, 6, 0, 6, 41,\
# Character '-' X coordinate indices.
38, 101, 101, 101, 38, 38,\
# Character '.' X coordinate indices.
4, 5, 5, 5, 4, 4,\
# Character '/' X coordinate indices.
7, 110, 12, 12, 35, 7,\
# Character '0' X coordinate indices.
1, 79, 97, 1, 97, 9, 9, 24, 111, 9, 111, 112,\
112, 46, 54, 112, 54, 113, 112, 113, 114, 114, 24, 24,\
114, 24, 111, 114, 111, 51, 112, 114, 51, 112, 51, 111,\
112, 111, 24, 9, 112, 24, 9, 24, 115, 9, 115, 116,\
1, 9, 116, 1, 116, 82, 1, 82, 24, 24, 24, 1,\
# Character '1' X coordinate indices.
21, 21, 10, 21, 10, 10, 21, 10, 23, 21, 23, 17,\
17, 17, 117, 21, 17, 117, 117, 106, 21,\
# Character '2' X coordinate indices.
118, 27, 27, 118, 27, 101, 118, 101, 119, 118, 119, 58,\
118, 58, 80, 118, 80, 21, 115, 41, 8, 115, 8, 44,\
35, 115, 44, 35, 44, 35, 111, 35, 35, 111, 35, 31,\
120, 111, 31, 120, 31, 27, 21, 120, 27, 21, 27, 121,\
21, 121, 26, 21, 26, 122, 21, 122, 4, 21, 4, 118,\
# Character '3' X coordinate indices.
1, 79, 116, 1, 116, 18, 1, 18, 78, 35, 19, 88,\
111, 35, 88, 111, 88, 117, 111, 117, 91, 51, 111, 91,\
18, 41, 14, 18, 14, 6, 78, 18, 6, 78, 6, 61,\
34, 78, 61, 34, 61, 111, 25, 34, 111, 25, 111, 55,\
91, 25, 55, 91, 55, 76, 91, 76, 55, 91, 55, 111,\
51, 91, 111, 51, 111, 76, 111, 51, 76, 111, 76, 54,\
111, 54, 31, 78, 111, 31, 1, 78, 31, 31, 78, 1,\
# Character '4' X coordinate indices.
120, 123, 123, 120, 123, 123, 120, 123, 123, 124, 123, 123,\
124, 123, 125, 123, 124, 125, 123, 125, 125, 123, 125, 126,\
120, 123, 126, 126, 120, 120, 120, 54, 54, 120, 54, 120,\
126, 120, 120, 126, 120, 120,
# Character '5' X coordinate indices.
1, 79, 63, 1, 63, 18, 1, 18, 78, 3, 41, 57,\
107, 113, 113, 107, 113, 3, 57, 107, 3, 57, 3, 22,\
3, 57, 22, 78, 3, 22, 78, 22, 127, 77, 78, 127,\
77, 127, 55, 45, 77, 55, 45, 55, 128, 77, 45, 128,\
77, 128, 29, 78, 77, 29, 1, 78, 29, 29, 78, 1,\
# Character '6' X coordinate indices.
129, 130, 120, 129, 120, 111, 129, 111, 62, 6, 18, 41,\
131, 18, 6, 131, 6, 23, 77, 131, 23, 77, 23, 38,\
77, 38, 112, 45, 77, 112, 45, 112, 54, 77, 45, 54,\
77, 54, 113, 10, 77, 113, 10, 113, 47, 60, 10, 47,\
60, 47, 10, 22, 60, 10, 22, 10, 17, 41, 22, 17,\
6, 41, 17, 6, 17, 40, 84, 6, 40, 84, 40, 1,\
19, 84, 1, 62, 19, 1, 129, 62, 1, 1, 89, 129,\
# Character '7' X coordinate indices.
132, 8, 8, 132, 8, 54, 132, 54, 54, 132, 54, 111,\
132, 111, 78, 78, 117, 9, 132, 78, 9, 132, 9, 18,\
18, 133, 132,\
# Character '8' X coordinate indices.
115, 4, 35, 134, 115, 35, 134, 35, 24, 134, 24, 82,\
134, 82, 116, 134, 116, 84, 77, 24, 35, 77, 35, 135,\
45, 77, 135, 22, 4, 115, 22, 115, 58, 4, 22, 58,\
4, 58, 7, 4, 7, 6, 24, 4, 6, 24, 6, 24,\
135, 24, 24, 135, 24, 26, 33, 135, 26, 33, 26, 113,\
135, 33, 113, 135, 113, 52, 135, 52, 111, 45, 135, 111,\
45, 111, 136, 77, 45, 136, 77, 136, 137, 77, 137, 112,\
35, 77, 112, 35, 112, 35, 23, 35, 35, 84, 23, 35,\
84, 35, 17, 134, 84, 17, 17, 119, 134,\
# Character '9' X coordinate indices.
44, 102, 41, 44, 41, 18, 44, 18, 81, 45, 111, 138,\
24, 111, 45, 24, 45, 34, 82, 24, 34, 82, 34, 139,\
82, 139, 56, 116, 82, 56, 116, 56, 79, 115, 116, 79,\
115, 79, 17, 35, 115, 17, 35, 17, 61, 111, 35, 61,\
111, 61, 33, 138, 111, 33, 138, 33, 113, 45, 138, 113,\
140, 45, 113, 120, 140, 113, 120, 113, 137, 77, 120, 137,\
77, 137, 113, 141, 77, 113, 141, 113, 33, 81, 141, 33,\
44, 81, 33, 33, 142, 44,\
# Character ':' X coordinate indices.
4, 5, 5, 5, 4, 4, 4, 5, 5, 5, 4, 4,\
# Character '' X coordinate indices.
4, 5, 5, 5, 4, 4, 41, 5, 5, 41, 5, 4,\
41, 4, 4, 41, 4, 0, 0, 17, 109, 0, 109, 6,\
0, 6, 41,\
# Character '<' X coordinate indices.
108, 102, 102, 102, 108, 108, 102, 108, 118, 108, 102, 118,\
118, 108, 108,\
# Character '=' X coordinate indices.
108, 108, 102, 102, 102, 108, 108, 108, 102, 102, 102, 108,\
# Character '>' X coordinate indices.
108, 108, 102, 102, 102, 90, 108, 102, 90, 108, 90, 102,\
102, 102, 108,\
# Character '?' X coordinate indices.
106, 11, 11, 106, 11, 143, 106, 143, 35, 85, 63, 8,\
85, 8, 44, 35, 85, 44, 35, 44, 24, 111, 35, 24,\
111, 24, 31, 138, 111, 31, 138, 31, 93, 47, 138, 93,\
91, 47, 93, 35, 91, 93, 35, 93, 113, 35, 113, 132,\
35, 132, 91, 35, 91, 123, 35, 123, 106, 123, 88, 88,\
88, 123, 123,\
# Character '@' X coordinate indices.
144, 145, 75, 146, 144, 75, 146, 75, 147, 46, 145, 144,\
148, 46, 144, 148, 144, 53, 59, 148, 53, 59, 53, 51,\
59, 51, 28, 104, 59, 28, 104, 28, 61, 149, 104, 61,\
149, 61, 150, 50, 149, 150, 50, 150, 131, 151, 50, 131,\
151, 131, 77, 27, 151, 77, 27, 77, 46, 152, 27, 46,\
152, 46, 146, 147, 152, 146, 147, 146, 153, 146, 147, 153,\
146, 153, 154, 146, 154, 155, 146, 155, 65, 146, 65, 156,\
146, 156, 69, 146, 69, 157, 158, 159, 160, 158, 160, 161,\
162, 158, 161, 162, 161, 73, 141, 162, 73, 141, 73, 162,\
141, 162, 163, 115, 141, 163, 115, 163, 4, 107, 115, 4,\
107, 4, 58, 107, 58, 102, 115, 107, 102, 115, 102, 56,\
123, 115, 56, 123, 56, 24, 94, 123, 24, 94, 24, 164,\
165, 94, 164, 165, 164, 166, 167, 165, 166, 167, 166, 168,\
169, 167, 168, 169, 168, 170, 171, 169, 170, 171, 170, 172,\
158, 171, 172, 157, 158, 172, 146, 157, 172, 146, 172, 156,\
156, 173, 146,\
# Character 'A' X coordinate indices.
124, 110, 24, 111, 174, 68, 111, 68, 76, 76, 115, 175,\
76, 175, 112, 111, 76, 112, 111, 112, 176, 111, 176, 123,\
24, 111, 123, 124, 24, 123, 123, 10, 175, 124, 123, 175,\
175, 115, 124,\
# Character 'B' X coordinate indices.
42, 91, 76, 42, 76, 145, 145, 152, 177, 145, 177, 136,\
136, 120, 178, 145, 136, 178, 145, 178, 121, 145, 121, 92,\
145, 92, 51, 42, 145, 51, 42, 51, 86, 42, 86, 0,\
42, 42, 0, 42, 0, 0, 123, 120, 136, 0, 123, 136,\
0, 136, 179, 0, 179, 28, 0, 28, 0, 42, 0, 0,\
42, 0, 0, 149, 42, 0, 149, 0, 149, 149, 149, 120,\
149, 120, 180, 53, 179, 136, 128, 53, 136, 128, 136, 152,\
27, 128, 152, 27, 152, 147, 180, 27, 147, 149, 180, 147,\
149, 147, 181, 149, 181, 164, 164, 182, 149,\
# Character 'C' X coordinate indices.
137, 183, 184, 137, 184, 146, 137, 146, 25, 25, 185, 2,\
2, 14, 15, 2, 15, 80, 80, 50, 144, 144, 69, 146,\
144, 146, 93, 144, 93, 25, 80, 144, 25, 80, 25, 39,\
80, 39, 0, 2, 80, 0, 2, 0, 9, 2, 9, 82,\
2, 82, 81, 25, 2, 81, 25, 81, 77, 25, 77, 137,\
# Character 'D' X coordinate indices.
134, 123, 123, 134, 123, 82, 134, 134, 82, 134, 82, 82,\
28, 134, 82, 28, 82, 123, 28, 123, 148, 28, 148, 27,\
31, 123, 123, 31, 123, 55, 186, 31, 55, 186, 55, 177,\
187, 186, 177, 187, 177, 153, 145, 187, 153, 145, 153, 174,\
27, 145, 174, 27, 174, 188, 28, 27, 188, 28, 188, 64,\
28, 64, 186, 186, 55, 28,\
# Character 'E' X coordinate indices.
189, 75, 75, 189, 75, 82, 189, 189, 82, 189, 82, 82,\
82, 187, 187, 82, 187, 82, 189, 82, 82, 189, 82, 82,\
147, 189, 82, 82, 147, 147,\
# Character 'F' X coordinate indices.
115, 109, 109, 109, 66, 66, 109, 66, 115, 109, 115, 115,\
115, 137, 137, 115, 137, 115, 109, 115, 115, 109, 115, 115,\
# Character 'G' X coordinate indices.
190, 120, 120, 190, 120, 165, 190, 165, 165, 165, 187, 51,\
165, 51, 103, 103, 58, 102, 102, 58, 88, 88, 120, 145,\
88, 145, 188, 188, 191, 74, 188, 74, 152, 188, 152, 54,\
88, 188, 54, 88, 54, 120, 88, 120, 38, 88, 38, 88,\
102, 88, 88, 102, 88, 115, 102, 115, 84, 102, 84, 85,\
103, 102, 85, 103, 85, 131, 103, 131, 120, 165, 103, 120,\
165, 120, 94, 165, 94, 190, 165, 190, 190,\
# Character 'H' X coordinate indices.
82, 189, 189, 189, 82, 82, 30, 30, 153, 30, 153, 153,\
153, 30, 30, 30, 153, 30, 82, 30, 30, 82, 30, 82,\
189, 82, 82, 189, 82, 82,\
# Character 'I' X coordinate indices.
192, 15, 15, 15, 192, 192,\
# Character 'J' X coordinate indices.
134, 193, 1, 134, 1, 9, 134, 9, 194, 141, 141, 45,\
36, 141, 45, 36, 45, 45, 131, 36, 45, 131, 45, 47,\
194, 131, 47, 134, 194, 47, 134, 47, 104, 104, 88, 134,\
# Character 'K' X coordinate indices.
0, 42, 42, 42, 0, 0, 0, 195, 188, 0, 188, 34,\
34, 196, 164, 34, 164, 10, 0, 34, 10, 0, 10, 0,\
42, 0, 0, 42, 0, 0,\
# Character 'L' X coordinate indices.
42, 42, 0, 42, 0, 0, 67, 42, 0, 0, 67, 67,\
# Character 'M' X coordinate indices.
37, 42, 42, 19, 25, 45, 45, 29, 197, 166, 166, 196,\
166, 196, 196, 197, 166, 196, 45, 197, 196, 45, 196, 29,\
45, 29, 21, 19, 45, 21, 19, 21, 37, 42, 19, 37,\
42, 37, 37,\
# Character 'N' X coordinate indices.
37, 134, 134, 134, 82, 145, 145, 145, 153, 145, 153, 153,\
134, 145, 153, 134, 153, 30, 134, 30, 37, 134, 37, 37,\
# Character 'O' X coordinate indices.
5, 14, 9, 9, 33, 187, 187, 198, 199, 187, 199, 200,\
200, 66, 50, 200, 50, 50, 200, 50, 66, 200, 66, 201,\
200, 201, 173, 187, 200, 173, 187, 173, 67, 187, 67, 33,\
9, 187, 33, 9, 33, 19, 5, 9, 19, 5, 19, 9,\
5, 9, 19, 5, 19, 50, 5, 50, 50, 50, 12, 5,\
# Character 'P' X coordinate indices.
82, 134, 134, 134, 149, 52, 134, 52, 30, 134, 30, 64,\
64, 190, 66, 66, 105, 82, 66, 82, 82, 66, 82, 202,\
66, 202, 203, 64, 66, 203, 64, 203, 204, 64, 204, 27,\
64, 27, 31, 134, 64, 31, 134, 31, 105, 134, 105, 82,\
134, 82, 82, 134, 82, 82, 134, 82, 82,\
# Character 'Q' X coordinate indices.
74, 205, 206, 74, 206, 68, 207, 74, 68, 207, 68, 25,\
25, 25, 113, 207, 25, 113, 113, 50, 120, 113, 120, 162,\
207, 113, 162, 207, 162, 197, 207, 197, 208, 12, 25, 25,\
12, 25, 12, 209, 12, 12, 209, 12, 2, 209, 2, 79,\
60, 209, 79, 60, 79, 2, 25, 60, 2, 25, 2, 12,\
25, 12, 25, 137, 25, 25, 137, 25, 66, 208, 137, 66,\
207, 208, 66, 207, 66, 210, 210, 72, 207,\
# Character 'R' X coordinate indices.
82, 189, 189, 189, 43, 47, 189, 47, 82, 189, 82, 82,\
128, 47, 43, 128, 43, 30, 177, 128, 30, 177, 30, 74,\
94, 177, 74, 94, 74, 188, 136, 94, 188, 136, 188, 64,\
111, 136, 64, 82, 111, 64, 82, 64, 55, 82, 55, 93,\
82, 93, 83, 83, 211, 212, 83, 212, 53, 83, 53, 26,\
83, 26, 77, 82, 83, 77, 82, 77, 28, 82, 28, 10,\
82, 10, 82, 189, 82, 82, 189, 82, 82,\
# Character 'S' X coordinate indices.
2, 8, 63, 2, 63, 49, 2, 49, 100, 2, 100, 149,\
151, 86, 82, 151, 82, 58, 151, 58, 7, 7, 56, 3,\
7, 3, 123, 123, 180, 68, 123, 68, 181, 181, 27, 55,\
123, 181, 55, 123, 55, 141, 123, 141, 213, 7, 123, 213,\
7, 213, 49, 7, 49, 4, 151, 7, 4, 93, 151, 4,\
93, 4, 28, 93, 28, 46, 195, 93, 46, 195, 46, 83,\
27, 195, 83, 27, 83, 147, 151, 27, 147, 151, 147, 152,\
149, 151, 152, 149, 152, 203, 2, 149, 203, 2, 203, 34,\
34, 192, 2,\
# Character 'T' X coordinate indices.
105, 81, 81, 81, 214, 214, 81, 214, 181, 181, 181, 105,\
81, 181, 105, 81, 105, 105,\
# Character 'U' X coordinate indices.
162, 162, 153, 162, 153, 153, 162, 153, 215, 162, 215, 108,\
108, 135, 185, 108, 185, 17, 17, 189, 189, 17, 189, 82,\
17, 82, 82, 17, 82, 4, 17, 4, 39, 108, 17, 39,\
108, 39, 105, 108, 105, 27, 108, 27, 162,\
# Character 'V' X coordinate indices.
131, 216, 17, 131, 17, 122, 131, 122, 104, 77, 131, 104,\
77, 104, 21, 77, 21, 66, 66, 154, 77,\
# Character 'W' X coordinate indices.
23, 16, 17, 23, 17, 19, 23, 19, 39, 38, 23, 39,\
38, 39, 131, 38, 131, 51, 94, 201, 198, 198, 217, 218,\
198, 218, 219, 198, 219, 220, 198, 220, 95, 94, 198, 95,\
94, 95, 53, 94, 53, 92, 51, 94, 92, 38, 51, 92,\
92, 55, 38,\
# Character 'X' X coordinate indices.
44, 216, 131, 131, 40, 84, 131, 84, 131, 131, 131, 126,\
44, 131, 126, 44, 126, 33, 44, 33, 164, 164, 153, 50,\
50, 156, 30, 50, 30, 59, 50, 59, 28, 164, 50, 28,\
44, 164, 28, 28, 89, 44,\
# Character 'Y' X coordinate indices.
176, 35, 35, 35, 216, 44, 35, 44, 81, 176, 35, 81,\
176, 81, 28, 28, 120, 145, 28, 145, 154, 28, 154, 176,\
28, 176, 176,\
# Character 'Z' X coordinate indices.
83, 221, 221, 221, 25, 29, 29, 98, 98, 29, 98, 146,\
221, 29, 146, 221, 146, 146, 221, 146, 82, 221, 82, 107,\
83, 221, 107, 107, 83, 83,\
# Character '[' X coordinate indices.
97, 222, 222, 97, 222, 223, 97, 97, 223, 97, 223, 223,\
222, 97, 223, 223, 222, 222,\
# Character '\' X coordinate indices.
35, 12, 110, 110, 7, 35,\
# Character ']' X coordinate indices.
60, 221, 221, 60, 221, 6, 60, 60, 6, 60, 6, 6,\
60, 6, 221, 221, 221, 60,\
# Character '^' X coordinate indices.
11, 44, 224, 11, 224, 23, 11, 23, 78, 11, 78, 31,\
31, 105, 11,\
# Character '_' X coordinate indices.
144, 225, 225, 225, 144, 144,\
# Character '`' X coordinate indices.
99, 103, 22, 22, 79, 99,\
# Character 'a' X coordinate indices.
114, 86, 80, 114, 80, 109, 109, 40, 102, 109, 102, 124,\
109, 124, 49, 150, 43, 43, 49, 150, 43, 49, 43, 139,\
49, 139, 115, 49, 115, 20, 109, 49, 20, 109, 20, 116,\
109, 116, 22, 109, 22, 100, 114, 109, 100, 114, 100, 123,\
114, 123, 226, 114, 226, 43, 45, 114, 43, 45, 43, 43,\
45, 43, 43, 45, 43, 132, 18, 41, 14, 18, 14, 5,\
24, 18, 5, 24, 5, 0, 21, 24, 0, 21, 0, 10,\
21, 10, 33, 132, 21, 33, 45, 132, 33, 45, 33, 52,\
45, 52, 113, 45, 113, 121, 45, 121, 121, 45, 121, 46,\
46, 137, 45,\
# Character 'b' X coordinate indices.
9, 9, 97, 9, 97, 97, 97, 118, 118, 118, 192, 9,\
97, 118, 9, 9, 97, 9, 9, 9, 0, 9, 0, 10,\
62, 192, 118, 62, 118, 89, 25, 62, 89, 25, 89, 25,\
140, 25, 25, 140, 25, 52, 140, 52, 27, 25, 140, 27,\
25, 27, 128, 25, 128, 55, 10, 25, 55, 10, 55, 122,\
10, 122, 9,\
# Character 'c' X coordinate indices.
135, 114, 46, 135, 46, 51, 135, 51, 24, 227, 119, 97,\
227, 97, 118, 118, 24, 120, 120, 113, 43, 120, 43, 105,\
120, 105, 35, 118, 120, 35, 118, 35, 0, 118, 0, 107,\
227, 118, 107, 227, 107, 0, 24, 227, 0, 24, 0, 24,\
24, 24, 135,\
# Character 'd' X coordinate indices.
113, 47, 47, 113, 47, 59, 113, 59, 114, 113, 114, 59,\
61, 59, 47, 61, 47, 81, 37, 61, 81, 37, 81, 20,\
6, 37, 20, 6, 20, 228, 49, 6, 228, 49, 228, 40,\
49, 40, 57, 222, 49, 57, 222, 57, 41, 222, 41, 139,\
59, 222, 139, 59, 139, 229, 113, 59, 229, 113, 229, 43,\
113, 43, 43, 43, 113, 113,\
# Character 'e' X coordinate indices.
48, 45, 137, 48, 137, 90, 48, 90, 10, 10, 56, 40,\
227, 35, 131, 227, 131, 115, 40, 227, 115, 40, 115, 116,\
40, 116, 45, 50, 131, 35, 50, 35, 112, 45, 50, 112,\
45, 112, 128, 40, 45, 128, 40, 128, 128, 40, 128, 107,\
40, 107, 115, 10, 40, 115, 10, 115, 10, 10, 10, 48,\
# Character 'f' X coordinate indices.
82, 2, 2, 2, 16, 16, 2, 16, 2, 82, 2, 2,\
82, 2, 2, 82, 2, 58, 82, 58, 209, 209, 11, 86,\
209, 86, 38, 209, 38, 39, 209, 39, 4, 209, 4, 82,\
209, 82, 82, 82, 24, 24, 82, 24, 82, 209, 82, 82,\
209, 82, 82,\
# Character 'g' X coordinate indices.
227, 14, 63, 227, 63, 37, 227, 37, 139, 47, 59, 32,\
61, 59, 47, 61, 47, 142, 49, 61, 142, 49, 142, 5,\
6, 49, 5, 6, 5, 101, 49, 6, 101, 49, 101, 57,\
222, 49, 57, 222, 57, 41, 222, 41, 81, 59, 222, 81,\
59, 81, 32, 32, 59, 32, 32, 32, 203, 32, 32, 203,\
47, 32, 203, 33, 47, 203, 33, 203, 203, 34, 33, 203,\
34, 203, 180, 139, 34, 180, 227, 139, 180, 227, 180, 25,\
25, 139, 227,\
# Character 'h' X coordinate indices.
118, 97, 97, 97, 118, 118, 118, 13, 32, 32, 92, 203,\
32, 203, 203, 32, 203, 47, 32, 47, 47, 32, 47, 21,\
32, 21, 122, 118, 32, 122, 118, 122, 19, 118, 19, 0,\
97, 118, 0, 97, 0, 118, 97, 118, 118,\
# Character 'i' X coordinate indices.
118, 97, 97, 97, 118, 118, 118, 97, 97, 97, 118, 118,\
# Character 'j' X coordinate indices.
118, 97, 97, 97, 118, 118, 214, 230, 231, 214, 231, 16,\
214, 16, 14, 97, 97, 118, 14, 97, 118, 14, 118, 118,\
214, 14, 118, 118, 107, 214,\
# Character 'k' X coordinate indices.
118, 97, 97, 97, 118, 118, 118, 59, 76, 118, 76, 35,\
35, 53, 50, 35, 50, 19, 118, 35, 19, 118, 19, 118,\
97, 118, 118, 97, 118, 118,\
# Character 'l' X coordinate indices.
118, 228, 228, 228, 118, 118,\
# Character 'm' X coordinate indices.
118, 97, 97, 97, 9, 9, 118, 97, 9, 9, 12, 38,\
9, 38, 43, 112, 173, 217, 217, 232, 232, 217, 232, 184,\
217, 184, 184, 217, 184, 65, 217, 65, 153, 217, 153, 183,\
112, 217, 183, 112, 183, 130, 112, 130, 182, 43, 112, 182,\
43, 182, 182, 43, 182, 111, 43, 111, 111, 43, 111, 34,\
43, 34, 131, 9, 43, 131, 9, 131, 80, 9, 80, 0,\
9, 0, 118, 9, 118, 118,\
# Character 'n' X coordinate indices.
118, 97, 97, 97, 9, 9, 118, 97, 9, 9, 13, 43,\
9, 43, 52, 52, 113, 203, 52, 203, 203, 52, 203, 47,\
52, 47, 47, 52, 47, 50, 52, 50, 105, 52, 105, 122,\
9, 52, 122, 9, 122, 3, 9, 3, 118, 9, 118, 118,\
# Character 'o' X coordinate indices.
56, 101, 1, 1, 35, 55, 55, 67, 203, 55, 203, 47,\
47, 35, 35, 47, 35, 25, 47, 25, 26, 55, 47, 26,\
55, 26, 25, 55, 25, 35, 1, 55, 35, 1, 35, 37,\
1, 37, 6, 56, 1, 6, 56, 6, 37, 56, 37, 35,\
35, 35, 56,\
# Character 'p' X coordinate indices.
118, 97, 97, 97, 9, 9, 97, 9, 192, 97, 192, 9,\
118, 97, 9, 62, 192, 9, 62, 9, 12, 25, 62, 12,\
25, 12, 89, 25, 89, 120, 26, 25, 120, 26, 120, 46,\
25, 26, 46, 25, 46, 128, 25, 128, 203, 10, 25, 203,\
10, 203, 114, 10, 114, 122, 192, 10, 122, 192, 122, 60,\
9, 192, 60, 9, 60, 118, 9, 118, 118,\
# Character 'q' X coordinate indices.
113, 43, 43, 43, 126, 81, 43, 81, 56, 56, 40, 98,\
56, 98, 209, 209, 139, 114, 114, 59, 222, 209, 114, 222,\
209, 222, 37, 209, 37, 6, 56, 209, 6, 56, 6, 0,\
56, 0, 78, 43, 56, 78, 43, 78, 59, 43, 59, 114,\
113, 43, 114, 114, 59, 114, 113, 114, 114, 113, 114, 114,\
114, 113, 113,\
# Character 'r' X coordinate indices.
118, 97, 97, 97, 9, 9, 118, 97, 9, 9, 23, 142,\
9, 142, 149, 149, 106, 139, 9, 149, 139, 9, 139, 23,\
9, 23, 0, 9, 0, 118, 9, 118, 118,\
# Character 's' X coordinate indices.
58, 101, 44, 58, 44, 22, 58, 22, 139, 139, 96, 97,\
149, 139, 97, 149, 97, 8, 8, 98, 227, 8, 227, 99,\
8, 99, 100, 100, 91, 120, 100, 120, 151, 151, 105, 123,\
100, 151, 123, 100, 123, 143, 100, 143, 22, 8, 100, 22,\
8, 22, 116, 8, 116, 209, 149, 8, 209, 149, 209, 233,\
149, 233, 139, 149, 139, 226, 21, 149, 226, 21, 226, 151,\
91, 21, 151, 91, 151, 182, 91, 182, 148, 139, 91, 148,\
58, 139, 148, 58, 148, 135, 135, 139, 58,\
# Character 't' X coordinate indices.
194, 81, 78, 194, 78, 175, 194, 175, 116, 194, 116, 15,\
15, 109, 109, 109, 234, 234, 109, 234, 109, 15, 109, 109,\
15, 109, 109, 15, 109, 0, 15, 0, 0, 0, 81, 81,\
0, 81, 0, 15, 0, 0, 15, 0, 0, 15, 0, 82,\
15, 82, 4, 15, 4, 194,\
# Character 'u' X coordinate indices.
113, 32, 32, 32, 150, 223, 32, 223, 58, 58, 7, 228,\
58, 228, 228, 58, 228, 118, 58, 118, 118, 58, 118, 22,\
58, 22, 4, 58, 4, 81, 32, 58, 81, 32, 81, 28,\
32, 28, 77, 113, 32, 77, 113, 77, 43, 113, 43, 43,\
43, 113, 113,\
# Character 'v' X coordinate indices.
122, 12, 125, 125, 17, 19, 125, 19, 39, 122, 125, 39,\
122, 39, 10, 122, 10, 132, 132, 203, 122,\
# Character 'w' X coordinate indices.
139, 99, 216, 216, 15, 115, 216, 115, 12, 139, 216, 12,\
139, 12, 11, 139, 11, 106, 106, 32, 113, 106, 113, 54,\
164, 197, 206, 54, 164, 206, 54, 206, 145, 106, 54, 145,\
145, 29, 77, 145, 77, 135, 106, 145, 135, 106, 135, 139,\
# Character 'x' X coordinate indices.
1, 235, 3, 3, 214, 116, 3, 116, 60, 3, 60, 39,\
1, 3, 39, 1, 39, 10, 1, 10, 111, 111, 136, 38,\
38, 46, 25, 38, 25, 35, 38, 35, 39, 111, 38, 39,\
111, 39, 1,\
# Character 'y' X coordinate indices.
44, 98, 14, 44, 14, 56, 44, 56, 84, 44, 84, 115,\
60, 234, 1, 60, 1, 19, 60, 19, 142, 175, 60, 142,\
115, 175, 142, 142, 122, 114, 142, 114, 46, 115, 142, 46,\
44, 115, 46, 44, 46, 127, 44, 127, 143, 143, 4, 44,\
# Character 'z' X coordinate indices.
136, 221, 221, 34, 39, 119, 34, 119, 119, 34, 119, 182,\
221, 34, 182, 221, 182, 182, 221, 182, 18, 221, 18, 107,\
136, 221, 107, 136, 107, 117, 117, 136, 136,\
# Character '{' X coordinate indices.
193, 193, 5, 107, 116, 84, 96, 107, 84, 96, 84, 115,\
96, 115, 88, 88, 10, 86, 88, 86, 86, 88, 86, 89,\
88, 89, 103, 96, 88, 103, 96, 103, 60, 96, 60, 12,\
5, 96, 12, 193, 5, 12, 193, 12, 115, 193, 115, 17,\
193, 17, 192, 192, 60, 80, 89, 86, 86, 11, 89, 86,\
11, 86, 10, 11, 10, 80, 80, 11, 80, 192, 80, 80,\
192, 80, 49, 192, 49, 63, 192, 63, 107, 192, 107, 96,\
192, 96, 5, 192, 5, 193,\
# Character '|' X coordinate indices.
0, 5, 5, 5, 0, 0,\
# Character '}' X coordinate indices.
143, 236, 236, 143, 236, 143, 143, 143, 60, 175, 23, 85,\
60, 175, 85, 60, 85, 223, 60, 223, 1, 143, 60, 1,\
1, 14, 214, 1, 214, 214, 1, 214, 237, 1, 237, 17,\
143, 1, 17, 6, 107, 22, 6, 22, 88, 17, 6, 88,\
143, 17, 88, 143, 88, 9, 143, 9, 6, 237, 214, 214,\
56, 237, 214, 56, 214, 14, 56, 14, 96, 96, 56, 96,\
6, 96, 96, 6, 96, 0, 6, 0, 185, 6, 185, 175,\
6, 175, 60, 6, 60, 143)
#
# This array gives the y indices into the y coordinate array nice_font_y.
# Every three numbers represents an index into the nice_font_y array that
# gives the actual coordinate.
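# For instance, if a glyph's first triangle starts at offset ts (taken from
# nice_triangle_start further below), a decoding sketch for its three y
# coordinates is:
#
#   y0 = nice_font_y[nice_triangle_y_index[ts + 0]]
#   y1 = nice_font_y[nice_triangle_y_index[ts + 1]]
#   y2 = nice_font_y[nice_triangle_y_index[ts + 2]]
#
# drawRotatedText below walks the x and y index arrays in exactly this way.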
nice_triangle_y_index = (\
# Character '!' Y coordinate indices.
0, 0, 1, 0, 1, 2, 0, 2, 2, 2, 1, 0,\
3, 3, 4, 4, 4, 3,\
# Character '"' Y coordinate indices.
5, 5, 6, 5, 6, 2, 5, 2, 2, 2, 6, 5,\
5, 5, 6, 5, 6, 2, 5, 2, 2, 2, 6, 5,\
# Character '#' Y coordinate indices.
7, 7, 8, 8, 8, 9, 8, 9, 9, 7, 8, 9,\
7, 9, 9, 10, 9, 9, 10, 9, 10, 10, 10, 11,\
10, 10, 11, 10, 10, 11, 10, 11, 11, 11, 12, 12,\
11, 12, 11, 10, 11, 11, 10, 11, 11, 10, 11, 12,\
10, 12, 12, 9, 10, 12, 9, 12, 11, 11, 11, 10,\
11, 10, 10, 9, 11, 10, 9, 10, 9, 9, 9, 8,\
9, 9, 8, 9, 9, 8, 9, 8, 8, 8, 7, 7,\
8, 7, 8, 9, 8, 8, 9, 8, 8, 9, 8, 7,\
# Character '$' Y coordinate indices.
13, 13, 14, 14, 15, 16, 14, 16, 17, 17, 18, 19,\
14, 17, 19, 14, 19, 20, 13, 14, 20, 21, 20, 20,\
21, 20, 22, 21, 22, 23, 21, 23, 10, 21, 10, 24,\
25, 21, 24, 24, 26, 27, 27, 28, 28, 24, 27, 28,\
24, 28, 27, 27, 29, 30, 30, 31, 32, 27, 30, 32,\
27, 32, 33, 27, 33, 33, 27, 33, 34, 24, 27, 34,\
24, 34, 35, 24, 35, 36, 25, 24, 36, 25, 36, 37,\
37, 33, 33, 37, 33, 38, 25, 37, 38, 25, 38, 39,\
8, 25, 39, 8, 39, 40, 8, 40, 41, 42, 8, 41,\
42, 41, 43, 42, 43, 44, 20, 42, 44, 20, 20, 44,\
20, 44, 7, 20, 7, 13,\
# Character '%' Y coordinate indices.
45, 46, 47, 47, 12, 48, 48, 24, 45, 45, 49, 38,\
45, 38, 50, 48, 45, 50, 48, 50, 24, 48, 24, 51,\
48, 51, 33, 47, 48, 33, 47, 33, 51, 45, 47, 51,\
45, 51, 52, 45, 52, 50, 45, 50, 38, 38, 49, 45,\
53, 53, 12, 12, 12, 53, 54, 55, 56, 56, 57, 58,\
58, 59, 54, 54, 53, 60, 54, 60, 61, 58, 54, 61,\
58, 61, 55, 58, 55, 62, 58, 62, 63, 56, 58, 63,\
56, 63, 62, 54, 56, 62, 54, 62, 64, 54, 64, 61,\
54, 61, 60, 60, 53, 54,\
# Character '&' Y coordinate indices.
16, 65, 7, 7, 61, 8, 8, 66, 38, 8, 38, 57,\
8, 57, 25, 8, 25, 43, 8, 43, 19, 7, 8, 19,\
7, 19, 67, 16, 7, 67, 16, 67, 68, 16, 68, 69,\
14, 16, 69, 14, 69, 57, 14, 57, 38, 14, 38, 70,\
14, 70, 71, 71, 48, 12, 12, 72, 73, 71, 12, 73,\
71, 73, 74, 71, 74, 24, 14, 71, 24, 14, 24, 70,\
14, 70, 75, 73, 72, 12, 73, 12, 76, 77, 73, 76,\
11, 77, 76, 11, 76, 74, 75, 11, 74, 75, 74, 78,\
14, 75, 78, 14, 78, 79, 79, 22, 66, 79, 66, 59,\
14, 79, 59, 59, 44, 14,\
# Character ''' Y coordinate indices.
5, 5, 80, 5, 80, 2, 5, 2, 2, 2, 80, 5,\
# Character '(' Y coordinate indices.
81, 81, 82, 81, 82, 83, 83, 84, 12, 12, 12, 85,\
83, 12, 85, 83, 85, 86, 83, 86, 83, 83, 83, 81,\
# Character ')' Y coordinate indices.
82, 81, 81, 82, 81, 83, 82, 83, 86, 85, 12, 12,\
86, 85, 12, 86, 12, 84, 82, 86, 84, 84, 83, 82,\
# Character '*' Y coordinate indices.
1, 85, 72, 1, 72, 87, 87, 12, 12, 1, 87, 12,\
1, 12, 87, 1, 87, 72, 1, 72, 85, 1, 85, 1,\
1, 1, 88, 88, 37, 89, 1, 88, 89, 1, 89, 37,\
37, 88, 1,\
# Character '+' Y coordinate indices.
90, 90, 91, 91, 91, 92, 91, 92, 92, 90, 91, 92,\
90, 92, 93, 93, 93, 92, 92, 92, 91, 92, 91, 91,\
93, 92, 91, 93, 91, 90,\
# Character ',' Y coordinate indices.
3, 3, 4, 3, 4, 4, 3, 4, 3, 3, 3, 94,\
94, 95, 13, 94, 13, 96, 94, 96, 3,\
# Character '-' Y coordinate indices.
18, 18, 58, 58, 58, 18,\
# Character '.' Y coordinate indices.
3, 3, 4, 4, 4, 3,\
# Character '/' Y coordinate indices.
7, 7, 12, 12, 12, 7,\
# Character '0' Y coordinate indices.
61, 57, 1, 61, 1, 97, 97, 98, 29, 97, 29, 32,\
32, 99, 57, 32, 57, 100, 32, 100, 101, 101, 7, 20,\
101, 20, 102, 101, 102, 57, 32, 101, 57, 32, 57, 93,\
32, 93, 103, 97, 32, 103, 97, 103, 80, 97, 80, 57,\
61, 97, 57, 61, 57, 102, 61, 102, 20, 20, 7, 61,\
# Character '1' Y coordinate indices.
98, 3, 3, 98, 3, 104, 98, 104, 70, 98, 70, 105,\
105, 24, 106, 98, 105, 106, 106, 98, 98,\
# Character '2' Y coordinate indices.
16, 16, 3, 16, 3, 3, 16, 3, 20, 16, 20, 107,\
16, 107, 62, 16, 62, 108, 87, 70, 109, 87, 109, 33,\
103, 87, 33, 103, 33, 98, 110, 103, 98, 110, 98, 111,\
99, 110, 111, 99, 111, 11, 108, 99, 11, 108, 11, 112,\
108, 112, 49, 108, 49, 113, 108, 113, 114, 108, 114, 16,\
# Character '3' Y coordinate indices.
115, 116, 17, 115, 17, 117, 115, 117, 20, 49, 22, 108,\
118, 49, 108, 118, 108, 108, 118, 108, 10, 119, 118, 10,\
120, 11, 52, 120, 52, 121, 103, 120, 121, 103, 121, 98,\
120, 103, 98, 120, 98, 26, 52, 120, 26, 52, 26, 34,\
10, 52, 34, 10, 34, 122, 10, 122, 123, 10, 123, 39,\
119, 10, 39, 119, 39, 21, 42, 119, 21, 42, 21, 119,\
42, 119, 44, 20, 42, 44, 115, 20, 44, 44, 7, 115,\
# Character '4' Y coordinate indices.
3, 3, 124, 3, 124, 125, 3, 125, 74, 125, 125, 124,\
125, 124, 124, 74, 125, 124, 74, 124, 125, 74, 125, 2,\
3, 74, 2, 2, 2, 125, 125, 125, 124, 125, 124, 124,\
2, 125, 124, 2, 124, 3,\
# Character '5' Y coordinate indices.
115, 116, 8, 115, 8, 117, 115, 117, 20, 126, 21, 22,\
127, 127, 106, 127, 106, 106, 22, 127, 106, 22, 106, 128,\
126, 22, 128, 45, 126, 128, 45, 128, 129, 57, 45, 129,\
57, 129, 130, 131, 57, 130, 131, 130, 132, 133, 131, 132,\
133, 132, 134, 20, 133, 134, 115, 20, 134, 134, 7, 115,\
# Character '6' Y coordinate indices.
121, 24, 52, 121, 52, 110, 121, 110, 103, 135, 40, 136,\
39, 40, 135, 39, 135, 86, 40, 39, 86, 40, 86, 5,\
40, 5, 45, 137, 40, 45, 137, 45, 136, 42, 137, 136,\
42, 136, 133, 20, 42, 133, 20, 133, 138, 16, 20, 138,\
16, 138, 7, 139, 16, 7, 139, 7, 140, 136, 139, 140,\
135, 136, 140, 135, 140, 141, 52, 135, 141, 52, 141, 142,\
106, 52, 142, 103, 106, 142, 121, 103, 142, 142, 98, 121,\
# Character '7' Y coordinate indices.
106, 106, 127, 106, 127, 127, 106, 127, 51, 106, 51, 10,\
106, 10, 8, 8, 3, 3, 106, 8, 3, 106, 3, 43,\
43, 112, 106,\
# Character '8' Y coordinate indices.
39, 105, 37, 143, 39, 37, 143, 37, 49, 143, 49, 118,\
143, 118, 144, 143, 144, 114, 118, 49, 37, 118, 37, 105,\
43, 118, 105, 52, 105, 39, 52, 39, 10, 120, 52, 10,\
120, 10, 52, 120, 52, 33, 103, 120, 33, 103, 33, 98,\
120, 103, 98, 120, 98, 33, 122, 120, 33, 122, 33, 122,\
105, 122, 122, 105, 122, 10, 105, 10, 39, 43, 105, 39,\
43, 39, 66, 4, 43, 66, 4, 66, 43, 4, 43, 44,\
20, 4, 44, 20, 44, 7, 145, 20, 7, 114, 145, 7,\
114, 7, 44, 143, 114, 44, 44, 144, 143,\
# Character '9' Y coordinate indices.
60, 146, 124, 60, 124, 147, 60, 147, 20, 22, 148, 84,\
149, 148, 22, 149, 22, 9, 148, 149, 9, 148, 9, 132,\
148, 132, 150, 129, 148, 150, 129, 150, 151, 6, 129, 151,\
6, 151, 152, 103, 6, 152, 103, 152, 98, 153, 103, 98,\
153, 98, 48, 84, 153, 48, 84, 48, 77, 22, 84, 77,\
149, 22, 77, 154, 149, 77, 154, 77, 155, 156, 154, 155,\
156, 155, 69, 145, 156, 69, 145, 69, 101, 20, 145, 101,\
60, 20, 101, 101, 7, 60,\
# Character ':' Y coordinate indices.
157, 157, 11, 11, 11, 157, 3, 3, 4, 4, 4, 3,\
# Character ';' Y coordinate indices.
157, 157, 11, 11, 11, 157, 3, 3, 4, 3, 4, 4,\
3, 4, 3, 3, 3, 94, 94, 95, 13, 94, 13, 96,\
94, 96, 3,\
# Character '<' Y coordinate indices.
19, 91, 92, 92, 80, 158, 92, 158, 57, 19, 92, 57,\
57, 8, 19,\
# Character '=' Y coordinate indices.
70, 159, 159, 159, 70, 70, 160, 43, 43, 43, 160, 160,\
# Character '>' Y coordinate indices.
92, 91, 19, 19, 8, 57, 92, 19, 57, 92, 57, 158,\
158, 80, 92,\
# Character '?' Y coordinate indices.
0, 0, 43, 0, 43, 63, 0, 63, 49, 161, 109, 99,\
161, 99, 47, 72, 161, 47, 72, 47, 12, 162, 72, 12,\
162, 12, 121, 89, 162, 121, 89, 121, 122, 129, 89, 122,\
38, 129, 122, 49, 38, 122, 49, 122, 10, 49, 10, 49,\
49, 49, 163, 49, 163, 125, 49, 125, 0, 3, 3, 4,\
4, 4, 3,\
# Character '@' Y coordinate indices.
145, 164, 165, 166, 145, 165, 166, 165, 66, 147, 164, 145,\
167, 147, 145, 167, 145, 54, 133, 167, 54, 133, 54, 3,\
133, 3, 54, 154, 133, 54, 154, 54, 4, 58, 154, 4,\
58, 4, 165, 168, 58, 165, 168, 165, 155, 86, 168, 155,\
86, 155, 169, 75, 86, 169, 75, 169, 89, 157, 75, 89,\
157, 89, 158, 66, 157, 158, 66, 158, 50, 166, 66, 50,\
166, 50, 170, 166, 170, 170, 166, 170, 116, 166, 116, 19,\
166, 19, 68, 166, 68, 145, 171, 172, 172, 171, 172, 171,\
173, 171, 171, 173, 171, 174, 171, 173, 174, 171, 174, 81,\
171, 81, 175, 101, 171, 175, 101, 175, 176, 79, 101, 176,\
79, 176, 101, 79, 101, 137, 177, 79, 137, 177, 137, 75,\
178, 177, 75, 178, 75, 179, 72, 178, 179, 72, 179, 12,\
120, 72, 12, 120, 12, 48, 70, 120, 48, 70, 48, 122,\
49, 70, 122, 49, 122, 49, 18, 49, 49, 18, 49, 180,\
19, 18, 180, 145, 19, 180, 166, 145, 180, 166, 180, 3,\
3, 172, 166,\
# Character 'A' Y coordinate indices.
3, 3, 2, 2, 3, 3, 2, 3, 18, 18, 18, 163,\
18, 163, 163, 2, 18, 163, 2, 163, 169, 2, 169, 51,\
2, 2, 51, 3, 2, 51, 51, 181, 163, 3, 51, 163,\
163, 18, 3,\
# Character 'B' Y coordinate indices.
2, 2, 29, 2, 29, 32, 32, 122, 10, 32, 10, 23,\
23, 37, 75, 32, 23, 75, 32, 75, 99, 32, 99, 85,\
32, 85, 106, 2, 32, 106, 2, 106, 73, 2, 73, 73,\
3, 2, 73, 3, 73, 108, 108, 37, 23, 108, 108, 23,\
108, 23, 66, 108, 66, 182, 108, 182, 182, 3, 108, 182,\
3, 182, 16, 3, 3, 16, 3, 16, 16, 3, 16, 147,\
3, 147, 133, 183, 66, 23, 119, 183, 23, 119, 23, 66,\
184, 119, 66, 184, 66, 119, 133, 184, 119, 3, 133, 119,\
3, 119, 90, 3, 90, 185, 185, 65, 3,\
# Character 'C' Y coordinate indices.
90, 125, 137, 90, 137, 185, 90, 185, 7, 7, 60, 124,\
124, 148, 1, 124, 1, 76, 76, 12, 47, 47, 11, 186,\
47, 186, 110, 47, 110, 103, 76, 47, 103, 76, 103, 87,\
76, 87, 70, 124, 76, 70, 124, 70, 148, 124, 148, 43,\
124, 43, 42, 7, 124, 42, 7, 42, 140, 7, 140, 90,\
# Character 'D' Y coordinate indices.
2, 2, 73, 2, 73, 73, 3, 2, 73, 3, 73, 16,\
3, 3, 16, 3, 16, 16, 3, 16, 180, 3, 180, 164,\
162, 73, 2, 162, 2, 127, 52, 162, 127, 52, 127, 72,\
148, 52, 72, 148, 72, 122, 137, 148, 122, 137, 122, 148,\
164, 137, 148, 164, 148, 18, 3, 164, 18, 3, 18, 19,\
3, 19, 185, 185, 65, 3,\
# Character 'E' Y coordinate indices.
2, 2, 73, 2, 73, 73, 3, 2, 73, 3, 73, 38,\
38, 38, 21, 38, 21, 21, 3, 38, 21, 3, 21, 16,\
3, 3, 16, 16, 16, 3,\
# Character 'F' Y coordinate indices.
3, 3, 2, 2, 2, 73, 2, 73, 73, 2, 73, 130,\
130, 130, 21, 130, 21, 21, 2, 130, 21, 2, 21, 3,\
# Character 'G' Y coordinate indices.
41, 41, 148, 41, 148, 187, 41, 187, 4, 4, 15, 7,\
4, 7, 60, 60, 59, 188, 188, 31, 76, 76, 12, 189,\
76, 189, 73, 73, 170, 169, 73, 169, 74, 73, 74, 32,\
76, 73, 32, 76, 32, 103, 76, 103, 32, 76, 32, 74,\
188, 76, 74, 188, 74, 181, 188, 181, 190, 188, 190, 17,\
60, 188, 17, 60, 17, 42, 60, 42, 134, 4, 60, 134,\
4, 134, 180, 4, 180, 184, 4, 184, 41,\
# Character 'H' Y coordinate indices.
3, 3, 2, 2, 2, 37, 37, 2, 2, 37, 2, 3,\
3, 3, 22, 37, 3, 22, 37, 37, 22, 37, 22, 22,\
2, 37, 22, 2, 22, 3,\
# Character 'I' Y coordinate indices.
3, 3, 2, 2, 2, 3,\
# Character 'J' Y coordinate indices.
115, 43, 18, 115, 18, 42, 115, 42, 167, 113, 2, 2,\
114, 113, 2, 114, 2, 137, 68, 114, 137, 68, 137, 68,\
167, 68, 68, 115, 167, 68, 115, 68, 15, 15, 7, 115,\
# Character 'K' Y coordinate indices.
3, 3, 2, 2, 2, 190, 190, 2, 2, 190, 2, 191,\
191, 3, 3, 191, 3, 148, 190, 191, 148, 190, 148, 192,\
2, 190, 192, 2, 192, 3,\
# Character 'L' Y coordinate indices.
3, 2, 2, 3, 2, 16, 3, 3, 16, 16, 16, 3,\
# Character 'M' Y coordinate indices.
3, 3, 2, 2, 119, 42, 42, 165, 2, 2, 3, 3,\
2, 3, 6, 2, 2, 6, 42, 2, 6, 42, 6, 3,\
42, 3, 3, 2, 42, 3, 2, 3, 110, 2, 2, 110,\
2, 110, 3,\
# Character 'N' Y coordinate indices.
3, 3, 2, 2, 2, 69, 69, 2, 2, 69, 2, 3,\
2, 69, 3, 2, 3, 3, 2, 3, 30, 2, 30, 3,\
# Character 'O' Y coordinate indices.
55, 49, 32, 32, 12, 48, 48, 31, 193, 48, 193, 59,\
59, 101, 7, 59, 7, 140, 59, 140, 139, 59, 139, 193,\
59, 193, 109, 48, 59, 109, 48, 109, 110, 48, 110, 103,\
32, 48, 103, 32, 103, 74, 55, 32, 74, 55, 74, 194,\
55, 194, 139, 55, 139, 140, 55, 140, 7, 7, 166, 55,\
# Character 'P' Y coordinate indices.
3, 3, 2, 2, 2, 195, 2, 195, 47, 2, 47, 87,\
87, 158, 188, 188, 63, 63, 188, 63, 196, 188, 196, 196,\
188, 196, 130, 87, 188, 130, 87, 130, 158, 87, 158, 85,\
87, 85, 34, 2, 87, 34, 2, 34, 73, 2, 73, 73,\
2, 73, 196, 2, 196, 63, 2, 63, 3,\
# Character 'Q' Y coordinate indices.
134, 65, 197, 134, 197, 60, 17, 134, 60, 17, 60, 7,\
7, 140, 147, 17, 7, 147, 147, 156, 8, 147, 8, 156,\
17, 147, 156, 17, 156, 190, 17, 190, 109, 139, 140, 7,\
139, 7, 60, 193, 139, 60, 193, 60, 55, 193, 55, 193,\
77, 193, 193, 77, 193, 31, 103, 77, 31, 103, 31, 76,\
103, 76, 12, 110, 103, 12, 110, 12, 48, 109, 110, 48,\
17, 109, 48, 17, 48, 31, 31, 190, 17,\
# Character 'R' Y coordinate indices.
3, 3, 2, 2, 2, 142, 2, 142, 142, 2, 142, 45,\
178, 142, 2, 178, 2, 29, 11, 178, 29, 11, 29, 32,\
75, 11, 32, 75, 32, 11, 38, 75, 11, 38, 11, 92,\
45, 38, 92, 45, 45, 92, 45, 92, 21, 45, 21, 63,\
45, 63, 8, 8, 3, 3, 8, 3, 100, 8, 100, 192,\
8, 192, 163, 45, 8, 163, 45, 163, 198, 45, 198, 66,\
45, 66, 66, 2, 45, 66, 2, 66, 3,\
# Character 'S' Y coordinate indices.
4, 136, 79, 4, 79, 100, 4, 100, 117, 4, 117, 134,\
163, 182, 196, 163, 196, 86, 163, 86, 122, 122, 73, 189,\
122, 189, 12, 12, 189, 34, 12, 34, 170, 170, 158, 110,\
12, 170, 110, 12, 110, 103, 12, 103, 120, 122, 12, 120,\
122, 120, 52, 122, 52, 36, 163, 122, 36, 125, 163, 36,\
125, 36, 37, 125, 37, 126, 8, 125, 126, 8, 126, 58,\
114, 8, 58, 114, 58, 43, 68, 114, 43, 68, 43, 117,\
134, 68, 117, 134, 117, 15, 4, 134, 15, 4, 15, 7,\
7, 15, 4,\
# Character 'T' Y coordinate indices.
3, 3, 73, 73, 73, 2, 73, 2, 2, 2, 73, 73,\
73, 2, 73, 73, 73, 3,\
# Character 'U' Y coordinate indices.
58, 2, 2, 58, 2, 58, 58, 58, 114, 58, 114, 199,\
199, 7, 54, 199, 54, 200, 200, 58, 2, 200, 2, 2,\
200, 2, 58, 200, 58, 146, 200, 146, 180, 199, 200, 180,\
199, 180, 134, 199, 134, 102, 199, 102, 58,\
# Character 'V' Y coordinate indices.
3, 2, 2, 3, 2, 8, 3, 8, 145, 3, 3, 145,\
3, 145, 8, 3, 8, 2, 2, 2, 3,\
# Character 'W' Y coordinate indices.
3, 2, 2, 3, 2, 192, 3, 192, 4, 3, 3, 4,\
3, 4, 136, 3, 136, 2, 2, 57, 4, 4, 201, 2,\
4, 2, 2, 4, 2, 3, 4, 3, 3, 2, 4, 3,\
2, 3, 35, 2, 35, 32, 2, 2, 32, 3, 2, 32,\
32, 35, 3,\
# Character 'X' Y coordinate indices.
3, 3, 155, 155, 2, 2, 155, 2, 122, 155, 122, 10,\
3, 155, 10, 3, 10, 202, 3, 202, 2, 2, 2, 126,\
126, 3, 3, 126, 3, 201, 126, 201, 58, 2, 126, 58,\
3, 2, 58, 58, 192, 3,\
# Character 'Y' Y coordinate indices.
3, 3, 58, 58, 2, 2, 58, 2, 70, 3, 58, 70,\
3, 70, 23, 23, 181, 2, 23, 2, 2, 23, 2, 58,\
23, 58, 3,\
# Character 'Z' Y coordinate indices.
3, 3, 147, 147, 35, 73, 73, 73, 2, 73, 2, 2,\
147, 73, 2, 147, 2, 73, 147, 73, 164, 147, 164, 16,\
3, 147, 16, 16, 16, 3,\
# Character '[' Y coordinate indices.
2, 2, 203, 2, 203, 203, 204, 2, 203, 204, 203, 205,\
204, 204, 205, 205, 205, 204,\
# Character '\' Y coordinate indices.
7, 7, 12, 12, 12, 7,\
# Character ']' Y coordinate indices.
204, 204, 205, 204, 205, 205, 2, 204, 205, 2, 205, 203,\
2, 203, 203, 203, 2, 2,\
# Character '^' Y coordinate indices.
32, 22, 22, 32, 22, 12, 32, 12, 12, 32, 12, 22,\
22, 22, 32,\
# Character '_' Y coordinate indices.
204, 204, 206, 206, 206, 204,\
# Character '`' Y coordinate indices.
98, 85, 85, 85, 98, 98,\
# Character 'a' Y coordinate indices.
61, 82, 7, 61, 7, 101, 101, 164, 144, 101, 144, 201,\
101, 201, 160, 56, 182, 83, 160, 56, 83, 160, 83, 137,\
160, 137, 119, 160, 119, 207, 101, 160, 207, 101, 207, 208,\
101, 208, 145, 101, 145, 20, 61, 101, 20, 61, 20, 145,\
61, 145, 208, 61, 208, 137, 3, 61, 137, 3, 137, 83,\
3, 83, 182, 3, 182, 57, 112, 190, 135, 112, 135, 209,\
75, 112, 209, 75, 209, 109, 128, 75, 109, 128, 109, 89,\
128, 89, 170, 57, 128, 170, 3, 57, 170, 3, 170, 129,\
3, 129, 108, 3, 108, 141, 3, 141, 18, 3, 18, 61,\
61, 3, 3,\
# Character 'b' Y coordinate indices.
67, 3, 3, 67, 3, 2, 2, 2, 5, 5, 130, 62,\
2, 5, 62, 67, 2, 62, 67, 62, 156, 67, 156, 20,\
75, 130, 5, 75, 5, 89, 130, 75, 89, 130, 89, 109,\
83, 130, 109, 83, 109, 75, 83, 75, 155, 19, 83, 155,\
19, 155, 9, 19, 9, 20, 20, 19, 20, 20, 20, 7,\
20, 7, 67,\
# Character 'c' Y coordinate indices.
117, 116, 0, 117, 0, 166, 117, 166, 7, 20, 83, 210,\
20, 210, 186, 186, 89, 211, 211, 187, 57, 211, 57, 128,\
211, 128, 75, 186, 211, 75, 186, 75, 38, 186, 38, 83,\
20, 186, 83, 20, 83, 133, 7, 20, 133, 7, 133, 20,\
7, 20, 117,\
# Character 'd' Y coordinate indices.
3, 3, 67, 3, 67, 133, 3, 133, 125, 3, 125, 130,\
20, 133, 67, 20, 67, 7, 19, 20, 7, 19, 7, 54,\
83, 19, 54, 83, 54, 102, 130, 83, 102, 130, 102, 83,\
130, 83, 45, 75, 130, 45, 75, 45, 186, 75, 186, 89,\
130, 75, 89, 130, 89, 109, 3, 130, 109, 3, 109, 75,\
3, 75, 2, 2, 2, 3,\
# Character 'e' Y coordinate indices.
147, 55, 69, 147, 69, 60, 147, 60, 7, 7, 20, 201,\
75, 89, 75, 75, 75, 108, 201, 75, 108, 201, 108, 118,\
201, 118, 118, 212, 75, 89, 212, 89, 75, 118, 212, 75,\
118, 75, 83, 201, 118, 83, 201, 83, 79, 201, 79, 79,\
201, 79, 133, 7, 201, 133, 7, 133, 20, 7, 20, 147,\
# Character 'f' Y coordinate indices.
3, 3, 123, 123, 123, 11, 123, 11, 11, 3, 123, 11,\
3, 11, 77, 3, 77, 152, 3, 152, 127, 127, 12, 98,\
127, 98, 203, 127, 203, 103, 127, 103, 32, 127, 32, 71,\
127, 71, 11, 11, 11, 123, 11, 123, 123, 127, 11, 123,\
127, 123, 3,\
# Character 'g' Y coordinate indices.
213, 214, 197, 213, 197, 215, 213, 215, 173, 140, 102, 216,\
134, 102, 140, 134, 140, 3, 102, 134, 3, 102, 3, 217,\
9, 102, 217, 9, 217, 62, 130, 9, 62, 130, 62, 45,\
75, 130, 45, 75, 45, 186, 75, 186, 89, 130, 75, 89,\
130, 89, 75, 216, 130, 75, 75, 11, 11, 216, 75, 11,\
140, 216, 11, 214, 140, 11, 214, 11, 140, 215, 214, 140,\
215, 140, 13, 173, 215, 13, 213, 173, 13, 213, 13, 174,\
174, 81, 213,\
# Character 'h' Y coordinate indices.
3, 3, 2, 2, 2, 75, 75, 89, 158, 158, 10, 21,\
158, 21, 3, 158, 3, 3, 158, 3, 21, 158, 21, 37,\
158, 37, 105, 75, 158, 105, 75, 105, 112, 75, 112, 23,\
2, 75, 23, 2, 23, 160, 2, 160, 3,\
# Character 'i' Y coordinate indices.
120, 120, 2, 2, 2, 120, 3, 3, 11, 11, 11, 3,\
# Character 'j' Y coordinate indices.
120, 120, 2, 2, 2, 120, 81, 218, 205, 81, 205, 206,\
81, 206, 219, 53, 11, 11, 219, 53, 11, 219, 11, 53,\
81, 219, 53, 53, 220, 81,\
# Character 'k' Y coordinate indices.
3, 3, 2, 2, 2, 118, 118, 11, 11, 118, 11, 21,\
21, 3, 3, 21, 3, 62, 118, 21, 62, 118, 62, 43,\
2, 118, 43, 2, 43, 3,\
# Character 'l' Y coordinate indices.
3, 3, 2, 2, 2, 3,\
# Character 'm' Y coordinate indices.
3, 3, 11, 11, 11, 10, 3, 11, 10, 10, 158, 89,\
10, 89, 158, 50, 89, 211, 211, 188, 3, 211, 3, 3,\
211, 3, 21, 211, 21, 78, 211, 78, 50, 211, 50, 105,\
50, 211, 105, 50, 105, 108, 50, 108, 221, 158, 50, 221,\
158, 221, 3, 158, 3, 3, 158, 3, 22, 158, 22, 37,\
158, 37, 105, 10, 158, 105, 10, 105, 112, 10, 112, 126,\
10, 126, 9, 10, 9, 3,\
# Character 'n' Y coordinate indices.
3, 3, 11, 11, 11, 10, 3, 11, 10, 10, 89, 109,\
10, 109, 222, 222, 210, 66, 222, 66, 3, 222, 3, 3,\
222, 3, 66, 222, 66, 92, 222, 92, 50, 222, 50, 105,\
10, 222, 105, 10, 105, 157, 10, 157, 223, 10, 223, 3,\
# Character 'o' Y coordinate indices.
20, 83, 36, 36, 89, 209, 209, 9, 133, 209, 133, 138,\
138, 7, 20, 138, 20, 19, 138, 19, 62, 209, 138, 62,\
209, 62, 130, 209, 130, 75, 36, 209, 75, 36, 75, 130,\
36, 130, 83, 20, 36, 83, 20, 83, 19, 20, 19, 20,\
20, 7, 20,\
# Character 'p' Y coordinate indices.
204, 204, 11, 11, 11, 177, 11, 177, 130, 11, 130, 201,\
204, 11, 201, 5, 130, 177, 5, 177, 109, 38, 5, 109,\
38, 109, 89, 38, 89, 186, 62, 38, 186, 62, 186, 45,\
19, 62, 45, 19, 45, 62, 19, 62, 102, 20, 19, 102,\
20, 102, 54, 20, 54, 7, 133, 20, 7, 133, 7, 224,\
201, 133, 224, 201, 224, 225, 201, 225, 204,\
# Character 'q' Y coordinate indices.
204, 204, 225, 225, 172, 7, 225, 7, 61, 61, 62, 210,\
61, 210, 186, 186, 89, 123, 123, 130, 5, 186, 123, 5,\
186, 5, 38, 186, 38, 83, 61, 186, 83, 61, 83, 19,\
61, 19, 20, 225, 61, 20, 225, 20, 133, 225, 133, 125,\
204, 225, 125, 125, 130, 123, 204, 125, 123, 204, 123, 11,\
11, 11, 204,\
# Character 'r' Y coordinate indices.
3, 3, 11, 11, 11, 50, 3, 11, 50, 50, 109, 89,\
50, 89, 70, 70, 157, 50, 50, 70, 50, 50, 50, 37,\
50, 37, 155, 50, 155, 226, 50, 226, 3,\
# Character 's' Y coordinate indices.
101, 69, 55, 101, 55, 68, 101, 68, 20, 227, 62, 91,\
116, 227, 91, 116, 91, 23, 23, 10, 228, 23, 228, 11,\
23, 11, 89, 89, 109, 88, 89, 88, 39, 39, 155, 112,\
89, 39, 112, 89, 112, 75, 89, 75, 50, 23, 89, 50,\
23, 50, 168, 23, 168, 190, 116, 23, 190, 116, 190, 22,\
116, 22, 198, 116, 198, 25, 139, 116, 25, 139, 25, 137,\
147, 139, 137, 147, 137, 69, 147, 69, 140, 20, 147, 140,\
101, 20, 140, 101, 140, 172, 172, 7, 101,\
# Character 't' Y coordinate indices.
134, 145, 3, 134, 3, 229, 134, 229, 172, 134, 172, 185,\
185, 69, 123, 123, 123, 11, 123, 11, 11, 185, 123, 11,\
185, 11, 103, 185, 103, 230, 185, 230, 11, 11, 11, 123,\
11, 123, 123, 185, 11, 123, 185, 123, 139, 185, 139, 4,\
185, 4, 16, 185, 16, 134,\
# Character 'u' Y coordinate indices.
3, 3, 134, 134, 7, 82, 134, 82, 44, 44, 90, 8,\
44, 8, 11, 44, 11, 11, 44, 11, 136, 44, 136, 208,\
44, 208, 16, 44, 16, 61, 134, 44, 61, 134, 61, 16,\
134, 16, 208, 3, 134, 208, 3, 208, 132, 3, 132, 11,\
11, 11, 3,\
# Character 'v' Y coordinate indices.
3, 3, 11, 11, 11, 119, 11, 119, 42, 3, 11, 42,\
3, 42, 43, 3, 43, 11, 11, 11, 3,\
# Character 'w' Y coordinate indices.
3, 3, 11, 11, 11, 165, 11, 165, 133, 3, 11, 133,\
3, 133, 18, 3, 18, 11, 11, 11, 165, 11, 165, 102,\
165, 11, 11, 102, 165, 11, 102, 11, 3, 11, 102, 3,\
3, 3, 91, 3, 91, 45, 11, 3, 45, 11, 45, 3,\
# Character 'x' Y coordinate indices.
3, 3, 9, 9, 11, 11, 9, 11, 45, 9, 45, 22,\
3, 9, 22, 3, 22, 45, 3, 45, 11, 11, 11, 25,\
25, 3, 3, 25, 3, 59, 25, 59, 43, 11, 25, 43,\
11, 43, 3,\
# Character 'y' Y coordinate indices.
81, 204, 219, 81, 219, 205, 81, 205, 215, 81, 215, 94,\
3, 11, 11, 3, 11, 165, 3, 165, 4, 231, 3, 4,\
94, 231, 4, 4, 18, 11, 4, 11, 11, 94, 4, 11,\
81, 94, 11, 81, 11, 229, 81, 229, 205, 205, 232, 81,\
# Character 'z' Y coordinate indices.
3, 3, 167, 123, 10, 10, 123, 10, 11, 123, 11, 11,\
167, 123, 11, 167, 11, 5, 167, 5, 114, 167, 114, 167,\
3, 167, 167, 3, 167, 134, 134, 134, 3,\
# Character '{' Y coordinate indices.
165, 56, 66, 211, 80, 111, 155, 211, 111, 155, 111, 230,\
155, 230, 233, 233, 12, 12, 233, 12, 152, 233, 152, 152,\
233, 152, 73, 155, 233, 73, 155, 73, 31, 155, 31, 168,\
66, 155, 168, 165, 66, 168, 165, 168, 118, 165, 118, 83,\
165, 83, 116, 116, 166, 234, 206, 206, 81, 235, 206, 81,\
235, 81, 81, 235, 81, 218, 234, 235, 218, 116, 234, 218,\
116, 218, 236, 116, 236, 13, 116, 13, 54, 116, 54, 139,\
116, 139, 17, 116, 17, 165,\
# Character '|' Y coordinate indices.
81, 81, 12, 12, 12, 81,\
# Character '}' Y coordinate indices.
149, 56, 165, 149, 165, 17, 149, 17, 139, 101, 237, 95,\
139, 101, 95, 139, 95, 175, 139, 175, 238, 149, 139, 238,\
238, 81, 81, 238, 81, 206, 238, 206, 206, 238, 206, 219,\
149, 238, 219, 239, 200, 119, 239, 119, 83, 219, 239, 83,\
149, 219, 83, 149, 83, 182, 149, 182, 151, 152, 152, 12,\
51, 152, 12, 51, 12, 12, 51, 12, 98, 240, 51, 98,\
151, 240, 98, 151, 98, 76, 151, 76, 161, 151, 161, 228,\
151, 228, 155, 151, 155, 149)
#
# This is an offset from the start of the nice_triangle_x_index and
# nice_triangle_y_index arrays. It gives the start of a glyph's first
# triangle coordinate indices.
#
nice_triangle_start = (\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 18, 42, 138, 264, 366, 480, 492, 516, 540, 579,\
609, 630, 636, 642, 648, 708, 729, 789, 873, 915, 975,\
1059, 1086, 1191, 1281, 1293, 1320, 1335, 1347, 1362, 1425, 1608,\
1647, 1752, 1812, 1878, 1908, 1932, 2013, 2043, 2049, 2085, 2115,\
2127, 2166, 2190, 2250, 2307, 2388, 2469, 2568, 2586, 2631, 2652,\
2703, 2745, 2772, 2802, 2820, 2826, 2844, 2859, 2865, 2871, 2982,\
3045, 3096, 3162, 3222, 3273, 3360, 3405, 3417, 3447, 3477, 3483,\
3561, 3609, 3660, 3729, 3792, 3825, 3918, 3972, 4023, 4044, 4092,\
4131, 4179, 4212, 4302, 4308, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
#
# This table gives the number of triangles in a glyph.
#
nice_triangle_ntriangles = (\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
6, 8, 32, 42, 34, 38, 4, 8, 8, 13, 10,\
7, 2, 2, 2, 20, 7, 20, 28, 14, 20, 28,\
9, 35, 30, 4, 9, 5, 4, 5, 21, 61, 13,\
35, 20, 22, 10, 8, 27, 10, 2, 12, 10, 4,\
13, 8, 20, 19, 27, 27, 33, 6, 15, 7, 17,\
14, 9, 10, 6, 2, 6, 5, 2, 2, 37, 21,\
17, 22, 20, 17, 29, 15, 4, 10, 10, 2, 26,\
16, 17, 23, 21, 11, 31, 18, 17, 7, 16, 13,\
16, 11, 30, 2, 30, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
#
# This table gives the space required to display this character.
#
nice_triangle_spacing = (\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.300, 0.278, 0.355, 0.556,\
0.556, 0.889, 0.667, 0.191, 0.333, 0.333, 0.389, 0.584, 0.278,\
0.333, 0.278, 0.278, 0.556, 0.556, 0.556, 0.556, 0.556, 0.556,\
0.556, 0.556, 0.556, 0.556, 0.278, 0.278, 0.584, 0.584, 0.584,\
0.556, 1.015, 0.667, 0.667, 0.722, 0.722, 0.667, 0.611, 0.778,\
0.722, 0.278, 0.500, 0.667, 0.556, 0.833, 0.722, 0.778, 0.667,\
0.778, 0.722, 0.667, 0.611, 0.722, 0.667, 0.944, 0.667, 0.667,\
0.611, 0.278, 0.278, 0.278, 0.469, 0.556, 0.333, 0.556, 0.556,\
0.500, 0.556, 0.556, 0.278, 0.556, 0.556, 0.222, 0.222, 0.500,\
0.222, 0.833, 0.556, 0.556, 0.556, 0.556, 0.333, 0.500, 0.278,\
0.556, 0.500, 0.722, 0.500, 0.500, 0.500, 0.334, 0.260, 0.334,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000, 0.000,\
0.000, 0.000, 0.000, 0.000)
###############################################################################
# Function: drawRotatedText
#
# Purpose: This function draws text into a PIL image.
#
# Arguments:
# im : The image to draw on.
# x0 : The x coordinate of the text in range [0,1].
# y0 : The y coordinate of the text in range [0,1].
# chars : The text to be drawn
# scale : The scale of the text in range [0,1]. It's a percent
# of the image height.
# angleDegrees : The angle of rotation for the text.
# fillColor : A 3-tuple containing rgb color values in range [0,255].
#
# Notes: The (x0,y0) coordinate corresponds to the lower left corner
# of the text to be drawn.
#
# Programmer: Brad Whitlock
# Date: Wed Dec 18 11:33:22 PDT 2002
#
###############################################################################
def drawRotatedText(im, x0, y0, chars, scale, angleDegrees, fillColor):
    draw = ImageDraw.Draw(im)
    char_base_x = 0.
    char_base_y = 0.
    screenCenterX = int(x0 * float(im.size[0]))
    screenCenterY = int((1. - y0) * float(im.size[1]))
    xScale = float(scale) * float(im.size[0])
    yScale = float(scale) * float(im.size[1])
    angle = math.pi * (angleDegrees / 180.)
    #
    # Loop over each character plotting each one in the correct position.
    #
    for c in chars:
        ch = ord(c)
        ntri = nice_triangle_ntriangles[ch]
        # Get the index array of the glyph.
        ts = nice_triangle_start[ch]
        # Draw the triangles in the glyph.
        for j in range(0, ntri):
            polygon = []
            for k in range(0,3):
                # Determine the point in world space
                x = nice_font_x[nice_triangle_x_index[ts]] + char_base_x
                y = nice_font_y[nice_triangle_y_index[ts]]
                # Rotate the point
                xr = x
                yr = y
                if(angle != 0.):
                    r = math.sqrt(x * x + y * y)
                    if(x != 0.):
                        angle0 = math.atan(y / x)
                        xr = r * math.cos(angle0 + angle)
                        yr = r * math.sin(angle0 + angle)
                # Transform the points to screen space
                xs = int(xScale * xr) + screenCenterX
                ys = int(yScale * -yr) + screenCenterY
                ts = ts + 1
                polygon = polygon + [(xs, ys)]
            # Draw the triangle.
            draw.polygon(polygon, fill=fillColor)
        # Update the character position
        char_base_x = char_base_x + nice_triangle_spacing[ch]
        char_base_y = char_base_y
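# Example usage (a sketch only; it assumes PIL's Image module is imported in
# this module alongside the ImageDraw and math modules used above):
#
#   img = Image.new("RGB", (400, 200), (255, 255, 255))
#   drawRotatedText(img, 0.1, 0.4, "Label", 0.1, 30., (0, 0, 0))
#   img.save("label.png")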
###############################################################################
# Function: drawText
#
# Purpose: This function draws text into a PIL image.
#
# Arguments:
# im : The image to draw on.
# x0 : The x coordinate of the text in range [0,1].
# y0 : The y coordinate of the text in range [0,1].
# chars : The text to be drawn
# scale : The scale of the text in range [0,1]. It's a percent
# of the image height.
# fillColor : A 3-tuple containing rgb color values in range [0,255].
#
# Notes: The (x0,y0) coordinate corresponds to the lower left corner
# of the text to be drawn.
#
# Programmer: Brad Whitlock
# Date: Wed Dec 18 11:33:22 PDT 2002
#
###############################################################################
def drawText(im, x0, y0, chars, scale, fillColor):
    drawRotatedText(im, x0, y0, chars, scale, 0., fillColor)

def calculateTextWidth(astr):
    width = 0.
    for c in astr:
        ch = ord(c)
        width = width + nice_triangle_spacing[ch]
    return width

def calculateTextWidthInPixels(astr, im, scale):
    return (float(scale) / float(im.size[0])) * calculateTextWidth(astr)
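# For example, a label can be right-aligned using the width helper (a sketch;
# 'img' and 'caption' are hypothetical). At a given scale, the text occupies
# calculateTextWidth(caption) * scale of the image width:
#
#   frac = calculateTextWidth(caption) * 0.03
#   drawText(img, 0.95 - frac, 0.05, caption, 0.03, (0, 0, 0))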
|
py | 1a542f395f2fb6a3d59b211d06af2e6773f8b8a0 | from PySide2.QtWidgets import QDialog, QVBoxLayout, QHBoxLayout, QPushButton, QFileDialog, QWidget, QLabel, \
QListWidget, QListWidgetItem
import os
from custom_src.global_tools.Debugger import Debugger
class SelectPackages_Dialog(QDialog):
    def __init__(self, parent, packages):
        super(SelectPackages_Dialog, self).__init__(parent)

        self.file_paths = []
        self.required_packages = packages

        self.setLayout(QVBoxLayout())
        self.layout().addWidget(QLabel('You need to select the locations of the following required node packages'))

        # package lists
        required_packages_list_widget = QListWidget()
        for p in packages:
            package_item = QListWidgetItem()
            package_item.setText(p)
            required_packages_list_widget.addItem(package_item)

        selected_items_widget = QWidget()
        selected_items_widget.setLayout(QVBoxLayout())
        self.selected_packages_list_widget = QListWidget()
        selected_items_widget.layout().addWidget(self.selected_packages_list_widget)

        auto_import_button = QPushButton('auto import')
        auto_import_button.setFocus()
        auto_import_button.clicked.connect(self.auto_import_button_clicked)
        selected_items_widget.layout().addWidget(auto_import_button)

        add_package_button = QPushButton('add')
        add_package_button.clicked.connect(self.add_package_button_clicked)
        selected_items_widget.layout().addWidget(add_package_button)

        clear_package_list_button = QPushButton('clear')
        clear_package_list_button.clicked.connect(self.clear_selected_packages_list)
        selected_items_widget.layout().addWidget(clear_package_list_button)

        finished_button = QPushButton('OK')
        finished_button.clicked.connect(self.finished_button_clicked)
        selected_items_widget.layout().addWidget(finished_button)

        packages_lists_widget = QWidget()
        packages_lists_widget.setLayout(QHBoxLayout())
        packages_lists_widget.layout().addWidget(required_packages_list_widget)
        packages_lists_widget.layout().addWidget(selected_items_widget)
        self.layout().addWidget(packages_lists_widget)

        self.setWindowTitle('select required packages')
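    # A minimal usage sketch (the caller and package names are hypothetical; in
    # practice the main window opens this dialog when a loaded project refers to
    # node packages it cannot find):
    #
    #   dialog = SelectPackages_Dialog(main_window, ['std', 'linalg'])
    #   if dialog.exec_():
    #       package_file_paths = dialog.file_paths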
    def auto_import_button_clicked(self):
        packages_dir = '../packages'
        folders_list = [x[0] for x in os.walk(packages_dir) if
                        os.path.basename(os.path.normpath(x[0])) in self.required_packages]
        required_files = self.required_packages.copy()
        for folder in folders_list:
            for r_f in required_files:
                if r_f + '.rpc' in os.listdir(packages_dir + '/' + folder):
                    self.file_paths.append(os.path.normpath(packages_dir + '/' + folder + '/' + r_f + '.rpc'))
                    break

        self.rebuild_selected_packages_list_widget()
        self.clean_packages_list()
        if self.all_required_packages_selected():
            self.finished()

    def add_package_button_clicked(self):
        file_names = \
            QFileDialog.getOpenFileNames(self, 'select package files', '../packages', 'Ryven Package(*.rpc)')[0]
        for file_name in file_names:
            try:
                f = open(file_name)
                f.close()
                self.file_paths.append(file_name)
            except FileNotFoundError:
                Debugger.debug('couldn\'t open file')

        self.rebuild_selected_packages_list_widget()

    def rebuild_selected_packages_list_widget(self):
        # remove all items
        self.selected_packages_list_widget.clear()

        for f in self.file_paths:
            file_item = QListWidgetItem()
            file_item.setText(f)
            self.selected_packages_list_widget.addItem(file_item)

    def clear_selected_packages_list(self):
        self.file_paths.clear()
        self.rebuild_selected_packages_list_widget()

    def finished_button_clicked(self):
        self.clean_packages_list()
        if self.all_required_packages_selected():
            self.finished()

    def clean_packages_list(self):
        """remove duplicates from self.file_paths"""
        files_dict = {}
        for p in self.file_paths:
            filename = os.path.splitext(os.path.basename(p))[0]
            files_dict[filename] = p
        self.file_paths = list(files_dict.values())
        self.rebuild_selected_packages_list_widget()

    def all_required_packages_selected(self):
        files = [os.path.splitext(os.path.basename(path))[0] for path in self.file_paths]

        # search for missing packages
        for p in self.required_packages:
            if p not in files:
                return False
        return True

    def finished(self):
        self.accept() |
py | 1a543120049002cd827359817739835b840f0644 | # Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
import copy
import json
import logging
import re
from collections import OrderedDict
from datetime import timedelta
# OAuth2
from oauthlib import oauth2
from oauthlib.common import generate_token
# Django
from django.conf import settings
from django.contrib.auth import update_session_auth_hash
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist, ValidationError as DjangoValidationError
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import force_text
from django.utils.text import capfirst
from django.utils.timezone import now
from django.utils.functional import cached_property
# Django REST Framework
from rest_framework.exceptions import ValidationError, PermissionDenied
from rest_framework.relations import ManyRelatedField
from rest_framework import fields
from rest_framework import serializers
from rest_framework import validators
from rest_framework.utils.serializer_helpers import ReturnList
# Django-Polymorphic
from polymorphic.models import PolymorphicModel
# AWX
from awx.main.access import get_user_capabilities
from awx.main.constants import (
SCHEDULEABLE_PROVIDERS,
ANSI_SGR_PATTERN,
ACTIVE_STATES,
CENSOR_VALUE,
)
from awx.main.models import (
ActivityStream, AdHocCommand, AdHocCommandEvent, Credential, CredentialInputSource,
CredentialType, CustomInventoryScript, Group, Host, Instance,
InstanceGroup, Inventory, InventorySource, InventoryUpdate,
InventoryUpdateEvent, Job, JobEvent, JobHostSummary, JobLaunchConfig,
JobTemplate, Label, Notification, NotificationTemplate,
OAuth2AccessToken, OAuth2Application, Organization, Project,
ProjectUpdate, ProjectUpdateEvent, RefreshToken, Role, Schedule,
SystemJob, SystemJobEvent, SystemJobTemplate, Team, UnifiedJob,
UnifiedJobTemplate, V1Credential, WorkflowJob, WorkflowJobNode,
WorkflowJobTemplate, WorkflowJobTemplateNode, StdoutMaxBytesExceeded
)
from awx.main.models.base import VERBOSITY_CHOICES, NEW_JOB_TYPE_CHOICES
from awx.main.models.rbac import (
get_roles_on_resource, role_summary_fields_generator
)
from awx.main.fields import ImplicitRoleField, JSONBField
from awx.main.utils import (
get_type_for_model, get_model_for_type,
camelcase_to_underscore, getattrd, parse_yaml_or_json,
has_model_field_prefetched, extract_ansible_vars, encrypt_dict,
prefetch_page_capabilities, get_external_account)
from awx.main.utils.filters import SmartFilter
from awx.main.redact import UriCleaner, REPLACE_STR
from awx.main.validators import vars_validate_or_raise
from awx.api.versioning import reverse, get_request_version
from awx.api.fields import (BooleanNullField, CharNullField, ChoiceNullField,
VerbatimField, DeprecatedCredentialField)
logger = logging.getLogger('awx.api.serializers')
# Fields that should be summarized regardless of object type.
DEFAULT_SUMMARY_FIELDS = ('id', 'name', 'description')# , 'created_by', 'modified_by')#, 'type')
# Keys are fields (foreign keys) where, if found on an instance, summary info
# should be added to the serialized data. Values are a tuple of field names on
# the related object to include in the summary data (if the field is present on
# the related object).
SUMMARIZABLE_FK_FIELDS = {
'organization': DEFAULT_SUMMARY_FIELDS,
'user': ('id', 'username', 'first_name', 'last_name'),
'application': ('id', 'name'),
'team': DEFAULT_SUMMARY_FIELDS,
'inventory': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources',
'total_inventory_sources',
'inventory_sources_with_failures',
'organization_id',
'kind',
'insights_credential_id',),
'host': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'has_inventory_sources'),
'group': DEFAULT_SUMMARY_FIELDS + ('has_active_failures',
'total_hosts',
'hosts_with_active_failures',
'total_groups',
'groups_with_active_failures',
'has_inventory_sources'),
'project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'source_project': DEFAULT_SUMMARY_FIELDS + ('status', 'scm_type'),
'project_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed',),
'credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'vault_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'elapsed', 'type'),
'job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job_template': DEFAULT_SUMMARY_FIELDS,
'workflow_job': DEFAULT_SUMMARY_FIELDS,
'schedule': DEFAULT_SUMMARY_FIELDS + ('next_run',),
'unified_job_template': DEFAULT_SUMMARY_FIELDS + ('unified_job_type',),
'last_job': DEFAULT_SUMMARY_FIELDS + ('finished', 'status', 'failed', 'license_error'),
'last_job_host_summary': DEFAULT_SUMMARY_FIELDS + ('failed',),
'last_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_update': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'current_job': DEFAULT_SUMMARY_FIELDS + ('status', 'failed', 'license_error'),
'inventory_source': ('source', 'last_updated', 'status'),
'custom_inventory_script': DEFAULT_SUMMARY_FIELDS,
'source_script': ('name', 'description'),
'role': ('id', 'role_field'),
'notification_template': DEFAULT_SUMMARY_FIELDS,
'instance_group': {'id', 'name', 'controller_id'},
'insights_credential': DEFAULT_SUMMARY_FIELDS,
'source_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
'target_credential': DEFAULT_SUMMARY_FIELDS + ('kind', 'cloud', 'credential_type_id'),
}
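# As an illustration (the values are hypothetical), a serialized job built with
# the table above ends up with summary data shaped roughly like:
#
#   summary_fields = {
#       'inventory': {'id': 1, 'name': 'Demo Inventory', 'total_hosts': 5, ...},
#       'credential': {'id': 2, 'name': 'Demo Credential', 'kind': 'ssh', ...},
#       'created_by': {'id': 3, 'username': 'admin', ...},
#   }
#
# BaseSerializer.get_summary_fields below assembles this structure from the
# field tuples defined here.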
def reverse_gfk(content_object, request):
    '''
    Computes a reverse for a GenericForeignKey field.

    Returns a dictionary of the form
        { '<type>': reverse(<type detail>) }
    for example
        { 'organization': '/api/v1/organizations/1/' }
    '''
    if content_object is None or not hasattr(content_object, 'get_absolute_url'):
        return {}

    return {
        camelcase_to_underscore(content_object.__class__.__name__): content_object.get_absolute_url(request=request)
    }


class CopySerializer(serializers.Serializer):

    name = serializers.CharField()

    def validate(self, attrs):
        name = attrs.get('name')
        view = self.context.get('view', None)
        obj = view.get_object()
        if name == obj.name:
            raise serializers.ValidationError(_(
                'The original object is already named {}, a copy from'
                ' it cannot have the same name.'.format(name)
            ))
        return attrs
class BaseSerializerMetaclass(serializers.SerializerMetaclass):
    '''
    Custom metaclass to enable attribute inheritance from Meta objects on
    serializer base classes.

    Also allows for inheriting or updating field lists from base class(es):

        class Meta:

            # Inherit all fields from base class.
            fields = ('*',)

            # Inherit all fields from base class and add 'foo'.
            fields = ('*', 'foo')

            # Inherit all fields from base class except 'bar'.
            fields = ('*', '-bar')

            # Define fields as 'foo' and 'bar'; ignore base class fields.
            fields = ('foo', 'bar')

            # Extra field kwargs dicts are also merged from base classes.
            extra_kwargs = {
                'foo': {'required': True},
                'bar': {'read_only': True},
            }

            # If a subclass were to define extra_kwargs as:
            extra_kwargs = {
                'foo': {'required': False, 'default': ''},
                'bar': {'label': 'New Label for Bar'},
            }

            # The resulting value of extra_kwargs would be:
            extra_kwargs = {
                'foo': {'required': False, 'default': ''},
                'bar': {'read_only': True, 'label': 'New Label for Bar'},
            }

            # Extra field kwargs cannot be removed in subclasses, only replaced.
    '''

    @staticmethod
    def _is_list_of_strings(x):
        return isinstance(x, (list, tuple)) and all([isinstance(y, str) for y in x])

    @staticmethod
    def _is_extra_kwargs(x):
        return isinstance(x, dict) and all([isinstance(k, str) and isinstance(v, dict) for k,v in x.items()])

    @classmethod
    def _update_meta(cls, base, meta, other=None):
        for attr in dir(other):
            if attr.startswith('_'):
                continue
            val = getattr(other, attr)
            meta_val = getattr(meta, attr, None)
            # Special handling for lists/tuples of strings (field names).
            if cls._is_list_of_strings(val) and cls._is_list_of_strings(meta_val or []):
                meta_val = meta_val or []
                new_vals = []
                except_vals = []
                if base:  # Merge values from all bases.
                    new_vals.extend([x for x in meta_val])
                for v in val:
                    if not base and v == '*':  # Inherit all values from previous base(es).
                        new_vals.extend([x for x in meta_val])
                    elif not base and v.startswith('-'):  # Except these values.
                        except_vals.append(v[1:])
                    else:
                        new_vals.append(v)
                val = []
                for v in new_vals:
                    if v not in except_vals and v not in val:
                        val.append(v)
                val = tuple(val)
            # Merge extra_kwargs dicts from base classes.
            elif cls._is_extra_kwargs(val) and cls._is_extra_kwargs(meta_val or {}):
                meta_val = meta_val or {}
                new_val = {}
                if base:
                    for k,v in meta_val.items():
                        new_val[k] = copy.deepcopy(v)
                for k,v in val.items():
                    new_val.setdefault(k, {}).update(copy.deepcopy(v))
                val = new_val
            # Any other values are copied in case they are mutable objects.
            else:
                val = copy.deepcopy(val)
            setattr(meta, attr, val)

    def __new__(cls, name, bases, attrs):
        meta = type('Meta', (object,), {})
        for base in bases[::-1]:
            cls._update_meta(base, meta, getattr(base, 'Meta', None))
        cls._update_meta(None, meta, attrs.get('Meta', meta))
        attrs['Meta'] = meta
        return super(BaseSerializerMetaclass, cls).__new__(cls, name, bases, attrs)
class BaseSerializer(serializers.ModelSerializer, metaclass=BaseSerializerMetaclass):

    class Meta:
        fields = ('id', 'type', 'url', 'related', 'summary_fields', 'created',
                  'modified', 'name', 'description')
        summary_fields = ()
        summarizable_fields = ()

    # add the URL and related resources
    type = serializers.SerializerMethodField()
    url = serializers.SerializerMethodField()
    related = serializers.SerializerMethodField('_get_related')
    summary_fields = serializers.SerializerMethodField('_get_summary_fields')

    # make certain fields read only
    created = serializers.SerializerMethodField()
    modified = serializers.SerializerMethodField()

    def __init__(self, *args, **kwargs):
        super(BaseSerializer, self).__init__(*args, **kwargs)
        # The following lines fix the problem of being able to pass JSON dict into PrimaryKeyRelatedField.
        data = kwargs.get('data', False)
        if data:
            for field_name, field_instance in self.fields.items():
                if isinstance(field_instance, ManyRelatedField) and not field_instance.read_only:
                    if isinstance(data.get(field_name, False), dict):
                        raise serializers.ValidationError(_('Cannot use dictionary for %s' % field_name))

    @property
    def version(self):
        """
        The request version component of the URL as an integer i.e., 1 or 2
        """
        return get_request_version(self.context.get('request')) or 1

    def get_type(self, obj):
        return get_type_for_model(self.Meta.model)

    def get_types(self):
        return [self.get_type(None)]

    def get_type_choices(self):
        type_name_map = {
            'job': _('Playbook Run'),
            'ad_hoc_command': _('Command'),
            'project_update': _('SCM Update'),
            'inventory_update': _('Inventory Sync'),
            'system_job': _('Management Job'),
            'workflow_job': _('Workflow Job'),
            'workflow_job_template': _('Workflow Template'),
            'job_template': _('Job Template')
        }
        choices = []
        for t in self.get_types():
            name = _(type_name_map.get(t, force_text(get_model_for_type(t)._meta.verbose_name).title()))
            choices.append((t, name))
        return choices

    def get_url(self, obj):
        if obj is None or not hasattr(obj, 'get_absolute_url'):
            return ''
        elif isinstance(obj, User):
            return self.reverse('api:user_detail', kwargs={'pk': obj.pk})
        else:
            return obj.get_absolute_url(request=self.context.get('request'))
def filter_field_metadata(self, fields, method):
"""
Filter field metadata based on the request method.
        This is intended to be extended by subclasses.
"""
return fields
def _get_related(self, obj):
return {} if obj is None else self.get_related(obj)
def _generate_named_url(self, url_path, obj, node):
url_units = url_path.split('/')
named_url = node.generate_named_url(obj)
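        # Illustrative example: for a URL such as '/api/v2/hosts/42/',
        # split('/') yields ['', 'api', 'v2', 'hosts', '42', ''], so index 4
        # is the pk segment that gets replaced by the named URL.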
url_units[4] = named_url
return '/'.join(url_units)
def get_related(self, obj):
res = OrderedDict()
view = self.context.get('view', None)
if view and (hasattr(view, 'retrieve') or view.request.method == 'POST') and \
type(obj) in settings.NAMED_URL_GRAPH:
original_url = self.get_url(obj)
if not original_url.startswith('/api/v1'):
res['named_url'] = self._generate_named_url(
original_url, obj, settings.NAMED_URL_GRAPH[type(obj)]
)
if getattr(obj, 'created_by', None):
res['created_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.created_by.pk})
if getattr(obj, 'modified_by', None):
res['modified_by'] = self.reverse('api:user_detail', kwargs={'pk': obj.modified_by.pk})
return res
def _get_summary_fields(self, obj):
return {} if obj is None else self.get_summary_fields(obj)
def get_summary_fields(self, obj):
# Return values for certain fields on related objects, to simplify
# displaying lists of items without additional API requests.
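        # Illustrative sketch of the resulting shape (actual keys depend on
        # SUMMARIZABLE_FK_FIELDS and the object's relations; the names below
        # are hypothetical examples):
        #     {'inventory': {'id': 1, 'name': 'Demo Inventory'},
        #      'created_by': {'id': 1, 'username': 'admin'}}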
summary_fields = OrderedDict()
for fk, related_fields in SUMMARIZABLE_FK_FIELDS.items():
try:
# A few special cases where we don't want to access the field
# because it results in additional queries.
if fk == 'job' and isinstance(obj, UnifiedJob):
continue
if fk == 'project' and (isinstance(obj, InventorySource) or
isinstance(obj, Project)):
continue
try:
fkval = getattr(obj, fk, None)
except ObjectDoesNotExist:
continue
if fkval is None:
continue
if fkval == obj:
continue
summary_fields[fk] = OrderedDict()
for field in related_fields:
if self.version < 2 and field == 'credential_type_id': # TODO: remove version check in 3.3
continue
fval = getattr(fkval, field, None)
if fval is None and field == 'type':
if isinstance(fkval, PolymorphicModel):
fkval = fkval.get_real_instance()
fval = get_type_for_model(fkval)
elif fval is None and field == 'unified_job_type' and isinstance(fkval, UnifiedJobTemplate):
fkval = fkval.get_real_instance()
fval = get_type_for_model(fkval._get_unified_job_class())
if fval is not None:
summary_fields[fk][field] = fval
# Can be raised by the reverse accessor for a OneToOneField.
except ObjectDoesNotExist:
pass
if getattr(obj, 'created_by', None):
summary_fields['created_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['created_by'][field] = getattr(obj.created_by, field)
if getattr(obj, 'modified_by', None):
summary_fields['modified_by'] = OrderedDict()
for field in SUMMARIZABLE_FK_FIELDS['user']:
summary_fields['modified_by'][field] = getattr(obj.modified_by, field)
# RBAC summary fields
roles = {}
for field in obj._meta.get_fields():
if type(field) is ImplicitRoleField:
roles[field.name] = role_summary_fields_generator(obj, field.name)
if len(roles) > 0:
summary_fields['object_roles'] = roles
        # Advanced display of RBAC capabilities
if hasattr(self, 'show_capabilities'):
user_capabilities = self._obj_capability_dict(obj)
if user_capabilities:
summary_fields['user_capabilities'] = user_capabilities
return summary_fields
def _obj_capability_dict(self, obj):
"""
Returns the user_capabilities dictionary for a single item
        If inside a list view, it runs the prefetching algorithm for
        the entire current page and saves the result into the serializer context.
"""
view = self.context.get('view', None)
parent_obj = None
if view and hasattr(view, 'parent_model') and hasattr(view, 'get_parent_object'):
parent_obj = view.get_parent_object()
if view and view.request and view.request.user:
capabilities_cache = {}
# if serializer has parent, it is ListView, apply page capabilities prefetch
if self.parent and hasattr(self, 'capabilities_prefetch') and self.capabilities_prefetch:
qs = self.parent.instance
if 'capability_map' not in self.context:
if hasattr(self, 'polymorphic_base'):
model = self.polymorphic_base.Meta.model
prefetch_list = self.polymorphic_base._capabilities_prefetch
else:
model = self.Meta.model
prefetch_list = self.capabilities_prefetch
self.context['capability_map'] = prefetch_page_capabilities(
model, qs, prefetch_list, view.request.user
)
if obj.id in self.context['capability_map']:
capabilities_cache = self.context['capability_map'][obj.id]
return get_user_capabilities(
view.request.user, obj, method_list=self.show_capabilities, parent_obj=parent_obj,
capabilities_cache=capabilities_cache
)
else:
# Contextual information to produce user_capabilities doesn't exist
return {}
def get_created(self, obj):
if obj is None:
return None
elif isinstance(obj, User):
return obj.date_joined
elif hasattr(obj, 'created'):
return obj.created
return None
def get_modified(self, obj):
if obj is None:
return None
elif isinstance(obj, User):
return obj.last_login # Not actually exposed for User.
elif hasattr(obj, 'modified'):
return obj.modified
return None
def get_extra_kwargs(self):
extra_kwargs = super(BaseSerializer, self).get_extra_kwargs()
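        # When updating an existing object, any field listed in the Meta option
        # read_only_on_update_fields (e.g. OAuth2ApplicationSerializer marks
        # 'user' and 'authorization_grant_type') is forced to be read-only.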
if self.instance:
read_only_on_update_fields = getattr(self.Meta, 'read_only_on_update_fields', tuple())
for field_name in read_only_on_update_fields:
kwargs = extra_kwargs.get(field_name, {})
kwargs['read_only'] = True
extra_kwargs[field_name] = kwargs
return extra_kwargs
def build_standard_field(self, field_name, model_field):
# DRF 3.3 serializers.py::build_standard_field() -> utils/field_mapping.py::get_field_kwargs() short circuits
# when a Model's editable field is set to False. The short circuit skips choice rendering.
#
        # This logic is to force rendering choices on an uneditable field.
        # Note: Consider expanding this rendering for more than just choice fields.
        # Note: This logic works in conjunction with
if hasattr(model_field, 'choices') and model_field.choices:
was_editable = model_field.editable
model_field.editable = True
field_class, field_kwargs = super(BaseSerializer, self).build_standard_field(field_name, model_field)
if hasattr(model_field, 'choices') and model_field.choices:
model_field.editable = was_editable
if was_editable is False:
field_kwargs['read_only'] = True
# Pass model field default onto the serializer field if field is not read-only.
if model_field.has_default() and not field_kwargs.get('read_only', False):
field_kwargs['default'] = field_kwargs['initial'] = model_field.get_default()
# Enforce minimum value of 0 for PositiveIntegerFields.
if isinstance(model_field, (models.PositiveIntegerField, models.PositiveSmallIntegerField)) and 'choices' not in field_kwargs:
field_kwargs['min_value'] = 0
# Use custom boolean field that allows null and empty string as False values.
if isinstance(model_field, models.BooleanField) and not field_kwargs.get('read_only', False):
field_class = BooleanNullField
# Use custom char or choice field that coerces null to an empty string.
if isinstance(model_field, (models.CharField, models.TextField)) and not field_kwargs.get('read_only', False):
if 'choices' in field_kwargs:
field_class = ChoiceNullField
else:
field_class = CharNullField
# Update the message used for the unique validator to use capitalized
# verbose name; keeps unique message the same as with DRF 2.x.
opts = self.Meta.model._meta.concrete_model._meta
for validator in field_kwargs.get('validators', []):
if isinstance(validator, validators.UniqueValidator):
unique_error_message = model_field.error_messages.get('unique', None)
if unique_error_message:
unique_error_message = unique_error_message % {
'model_name': capfirst(opts.verbose_name),
'field_label': capfirst(model_field.verbose_name),
}
validator.message = unique_error_message
return field_class, field_kwargs
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(BaseSerializer, self).build_relational_field(field_name, relation_info)
# Don't include choices for foreign key fields.
field_kwargs.pop('choices', None)
return field_class, field_kwargs
def get_unique_together_validators(self):
# Allow the model's full_clean method to handle the unique together validation.
return []
def run_validation(self, data=fields.empty):
try:
return super(BaseSerializer, self).run_validation(data)
except ValidationError as exc:
# Avoid bug? in DRF if exc.detail happens to be a list instead of a dict.
raise ValidationError(detail=serializers.as_serializer_error(exc))
def get_validation_exclusions(self, obj=None):
# Borrowed from DRF 2.x - return model fields that should be excluded
# from model validation.
cls = self.Meta.model
opts = cls._meta.concrete_model._meta
exclusions = [field.name for field in opts.fields]
for field_name, field in self.fields.items():
field_name = field.source or field_name
if field_name not in exclusions:
continue
if field.read_only:
continue
if isinstance(field, serializers.Serializer):
continue
exclusions.remove(field_name)
        # The clean_ methods cannot be run on many-to-many models.
exclusions.extend([field.name for field in opts.many_to_many])
return exclusions
def validate(self, attrs):
attrs = super(BaseSerializer, self).validate(attrs)
try:
            # Create/update a model instance and run its full_clean() method to
# do any validation implemented on the model class.
exclusions = self.get_validation_exclusions(self.instance)
obj = self.instance or self.Meta.model()
for k,v in attrs.items():
if k not in exclusions:
setattr(obj, k, v)
obj.full_clean(exclude=exclusions)
# full_clean may modify values on the instance; copy those changes
# back to attrs so they are saved.
for k in attrs.keys():
if k not in exclusions:
attrs[k] = getattr(obj, k)
except DjangoValidationError as exc:
# DjangoValidationError may contain a list or dict; normalize into a
# dict where the keys are the field name and the values are a list
# of error messages, then raise as a DRF ValidationError. DRF would
# normally convert any DjangoValidationError to a non-field specific
# error message; here we preserve field-specific errors raised from
# the model's full_clean method.
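            # Illustrative sketch with a hypothetical field: an error dict like
            # {'name': DjangoValidationError('This field cannot be blank.')}
            # is normalized to {'name': ['This field cannot be blank.']} before
            # being re-raised as a DRF ValidationError.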
d = exc.update_error_dict({})
for k,v in d.items():
v = v if isinstance(v, list) else [v]
v2 = []
for e in v:
if isinstance(e, DjangoValidationError):
v2.extend(list(e))
elif isinstance(e, list):
v2.extend(e)
else:
v2.append(e)
d[k] = list(map(force_text, v2))
raise ValidationError(d)
return attrs
def reverse(self, *args, **kwargs):
kwargs['request'] = self.context.get('request')
return reverse(*args, **kwargs)
@property
def is_detail_view(self):
if 'view' in self.context:
if 'pk' in self.context['view'].kwargs:
return True
return False
class EmptySerializer(serializers.Serializer):
pass
class UnifiedJobTemplateSerializer(BaseSerializer):
# As a base serializer, the capabilities prefetch is not used directly
_capabilities_prefetch = [
'admin', 'execute',
{'copy': ['jobtemplate.project.use', 'jobtemplate.inventory.use',
'workflowjobtemplate.organization.workflow_admin']}
]
class Meta:
model = UnifiedJobTemplate
fields = ('*', 'last_job_run', 'last_job_failed',
'next_job_run', 'status')
def get_related(self, obj):
res = super(UnifiedJobTemplateSerializer, self).get_related(obj)
if obj.current_job:
res['current_job'] = obj.current_job.get_absolute_url(request=self.context.get('request'))
if obj.last_job:
res['last_job'] = obj.last_job.get_absolute_url(request=self.context.get('request'))
if obj.next_schedule:
res['next_schedule'] = obj.next_schedule.get_absolute_url(request=self.context.get('request'))
return res
def get_types(self):
if type(self) is UnifiedJobTemplateSerializer:
return ['project', 'inventory_source', 'job_template', 'system_job_template', 'workflow_job_template',]
else:
return super(UnifiedJobTemplateSerializer, self).get_types()
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobTemplateSerializer:
if isinstance(obj, Project):
serializer_class = ProjectSerializer
elif isinstance(obj, InventorySource):
serializer_class = InventorySourceSerializer
elif isinstance(obj, JobTemplate):
serializer_class = JobTemplateSerializer
elif isinstance(obj, SystemJobTemplate):
serializer_class = SystemJobTemplateSerializer
elif isinstance(obj, WorkflowJobTemplate):
serializer_class = WorkflowJobTemplateSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
# preserve links for list view
if self.parent:
serializer.parent = self.parent
serializer.polymorphic_base = self
# capabilities prefetch is only valid for these models
if isinstance(obj, (JobTemplate, WorkflowJobTemplate)):
serializer.capabilities_prefetch = self._capabilities_prefetch
else:
serializer.capabilities_prefetch = None
return serializer.to_representation(obj)
else:
return super(UnifiedJobTemplateSerializer, self).to_representation(obj)
class UnifiedJobSerializer(BaseSerializer):
show_capabilities = ['start', 'delete']
event_processing_finished = serializers.BooleanField(
help_text=_('Indicates whether all of the events generated by this '
'unified job have been saved to the database.'),
read_only=True
)
class Meta:
model = UnifiedJob
fields = ('*', 'unified_job_template', 'launch_type', 'status',
'failed', 'started', 'finished', 'elapsed', 'job_args',
'job_cwd', 'job_env', 'job_explanation',
'execution_node', 'controller_node',
'result_traceback', 'event_processing_finished')
extra_kwargs = {
'unified_job_template': {
'source': 'unified_job_template_id',
'label': 'unified job template',
},
'job_env': {
'read_only': True,
'label': 'job_env',
}
}
def get_types(self):
if type(self) is UnifiedJobSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job',]
else:
return super(UnifiedJobSerializer, self).get_types()
def get_related(self, obj):
res = super(UnifiedJobSerializer, self).get_related(obj)
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(request=self.context.get('request'))
if obj.schedule:
res['schedule'] = obj.schedule.get_absolute_url(request=self.context.get('request'))
if isinstance(obj, ProjectUpdate):
res['stdout'] = self.reverse('api:project_update_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, InventoryUpdate):
res['stdout'] = self.reverse('api:inventory_update_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, Job):
res['stdout'] = self.reverse('api:job_stdout', kwargs={'pk': obj.pk})
elif isinstance(obj, AdHocCommand):
res['stdout'] = self.reverse('api:ad_hoc_command_stdout', kwargs={'pk': obj.pk})
if obj.workflow_job_id:
res['source_workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job_id})
return res
def get_summary_fields(self, obj):
summary_fields = super(UnifiedJobSerializer, self).get_summary_fields(obj)
if obj.spawned_by_workflow:
summary_fields['source_workflow_job'] = {}
try:
summary_obj = obj.unified_job_node.workflow_job
except UnifiedJob.unified_job_node.RelatedObjectDoesNotExist:
return summary_fields
for field in SUMMARIZABLE_FK_FIELDS['job']:
val = getattr(summary_obj, field, None)
if val is not None:
summary_fields['source_workflow_job'][field] = val
return summary_fields
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobSerializer:
if isinstance(obj, ProjectUpdate):
serializer_class = ProjectUpdateSerializer
elif isinstance(obj, InventoryUpdate):
serializer_class = InventoryUpdateSerializer
elif isinstance(obj, Job):
serializer_class = JobSerializer
elif isinstance(obj, AdHocCommand):
serializer_class = AdHocCommandSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
# preserve links for list view
if self.parent:
serializer.parent = self.parent
serializer.polymorphic_base = self
# TODO: restrict models for capabilities prefetch, when it is added
ret = serializer.to_representation(obj)
else:
ret = super(UnifiedJobSerializer, self).to_representation(obj)
if 'elapsed' in ret:
if obj and obj.pk and obj.started and not obj.finished:
td = now() - obj.started
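                # Equivalent to td.total_seconds(): report the running time of
                # an unfinished job as elapsed seconds, expressed as a float.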
ret['elapsed'] = (td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6) / (10 ** 6 * 1.0)
ret['elapsed'] = float(ret['elapsed'])
return ret
class UnifiedJobListSerializer(UnifiedJobSerializer):
class Meta:
fields = ('*', '-job_args', '-job_cwd', '-job_env', '-result_traceback', '-event_processing_finished')
def get_field_names(self, declared_fields, info):
field_names = super(UnifiedJobListSerializer, self).get_field_names(declared_fields, info)
# Meta multiple inheritance and -field_name options don't seem to be
# taking effect above, so remove the undesired fields here.
return tuple(x for x in field_names if x not in ('job_args', 'job_cwd', 'job_env', 'result_traceback', 'event_processing_finished'))
def get_types(self):
if type(self) is UnifiedJobListSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job', 'workflow_job']
else:
return super(UnifiedJobListSerializer, self).get_types()
def get_sub_serializer(self, obj):
serializer_class = None
if type(self) is UnifiedJobListSerializer:
if isinstance(obj, ProjectUpdate):
serializer_class = ProjectUpdateListSerializer
elif isinstance(obj, InventoryUpdate):
serializer_class = InventoryUpdateListSerializer
elif isinstance(obj, Job):
serializer_class = JobListSerializer
elif isinstance(obj, AdHocCommand):
serializer_class = AdHocCommandListSerializer
elif isinstance(obj, SystemJob):
serializer_class = SystemJobListSerializer
elif isinstance(obj, WorkflowJob):
serializer_class = WorkflowJobListSerializer
return serializer_class
def to_representation(self, obj):
serializer_class = self.get_sub_serializer(obj)
if serializer_class:
serializer = serializer_class(instance=obj, context=self.context)
ret = serializer.to_representation(obj)
else:
ret = super(UnifiedJobListSerializer, self).to_representation(obj)
if 'elapsed' in ret:
ret['elapsed'] = float(ret['elapsed'])
return ret
class UnifiedJobStdoutSerializer(UnifiedJobSerializer):
result_stdout = serializers.SerializerMethodField()
class Meta:
fields = ('result_stdout',)
def get_types(self):
if type(self) is UnifiedJobStdoutSerializer:
return ['project_update', 'inventory_update', 'job', 'ad_hoc_command', 'system_job']
else:
return super(UnifiedJobStdoutSerializer, self).get_types()
class UserSerializer(BaseSerializer):
password = serializers.CharField(required=False, default='', write_only=True,
help_text=_('Write-only field used to change the password.'))
ldap_dn = serializers.CharField(source='profile.ldap_dn', read_only=True)
external_account = serializers.SerializerMethodField(help_text=_('Set if the account is managed by an external service'))
is_system_auditor = serializers.BooleanField(default=False)
show_capabilities = ['edit', 'delete']
class Meta:
model = User
fields = ('*', '-name', '-description', '-modified',
'username', 'first_name', 'last_name',
'email', 'is_superuser', 'is_system_auditor', 'password', 'ldap_dn', 'last_login', 'external_account')
def to_representation(self, obj): # TODO: Remove in 3.3
ret = super(UserSerializer, self).to_representation(obj)
ret.pop('password', None)
        if obj and (type(self) is UserSerializer or self.version == 1):
ret['auth'] = obj.social_auth.values('provider', 'uid')
return ret
def get_validation_exclusions(self, obj=None):
ret = super(UserSerializer, self).get_validation_exclusions(obj)
ret.extend(['password', 'is_system_auditor'])
return ret
def validate_password(self, value):
if not self.instance and value in (None, ''):
raise serializers.ValidationError(_('Password required for new User.'))
return value
def _update_password(self, obj, new_password):
# For now we're not raising an error, just not saving password for
# users managed by LDAP who already have an unusable password set.
if getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
try:
if obj.pk and obj.profile.ldap_dn and not obj.has_usable_password():
new_password = None
except AttributeError:
pass
if (getattr(settings, 'SOCIAL_AUTH_GOOGLE_OAUTH2_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_ORG_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_GITHUB_TEAM_KEY', None) or
getattr(settings, 'SOCIAL_AUTH_SAML_ENABLED_IDPS', None)) and obj.social_auth.all():
new_password = None
if (getattr(settings, 'RADIUS_SERVER', None) or
getattr(settings, 'TACACSPLUS_HOST', None)) and obj.enterprise_auth.all():
new_password = None
if new_password:
obj.set_password(new_password)
obj.save(update_fields=['password'])
# Cycle the session key, but if the requesting user is the same
# as the modified user then inject a session key derived from
# the updated user to prevent logout. This is the logic used by
# the Django admin's own user_change_password view.
update_session_auth_hash(self.context['request'], obj)
elif not obj.password:
obj.set_unusable_password()
obj.save(update_fields=['password'])
def get_external_account(self, obj):
return get_external_account(obj)
def create(self, validated_data):
new_password = validated_data.pop('password', None)
is_system_auditor = validated_data.pop('is_system_auditor', None)
obj = super(UserSerializer, self).create(validated_data)
self._update_password(obj, new_password)
if is_system_auditor is not None:
obj.is_system_auditor = is_system_auditor
return obj
def update(self, obj, validated_data):
new_password = validated_data.pop('password', None)
is_system_auditor = validated_data.pop('is_system_auditor', None)
obj = super(UserSerializer, self).update(obj, validated_data)
self._update_password(obj, new_password)
if is_system_auditor is not None:
obj.is_system_auditor = is_system_auditor
return obj
def get_related(self, obj):
res = super(UserSerializer, self).get_related(obj)
res.update(dict(
teams = self.reverse('api:user_teams_list', kwargs={'pk': obj.pk}),
organizations = self.reverse('api:user_organizations_list', kwargs={'pk': obj.pk}),
admin_of_organizations = self.reverse('api:user_admin_of_organizations_list', kwargs={'pk': obj.pk}),
projects = self.reverse('api:user_projects_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:user_credentials_list', kwargs={'pk': obj.pk}),
roles = self.reverse('api:user_roles_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:user_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:user_access_list', kwargs={'pk': obj.pk}),
tokens = self.reverse('api:o_auth2_token_list', kwargs={'pk': obj.pk}),
authorized_tokens = self.reverse('api:user_authorized_token_list', kwargs={'pk': obj.pk}),
personal_tokens = self.reverse('api:user_personal_token_list', kwargs={'pk': obj.pk}),
))
return res
def _validate_ldap_managed_field(self, value, field_name):
if not getattr(settings, 'AUTH_LDAP_SERVER_URI', None):
return value
try:
is_ldap_user = bool(self.instance and self.instance.profile.ldap_dn)
except AttributeError:
is_ldap_user = False
if is_ldap_user:
ldap_managed_fields = ['username']
ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_ATTR_MAP', {}).keys())
ldap_managed_fields.extend(getattr(settings, 'AUTH_LDAP_USER_FLAGS_BY_GROUP', {}).keys())
if field_name in ldap_managed_fields:
if value != getattr(self.instance, field_name):
raise serializers.ValidationError(_('Unable to change %s on user managed by LDAP.') % field_name)
return value
def validate_username(self, value):
return self._validate_ldap_managed_field(value, 'username')
def validate_first_name(self, value):
return self._validate_ldap_managed_field(value, 'first_name')
def validate_last_name(self, value):
return self._validate_ldap_managed_field(value, 'last_name')
def validate_email(self, value):
return self._validate_ldap_managed_field(value, 'email')
def validate_is_superuser(self, value):
return self._validate_ldap_managed_field(value, 'is_superuser')
class UserActivityStreamSerializer(UserSerializer):
"""Changes to system auditor status are shown as separate entries,
so by excluding it from fields here we avoid duplication, which
would carry some unintended consequences.
"""
class Meta:
model = User
fields = ('*', '-is_system_auditor')
class BaseOAuth2TokenSerializer(BaseSerializer):
refresh_token = serializers.SerializerMethodField()
token = serializers.SerializerMethodField()
ALLOWED_SCOPES = ['read', 'write']
class Meta:
model = OAuth2AccessToken
fields = (
'*', '-name', 'description', 'user', 'token', 'refresh_token',
'application', 'expires', 'scope',
)
read_only_fields = ('user', 'token', 'expires', 'refresh_token')
extra_kwargs = {
'scope': {'allow_null': False, 'required': False},
'user': {'allow_null': False, 'required': True}
}
def get_token(self, obj):
request = self.context.get('request', None)
try:
if request.method == 'POST':
return obj.token
else:
return CENSOR_VALUE
except ObjectDoesNotExist:
return ''
def get_refresh_token(self, obj):
request = self.context.get('request', None)
try:
if not obj.refresh_token:
return None
elif request.method == 'POST':
return getattr(obj.refresh_token, 'token', '')
else:
return CENSOR_VALUE
except ObjectDoesNotExist:
return None
def get_related(self, obj):
ret = super(BaseOAuth2TokenSerializer, self).get_related(obj)
if obj.user:
ret['user'] = self.reverse('api:user_detail', kwargs={'pk': obj.user.pk})
if obj.application:
ret['application'] = self.reverse(
'api:o_auth2_application_detail', kwargs={'pk': obj.application.pk}
)
ret['activity_stream'] = self.reverse(
'api:o_auth2_token_activity_stream_list', kwargs={'pk': obj.pk}
)
return ret
def _is_valid_scope(self, value):
if not value or (not isinstance(value, str)):
return False
words = value.split()
for word in words:
if words.count(word) > 1:
return False # do not allow duplicates
if word not in self.ALLOWED_SCOPES:
return False
return True
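    # Illustrative examples for the check above: 'read write' and 'write' are
    # accepted, while 'read read' (duplicate) and 'admin' (not in
    # ALLOWED_SCOPES) are rejected.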
def validate_scope(self, value):
if not self._is_valid_scope(value):
raise serializers.ValidationError(_(
'Must be a simple space-separated string with allowed scopes {}.'
).format(self.ALLOWED_SCOPES))
return value
def create(self, validated_data):
validated_data['user'] = self.context['request'].user
try:
return super(BaseOAuth2TokenSerializer, self).create(validated_data)
except oauth2.AccessDeniedError as e:
raise PermissionDenied(str(e))
class UserAuthorizedTokenSerializer(BaseOAuth2TokenSerializer):
class Meta:
extra_kwargs = {
'scope': {'allow_null': False, 'required': False},
'user': {'allow_null': False, 'required': True},
'application': {'allow_null': False, 'required': True}
}
def create(self, validated_data):
current_user = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
obj = super(UserAuthorizedTokenSerializer, self).create(validated_data)
obj.save()
if obj.application and obj.application.authorization_grant_type != 'implicit':
RefreshToken.objects.create(
user=current_user,
token=generate_token(),
application=obj.application,
access_token=obj
)
return obj
class OAuth2TokenSerializer(BaseOAuth2TokenSerializer):
def create(self, validated_data):
current_user = self.context['request'].user
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
obj = super(OAuth2TokenSerializer, self).create(validated_data)
if obj.application and obj.application.user:
obj.user = obj.application.user
obj.save()
if obj.application and obj.application.authorization_grant_type != 'implicit':
RefreshToken.objects.create(
user=current_user,
token=generate_token(),
application=obj.application,
access_token=obj
)
return obj
class OAuth2TokenDetailSerializer(OAuth2TokenSerializer):
class Meta:
read_only_fields = ('*', 'user', 'application')
class UserPersonalTokenSerializer(BaseOAuth2TokenSerializer):
class Meta:
read_only_fields = ('user', 'token', 'expires', 'application')
def create(self, validated_data):
validated_data['token'] = generate_token()
validated_data['expires'] = now() + timedelta(
seconds=settings.OAUTH2_PROVIDER['ACCESS_TOKEN_EXPIRE_SECONDS']
)
validated_data['application'] = None
obj = super(UserPersonalTokenSerializer, self).create(validated_data)
obj.save()
return obj
class OAuth2ApplicationSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = OAuth2Application
fields = (
'*', 'description', '-user', 'client_id', 'client_secret', 'client_type',
'redirect_uris', 'authorization_grant_type', 'skip_authorization', 'organization'
)
read_only_fields = ('client_id', 'client_secret')
read_only_on_update_fields = ('user', 'authorization_grant_type')
extra_kwargs = {
'user': {'allow_null': True, 'required': False},
'organization': {'allow_null': False},
'authorization_grant_type': {'allow_null': False, 'label': _('Authorization Grant Type')},
'client_secret': {
'label': _('Client Secret')
},
'client_type': {
'label': _('Client Type')
},
'redirect_uris': {
'label': _('Redirect URIs')
},
'skip_authorization': {
'label': _('Skip Authorization')
},
}
def to_representation(self, obj):
ret = super(OAuth2ApplicationSerializer, self).to_representation(obj)
request = self.context.get('request', None)
if request.method != 'POST' and obj.client_type == 'confidential':
ret['client_secret'] = CENSOR_VALUE
if obj.client_type == 'public':
ret.pop('client_secret', None)
return ret
def get_related(self, obj):
res = super(OAuth2ApplicationSerializer, self).get_related(obj)
res.update(dict(
tokens = self.reverse('api:o_auth2_application_token_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse(
'api:o_auth2_application_activity_stream_list', kwargs={'pk': obj.pk}
)
))
return res
def get_modified(self, obj):
if obj is None:
return None
return obj.updated
def _summary_field_tokens(self, obj):
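        # Counting strategy: if the access tokens were prefetched, len() avoids
        # an extra COUNT query; otherwise a slice shorter than 10 already holds
        # every token, and only a full slice requires a .count() query.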
token_list = [{'id': x.pk, 'token': CENSOR_VALUE, 'scope': x.scope} for x in obj.oauth2accesstoken_set.all()[:10]]
if has_model_field_prefetched(obj, 'oauth2accesstoken_set'):
token_count = len(obj.oauth2accesstoken_set.all())
else:
if len(token_list) < 10:
token_count = len(token_list)
else:
token_count = obj.oauth2accesstoken_set.count()
return {'count': token_count, 'results': token_list}
def get_summary_fields(self, obj):
ret = super(OAuth2ApplicationSerializer, self).get_summary_fields(obj)
ret['tokens'] = self._summary_field_tokens(obj)
return ret
class OrganizationSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Organization
fields = ('*', 'max_hosts', 'custom_virtualenv',)
def get_related(self, obj):
res = super(OrganizationSerializer, self).get_related(obj)
res.update(dict(
projects = self.reverse('api:organization_projects_list', kwargs={'pk': obj.pk}),
inventories = self.reverse('api:organization_inventories_list', kwargs={'pk': obj.pk}),
workflow_job_templates = self.reverse('api:organization_workflow_job_templates_list', kwargs={'pk': obj.pk}),
users = self.reverse('api:organization_users_list', kwargs={'pk': obj.pk}),
admins = self.reverse('api:organization_admins_list', kwargs={'pk': obj.pk}),
teams = self.reverse('api:organization_teams_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:organization_credential_list', kwargs={'pk': obj.pk}),
applications = self.reverse('api:organization_applications_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:organization_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates = self.reverse('api:organization_notification_templates_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:organization_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:organization_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:organization_notification_templates_error_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:organization_object_roles_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:organization_access_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:organization_instance_groups_list', kwargs={'pk': obj.pk}),
))
return res
def get_summary_fields(self, obj):
summary_dict = super(OrganizationSerializer, self).get_summary_fields(obj)
counts_dict = self.context.get('related_field_counts', None)
if counts_dict is not None and summary_dict is not None:
if obj.id not in counts_dict:
summary_dict['related_field_counts'] = {
'inventories': 0, 'teams': 0, 'users': 0,
'job_templates': 0, 'admins': 0, 'projects': 0}
else:
summary_dict['related_field_counts'] = counts_dict[obj.id]
return summary_dict
def validate(self, attrs):
obj = self.instance
view = self.context['view']
obj_limit = getattr(obj, 'max_hosts', None)
api_limit = attrs.get('max_hosts')
if not view.request.user.is_superuser:
if api_limit is not None and api_limit != obj_limit:
# Only allow superusers to edit the max_hosts field
raise serializers.ValidationError(_('Cannot change max_hosts.'))
return super(OrganizationSerializer, self).validate(attrs)
class ProjectOptionsSerializer(BaseSerializer):
class Meta:
fields = ('*', 'local_path', 'scm_type', 'scm_url', 'scm_branch',
'scm_clean', 'scm_delete_on_update', 'credential', 'timeout',)
def get_related(self, obj):
res = super(ProjectOptionsSerializer, self).get_related(obj)
if obj.credential:
res['credential'] = self.reverse('api:credential_detail',
kwargs={'pk': obj.credential.pk})
return res
def validate(self, attrs):
errors = {}
# Don't allow assigning a local_path used by another project.
# Don't allow assigning a local_path when scm_type is set.
valid_local_paths = Project.get_local_path_choices()
if self.instance:
scm_type = attrs.get('scm_type', self.instance.scm_type) or u''
else:
scm_type = attrs.get('scm_type', u'') or u''
if self.instance and not scm_type:
valid_local_paths.append(self.instance.local_path)
if scm_type:
attrs.pop('local_path', None)
if 'local_path' in attrs and attrs['local_path'] not in valid_local_paths:
errors['local_path'] = _('This path is already being used by another manual project.')
if errors:
raise serializers.ValidationError(errors)
return super(ProjectOptionsSerializer, self).validate(attrs)
def to_representation(self, obj):
ret = super(ProjectOptionsSerializer, self).to_representation(obj)
if obj is not None and 'credential' in ret and not obj.credential:
ret['credential'] = None
return ret
class ProjectSerializer(UnifiedJobTemplateSerializer, ProjectOptionsSerializer):
status = serializers.ChoiceField(choices=Project.PROJECT_STATUS_CHOICES, read_only=True)
last_update_failed = serializers.BooleanField(read_only=True)
last_updated = serializers.DateTimeField(read_only=True)
show_capabilities = ['start', 'schedule', 'edit', 'delete', 'copy']
capabilities_prefetch = [
'admin', 'update',
{'copy': 'organization.project_admin'}
]
class Meta:
model = Project
fields = ('*', 'organization', 'scm_update_on_launch',
'scm_update_cache_timeout', 'scm_revision', 'custom_virtualenv',) + \
('last_update_failed', 'last_updated') # Backwards compatibility
def get_related(self, obj):
res = super(ProjectSerializer, self).get_related(obj)
res.update(dict(
teams = self.reverse('api:project_teams_list', kwargs={'pk': obj.pk}),
playbooks = self.reverse('api:project_playbooks', kwargs={'pk': obj.pk}),
inventory_files = self.reverse('api:project_inventories', kwargs={'pk': obj.pk}),
update = self.reverse('api:project_update_view', kwargs={'pk': obj.pk}),
project_updates = self.reverse('api:project_updates_list', kwargs={'pk': obj.pk}),
scm_inventory_sources = self.reverse('api:project_scm_inventory_sources', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:project_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:project_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:project_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:project_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:project_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:project_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:project_object_roles_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:project_copy', kwargs={'pk': obj.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail',
kwargs={'pk': obj.organization.pk})
# Backwards compatibility.
if obj.current_update:
res['current_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.current_update.pk})
if obj.last_update:
res['last_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.last_update.pk})
return res
def to_representation(self, obj):
ret = super(ProjectSerializer, self).to_representation(obj)
if 'scm_revision' in ret and obj.scm_type == '':
ret['scm_revision'] = ''
return ret
def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
organization = None
if 'organization' in attrs:
organization = attrs['organization']
elif self.instance:
organization = self.instance.organization
view = self.context.get('view', None)
if not organization and not view.request.user.is_superuser:
# Only allow super users to create orgless projects
raise serializers.ValidationError(_('Organization is missing'))
elif get_field_from_model_or_attrs('scm_type') == '':
for fd in ('scm_update_on_launch', 'scm_delete_on_update', 'scm_clean'):
if get_field_from_model_or_attrs(fd):
raise serializers.ValidationError({fd: _('Update options must be set to false for manual projects.')})
return super(ProjectSerializer, self).validate(attrs)
class ProjectPlaybooksSerializer(ProjectSerializer):
playbooks = serializers.SerializerMethodField(help_text=_('Array of playbooks available within this project.'))
class Meta:
model = Project
fields = ('playbooks',)
def get_playbooks(self, obj):
return obj.playbook_files if obj.scm_type else obj.playbooks
@property
def data(self):
ret = super(ProjectPlaybooksSerializer, self).data
ret = ret.get('playbooks', [])
return ReturnList(ret, serializer=self)
class ProjectInventoriesSerializer(ProjectSerializer):
inventory_files = serializers.ReadOnlyField(help_text=_(
'Array of inventory files and directories available within this project, '
'not comprehensive.'))
class Meta:
model = Project
fields = ('inventory_files',)
@property
def data(self):
ret = super(ProjectInventoriesSerializer, self).data
ret = ret.get('inventory_files', [])
return ReturnList(ret, serializer=self)
class ProjectUpdateViewSerializer(ProjectSerializer):
can_update = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_update',)
class ProjectUpdateSerializer(UnifiedJobSerializer, ProjectOptionsSerializer):
class Meta:
model = ProjectUpdate
fields = ('*', 'project', 'job_type', '-controller_node')
def get_related(self, obj):
res = super(ProjectUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
project = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk}),
))
except ObjectDoesNotExist:
pass
res.update(dict(
cancel = self.reverse('api:project_update_cancel', kwargs={'pk': obj.pk}),
scm_inventory_updates = self.reverse('api:project_update_scm_inventory_updates', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:project_update_notifications_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:project_update_events_list', kwargs={'pk': obj.pk}),
))
return res
class ProjectUpdateDetailSerializer(ProjectUpdateSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
playbook_counts = serializers.SerializerMethodField(
help_text=_('A count of all plays and tasks for the job run.'),
)
class Meta:
model = ProjectUpdate
fields = ('*', 'host_status_counts', 'playbook_counts',)
def get_playbook_counts(self, obj):
task_count = obj.project_update_events.filter(event='playbook_on_task_start').count()
play_count = obj.project_update_events.filter(event='playbook_on_play_start').count()
data = {'play_count': play_count, 'task_count': task_count}
return data
def get_host_status_counts(self, obj):
try:
counts = obj.project_update_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except ProjectUpdateEvent.DoesNotExist:
counts = {}
return counts
class ProjectUpdateListSerializer(ProjectUpdateSerializer, UnifiedJobListSerializer):
class Meta:
model = ProjectUpdate
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class ProjectUpdateCancelSerializer(ProjectUpdateSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class BaseSerializerWithVariables(BaseSerializer):
def validate_variables(self, value):
return vars_validate_or_raise(value)
class InventorySerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete', 'adhoc', 'copy']
capabilities_prefetch = [
'admin', 'adhoc',
{'copy': 'organization.inventory_admin'}
]
groups_with_active_failures = serializers.IntegerField(
read_only=True,
min_value=0,
help_text=_('This field has been deprecated and will be removed in a future release')
)
class Meta:
model = Inventory
fields = ('*', 'organization', 'kind', 'host_filter', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources',
'total_inventory_sources', 'inventory_sources_with_failures',
'insights_credential', 'pending_deletion',)
def get_related(self, obj):
res = super(InventorySerializer, self).get_related(obj)
res.update(dict(
hosts = self.reverse('api:inventory_hosts_list', kwargs={'pk': obj.pk}),
groups = self.reverse('api:inventory_groups_list', kwargs={'pk': obj.pk}),
root_groups = self.reverse('api:inventory_root_groups_list', kwargs={'pk': obj.pk}),
variable_data = self.reverse('api:inventory_variable_data', kwargs={'pk': obj.pk}),
script = self.reverse('api:inventory_script_view', kwargs={'pk': obj.pk}),
tree = self.reverse('api:inventory_tree_view', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:inventory_inventory_sources_list', kwargs={'pk': obj.pk}),
update_inventory_sources = self.reverse('api:inventory_inventory_sources_update', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:inventory_activity_stream_list', kwargs={'pk': obj.pk}),
job_templates = self.reverse('api:inventory_job_template_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:inventory_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:inventory_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:inventory_object_roles_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:inventory_instance_groups_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:inventory_copy', kwargs={'pk': obj.pk})
if obj.insights_credential:
res['insights_credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.insights_credential.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def to_representation(self, obj):
ret = super(InventorySerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
def validate_host_filter(self, host_filter):
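        # Illustrative sketch with hypothetical filters: a lookup such as
        # 'ansible_facts__ansible_distribution__contains=RedHat' is rejected
        # because only __exact is supported against ansible_facts, while an
        # ordinary filter like 'name=web1' is passed on to SmartFilter.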
if host_filter:
try:
for match in JSONBField.get_lookups().keys():
if match == 'exact':
# __exact is allowed
continue
match = '__{}'.format(match)
if re.match(
'ansible_facts[^=]+{}='.format(match),
host_filter
):
raise models.base.ValidationError({
'host_filter': 'ansible_facts does not support searching with {}'.format(match)
})
SmartFilter().query_from_string(host_filter)
except RuntimeError as e:
raise models.base.ValidationError(e)
return host_filter
def validate(self, attrs):
kind = None
if 'kind' in attrs:
kind = attrs['kind']
elif self.instance:
kind = self.instance.kind
host_filter = None
if 'host_filter' in attrs:
host_filter = attrs['host_filter']
elif self.instance:
host_filter = self.instance.host_filter
if kind == 'smart' and not host_filter:
raise serializers.ValidationError({'host_filter': _(
'Smart inventories must specify host_filter')})
return super(InventorySerializer, self).validate(attrs)
# TODO: Remove entire serializer in 3.3, replace with normal serializer
class InventoryDetailSerializer(InventorySerializer):
def get_fields(self):
fields = super(InventoryDetailSerializer, self).get_fields()
if self.version == 1:
fields['can_run_ad_hoc_commands'] = serializers.SerializerMethodField()
return fields
def get_can_run_ad_hoc_commands(self, obj):
view = self.context.get('view', None)
return bool(obj and view and view.request and view.request.user and view.request.user.can_access(Inventory, 'run_ad_hoc_commands', obj))
class InventoryScriptSerializer(InventorySerializer):
class Meta:
fields = ()
class HostSerializer(BaseSerializerWithVariables):
show_capabilities = ['edit', 'delete']
capabilities_prefetch = ['inventory.admin']
class Meta:
model = Host
fields = ('*', 'inventory', 'enabled', 'instance_id', 'variables',
'has_active_failures', 'has_inventory_sources', 'last_job',
'last_job_host_summary', 'insights_system_id', 'ansible_facts_modified',)
read_only_fields = ('last_job', 'last_job_host_summary', 'insights_system_id',
'ansible_facts_modified',)
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(HostSerializer, self).build_relational_field(field_name, relation_info)
# Inventory is read-only unless creating a new host.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def get_related(self, obj):
res = super(HostSerializer, self).get_related(obj)
res.update(dict(
variable_data = self.reverse('api:host_variable_data', kwargs={'pk': obj.pk}),
groups = self.reverse('api:host_groups_list', kwargs={'pk': obj.pk}),
all_groups = self.reverse('api:host_all_groups_list', kwargs={'pk': obj.pk}),
job_events = self.reverse('api:host_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:host_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:host_activity_stream_list', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:host_inventory_sources_list', kwargs={'pk': obj.pk}),
smart_inventories = self.reverse('api:host_smart_inventories_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:host_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
ad_hoc_command_events = self.reverse('api:host_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['insights'] = self.reverse('api:host_insights', kwargs={'pk': obj.pk})
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.last_job:
res['last_job'] = self.reverse('api:job_detail', kwargs={'pk': obj.last_job.pk})
if obj.last_job_host_summary:
res['last_job_host_summary'] = self.reverse('api:job_host_summary_detail', kwargs={'pk': obj.last_job_host_summary.pk})
if self.version > 1:
res.update(dict(
ansible_facts = self.reverse('api:host_ansible_facts_detail', kwargs={'pk': obj.pk}),
))
return res
def get_summary_fields(self, obj):
d = super(HostSerializer, self).get_summary_fields(obj)
try:
d['last_job']['job_template_id'] = obj.last_job.job_template.id
d['last_job']['job_template_name'] = obj.last_job.job_template.name
except (KeyError, AttributeError):
pass
if has_model_field_prefetched(obj, 'groups'):
group_list = sorted([{'id': g.id, 'name': g.name} for g in obj.groups.all()], key=lambda x: x['id'])[:5]
else:
group_list = [{'id': g.id, 'name': g.name} for g in obj.groups.all().order_by('id')[:5]]
group_cnt = obj.groups.count()
d.setdefault('groups', {'count': group_cnt, 'results': group_list})
d.setdefault('recent_jobs', [{
'id': j.job.id,
'name': j.job.job_template.name if j.job.job_template is not None else "",
'status': j.job.status,
'finished': j.job.finished,
} for j in obj.job_host_summaries.select_related('job__job_template').order_by('-created')[:5]])
return d
def _get_host_port_from_name(self, name):
        # Allow the hostname (except IPv6 for now) to specify the port number inline.
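        # Illustrative sketch: 'web1.example.com:2222' is split into
        # ('web1.example.com', 2222); a name without a colon keeps port=None,
        # and a non-numeric or out-of-range port raises a ValidationError.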
port = None
if name.count(':') == 1:
name, port = name.split(':')
try:
port = int(port)
if port < 1 or port > 65535:
raise ValueError
except ValueError:
raise serializers.ValidationError(_(u'Invalid port specification: %s') % force_text(port))
return name, port
def validate_name(self, value):
name = force_text(value or '')
# Validate here only, update in main validate method.
host, port = self._get_host_port_from_name(name)
return value
def validate_inventory(self, value):
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Host for Smart Inventory")})
return value
def validate_variables(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
name = force_text(attrs.get('name', self.instance and self.instance.name or ''))
host, port = self._get_host_port_from_name(name)
if port:
attrs['name'] = host
variables = force_text(attrs.get('variables', self.instance and self.instance.variables or ''))
vars_dict = parse_yaml_or_json(variables)
vars_dict['ansible_ssh_port'] = port
attrs['variables'] = json.dumps(vars_dict)
return super(HostSerializer, self).validate(attrs)
def to_representation(self, obj):
ret = super(HostSerializer, self).to_representation(obj)
if not obj:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'last_job' in ret and not obj.last_job:
ret['last_job'] = None
if 'last_job_host_summary' in ret and not obj.last_job_host_summary:
ret['last_job_host_summary'] = None
return ret
class AnsibleFactsSerializer(BaseSerializer):
class Meta:
model = Host
def to_representation(self, obj):
return obj.ansible_facts
class GroupSerializer(BaseSerializerWithVariables):
capabilities_prefetch = ['inventory.admin', 'inventory.adhoc']
groups_with_active_failures = serializers.IntegerField(
read_only=True,
min_value=0,
help_text=_('This field has been deprecated and will be removed in a future release')
)
class Meta:
model = Group
fields = ('*', 'inventory', 'variables', 'has_active_failures',
'total_hosts', 'hosts_with_active_failures', 'total_groups',
'groups_with_active_failures', 'has_inventory_sources')
@property
def show_capabilities(self): # TODO: consolidate in 3.3
if self.version == 1:
return ['copy', 'edit', 'start', 'schedule', 'delete']
else:
return ['copy', 'edit', 'delete']
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(GroupSerializer, self).build_relational_field(field_name, relation_info)
# Inventory is read-only unless creating a new group.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def get_summary_fields(self, obj): # TODO: remove in 3.3
summary_fields = super(GroupSerializer, self).get_summary_fields(obj)
if self.version == 1:
try:
inv_src = obj.deprecated_inventory_source
summary_fields['inventory_source'] = {}
for field in SUMMARIZABLE_FK_FIELDS['inventory_source']:
fval = getattr(inv_src, field, None)
if fval is not None:
summary_fields['inventory_source'][field] = fval
except Group.deprecated_inventory_source.RelatedObjectDoesNotExist:
pass
return summary_fields
def get_related(self, obj):
res = super(GroupSerializer, self).get_related(obj)
res.update(dict(
variable_data = self.reverse('api:group_variable_data', kwargs={'pk': obj.pk}),
hosts = self.reverse('api:group_hosts_list', kwargs={'pk': obj.pk}),
potential_children = self.reverse('api:group_potential_children_list', kwargs={'pk': obj.pk}),
children = self.reverse('api:group_children_list', kwargs={'pk': obj.pk}),
all_hosts = self.reverse('api:group_all_hosts_list', kwargs={'pk': obj.pk}),
job_events = self.reverse('api:group_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:group_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:group_activity_stream_list', kwargs={'pk': obj.pk}),
inventory_sources = self.reverse('api:group_inventory_sources_list', kwargs={'pk': obj.pk}),
ad_hoc_commands = self.reverse('api:group_ad_hoc_commands_list', kwargs={'pk': obj.pk}),
))
if self.version == 1: # TODO: remove in 3.3
try:
res['inventory_source'] = self.reverse('api:inventory_source_detail',
kwargs={'pk': obj.deprecated_inventory_source.pk})
except Group.deprecated_inventory_source.RelatedObjectDoesNotExist:
pass
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
return res
def create(self, validated_data): # TODO: remove in 3.3
instance = super(GroupSerializer, self).create(validated_data)
if self.version == 1: # TODO: remove in 3.3
manual_src = InventorySource(deprecated_group=instance, inventory=instance.inventory)
manual_src.v1_group_name = instance.name
manual_src.save()
return instance
def validate_name(self, value):
if value in ('all', '_meta'):
raise serializers.ValidationError(_('Invalid group name.'))
return value
def validate_inventory(self, value):
if value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Group for Smart Inventory")})
return value
def to_representation(self, obj):
ret = super(GroupSerializer, self).to_representation(obj)
if obj is not None and 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
class GroupTreeSerializer(GroupSerializer):
children = serializers.SerializerMethodField()
class Meta:
model = Group
fields = ('*', 'children')
def get_children(self, obj):
if obj is None:
return {}
children_qs = obj.children
children_qs = children_qs.select_related('inventory')
children_qs = children_qs.prefetch_related('inventory_source')
return GroupTreeSerializer(children_qs, many=True).data
class BaseVariableDataSerializer(BaseSerializer):
class Meta:
fields = ('variables',)
def to_representation(self, obj):
if obj is None:
return {}
ret = super(BaseVariableDataSerializer, self).to_representation(obj)
return parse_yaml_or_json(ret.get('variables', '') or '{}')
def to_internal_value(self, data):
data = {'variables': json.dumps(data)}
return super(BaseVariableDataSerializer, self).to_internal_value(data)
class InventoryVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Inventory
class HostVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Host
class GroupVariableDataSerializer(BaseVariableDataSerializer):
class Meta:
model = Group
class CustomInventoryScriptSerializer(BaseSerializer):
script = serializers.CharField(trim_whitespace=False)
show_capabilities = ['edit', 'delete', 'copy']
capabilities_prefetch = [
{'edit': 'admin'}
]
class Meta:
model = CustomInventoryScript
fields = ('*', "script", "organization")
def validate_script(self, value):
if not value.startswith("#!"):
            raise serializers.ValidationError(_('Script must begin with a hashbang sequence, e.g. #!/usr/bin/env python'))
return value
def to_representation(self, obj):
ret = super(CustomInventoryScriptSerializer, self).to_representation(obj)
if obj is None:
return ret
request = self.context.get('request', None)
if request.user not in obj.admin_role and \
not request.user.is_superuser and \
not request.user.is_system_auditor and \
not (obj.organization is not None and request.user in obj.organization.auditor_role):
ret['script'] = None
return ret
def get_related(self, obj):
res = super(CustomInventoryScriptSerializer, self).get_related(obj)
res.update(dict(
object_roles = self.reverse('api:inventory_script_object_roles_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:inventory_script_copy', kwargs={'pk': obj.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
class InventorySourceOptionsSerializer(BaseSerializer):
credential = DeprecatedCredentialField(
help_text=_('Cloud credential to use for inventory updates.')
)
class Meta:
fields = ('*', 'source', 'source_path', 'source_script', 'source_vars', 'credential',
'source_regions', 'instance_filters', 'group_by', 'overwrite', 'overwrite_vars',
'custom_virtualenv', 'timeout', 'verbosity')
def get_related(self, obj):
res = super(InventorySourceOptionsSerializer, self).get_related(obj)
if obj.credential: # TODO: remove when 'credential' field is removed
res['credential'] = self.reverse('api:credential_detail',
kwargs={'pk': obj.credential})
if obj.source_script:
res['source_script'] = self.reverse('api:inventory_script_detail', kwargs={'pk': obj.source_script.pk})
return res
def validate_source_vars(self, value):
ret = vars_validate_or_raise(value)
for env_k in parse_yaml_or_json(value):
if env_k in settings.INV_ENV_VARIABLE_BLACKLIST:
raise serializers.ValidationError(_("`{}` is a prohibited environment variable".format(env_k)))
return ret
def validate(self, attrs):
# TODO: Validate source, validate source_regions
errors = {}
source = attrs.get('source', self.instance and self.instance.source or '')
source_script = attrs.get('source_script', self.instance and self.instance.source_script or '')
if source == 'custom':
if source_script is None or source_script == '':
errors['source_script'] = _("If 'source' is 'custom', 'source_script' must be provided.")
else:
try:
if not self.instance:
dest_inventory = attrs.get('inventory', None)
if not dest_inventory:
errors['inventory'] = _("Must provide an inventory.")
else:
dest_inventory = self.instance.inventory
if dest_inventory and source_script.organization != dest_inventory.organization:
errors['source_script'] = _("The 'source_script' does not belong to the same organization as the inventory.")
except Exception:
errors['source_script'] = _("'source_script' doesn't exist.")
logger.exception('Problem processing source_script validation.')
if errors:
raise serializers.ValidationError(errors)
return super(InventorySourceOptionsSerializer, self).validate(attrs)
# TODO: remove when old 'credential' fields are removed
def get_summary_fields(self, obj):
summary_fields = super(InventorySourceOptionsSerializer, self).get_summary_fields(obj)
all_creds = []
if 'credential' in summary_fields:
cred = obj.get_cloud_credential()
if cred:
summarized_cred = {
'id': cred.id, 'name': cred.name, 'description': cred.description,
'kind': cred.kind, 'cloud': True
}
summary_fields['credential'] = summarized_cred
all_creds.append(summarized_cred)
if self.version > 1:
summary_fields['credential']['credential_type_id'] = cred.credential_type_id
else:
summary_fields.pop('credential')
if self.version > 1:
summary_fields['credentials'] = all_creds
return summary_fields
class InventorySourceSerializer(UnifiedJobTemplateSerializer, InventorySourceOptionsSerializer):
status = serializers.ChoiceField(choices=InventorySource.INVENTORY_SOURCE_STATUS_CHOICES, read_only=True)
last_update_failed = serializers.BooleanField(read_only=True)
last_updated = serializers.DateTimeField(read_only=True)
show_capabilities = ['start', 'schedule', 'edit', 'delete']
capabilities_prefetch = [
{'admin': 'inventory.admin'},
{'start': 'inventory.update'}
]
group = serializers.SerializerMethodField(
help_text=_('Automatic group relationship, will be removed in 3.3'))
class Meta:
model = InventorySource
fields = ('*', 'name', 'inventory', 'update_on_launch', 'update_cache_timeout',
'source_project', 'update_on_project_update') + \
('last_update_failed', 'last_updated', 'group') # Backwards compatibility.
def get_related(self, obj):
res = super(InventorySourceSerializer, self).get_related(obj)
res.update(dict(
update = self.reverse('api:inventory_source_update_view', kwargs={'pk': obj.pk}),
inventory_updates = self.reverse('api:inventory_source_updates_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:inventory_source_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:inventory_source_activity_stream_list', kwargs={'pk': obj.pk}),
hosts = self.reverse('api:inventory_source_hosts_list', kwargs={'pk': obj.pk}),
groups = self.reverse('api:inventory_source_groups_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:inventory_source_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:inventory_source_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:inventory_source_notification_templates_error_list', kwargs={'pk': obj.pk}),
))
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if obj.source_project_id is not None:
res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': obj.source_project.pk})
# Backwards compatibility.
if obj.current_update:
res['current_update'] = self.reverse('api:inventory_update_detail',
kwargs={'pk': obj.current_update.pk})
if obj.last_update:
res['last_update'] = self.reverse('api:inventory_update_detail',
kwargs={'pk': obj.last_update.pk})
if self.version == 1: # TODO: remove in 3.3
if obj.deprecated_group:
res['group'] = self.reverse('api:group_detail', kwargs={'pk': obj.deprecated_group.pk})
else:
res['credentials'] = self.reverse('api:inventory_source_credentials_list', kwargs={'pk': obj.pk})
return res
def get_fields(self): # TODO: remove in 3.3
fields = super(InventorySourceSerializer, self).get_fields()
if self.version > 1:
fields.pop('group', None)
return fields
def get_summary_fields(self, obj): # TODO: remove in 3.3
summary_fields = super(InventorySourceSerializer, self).get_summary_fields(obj)
if self.version == 1 and obj.deprecated_group_id:
g = obj.deprecated_group
summary_fields['group'] = {}
for field in SUMMARIZABLE_FK_FIELDS['group']:
fval = getattr(g, field, None)
if fval is not None:
summary_fields['group'][field] = fval
return summary_fields
def get_group(self, obj): # TODO: remove in 3.3
if obj.deprecated_group:
return obj.deprecated_group.id
return None
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(InventorySourceSerializer, self).build_relational_field(field_name, relation_info)
# SCM Project and inventory are read-only unless creating a new inventory.
if self.instance and field_name == 'inventory':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
# TODO: remove when old 'credential' fields are removed
def build_field(self, field_name, info, model_class, nested_depth):
# have to special-case the field so that DRF will not automagically make it
# read-only because it's a property on the model.
if field_name == 'credential':
return self.build_standard_field(field_name, self.credential)
return super(InventorySourceOptionsSerializer, self).build_field(field_name, info, model_class, nested_depth)
def to_representation(self, obj):
ret = super(InventorySourceSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
return ret
def validate_source_project(self, value):
if value and value.scm_type == '':
raise serializers.ValidationError(_("Cannot use manual project for SCM-based inventory."))
return value
def validate_source(self, value):
if value == '':
raise serializers.ValidationError(_(
"Manual inventory sources are created automatically when a group is created in the v1 API."))
return value
def validate_update_on_project_update(self, value):
if value and self.instance and self.instance.schedules.exists():
raise serializers.ValidationError(_("Setting not compatible with existing schedules."))
return value
def validate_inventory(self, value):
if value and value.kind == 'smart':
raise serializers.ValidationError({"detail": _("Cannot create Inventory Source for Smart Inventory")})
return value
# TODO: remove when old 'credential' fields are removed
def create(self, validated_data):
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(InventorySourceSerializer, self).create(validated_data)
if deprecated_fields:
self._update_deprecated_fields(deprecated_fields, obj)
return obj
# TODO: remove when old 'credential' fields are removed
def update(self, obj, validated_data):
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(InventorySourceSerializer, self).update(obj, validated_data)
if deprecated_fields:
self._update_deprecated_fields(deprecated_fields, obj)
return obj
# TODO: remove when old 'credential' fields are removed
def _update_deprecated_fields(self, fields, obj):
if 'credential' in fields:
new_cred = fields['credential']
existing = obj.credentials.all()
if new_cred not in existing:
for cred in existing:
# Remove all other cloud credentials
obj.credentials.remove(cred)
if new_cred:
# Add new credential
obj.credentials.add(new_cred)
def validate(self, attrs):
deprecated_fields = {}
if 'credential' in attrs: # TODO: remove when 'credential' field removed
deprecated_fields['credential'] = attrs.pop('credential')
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
if get_field_from_model_or_attrs('source') != 'scm':
redundant_scm_fields = list(filter(
lambda x: attrs.get(x, None),
['source_project', 'source_path', 'update_on_project_update']
))
if redundant_scm_fields:
raise serializers.ValidationError(
{"detail": _("Cannot set %s if not SCM type." % ' '.join(redundant_scm_fields))}
)
attrs = super(InventorySourceSerializer, self).validate(attrs)
# Check type consistency of source and cloud credential, if provided
if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated
cred = deprecated_fields['credential']
attrs['credential'] = cred
if cred is not None:
cred = Credential.objects.get(pk=cred)
view = self.context.get('view', None)
if (not view) or (not view.request) or (view.request.user not in cred.use_role):
raise PermissionDenied()
cred_error = InventorySource.cloud_credential_validation(
get_field_from_model_or_attrs('source'),
cred
)
if cred_error:
raise serializers.ValidationError({"credential": cred_error})
return attrs
class InventorySourceUpdateSerializer(InventorySourceSerializer):
can_update = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_update',)
class InventoryUpdateSerializer(UnifiedJobSerializer, InventorySourceOptionsSerializer):
custom_virtualenv = serializers.ReadOnlyField()
class Meta:
model = InventoryUpdate
fields = ('*', 'inventory', 'inventory_source', 'license_error', 'org_host_limit_error',
'source_project_update', 'custom_virtualenv', '-controller_node',)
def get_related(self, obj):
res = super(InventoryUpdateSerializer, self).get_related(obj)
try:
res.update(dict(
inventory_source = self.reverse(
'api:inventory_source_detail', kwargs={'pk': obj.inventory_source.pk}
),
))
except ObjectDoesNotExist:
pass
res.update(dict(
cancel = self.reverse('api:inventory_update_cancel', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:inventory_update_notifications_list', kwargs={'pk': obj.pk}),
events = self.reverse('api:inventory_update_events_list', kwargs={'pk': obj.pk}),
))
if obj.source_project_update_id:
res['source_project_update'] = self.reverse('api:project_update_detail',
kwargs={'pk': obj.source_project_update.pk})
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
if self.version > 1:
res['credentials'] = self.reverse('api:inventory_update_credentials_list', kwargs={'pk': obj.pk})
return res
class InventoryUpdateDetailSerializer(InventoryUpdateSerializer):
source_project = serializers.SerializerMethodField(
help_text=_('The project used for this job.'),
method_name='get_source_project_id'
)
class Meta:
model = InventoryUpdate
fields = ('*', 'source_project',)
def get_source_project(self, obj):
return getattrd(obj, 'source_project_update.unified_job_template', None)
def get_source_project_id(self, obj):
return getattrd(obj, 'source_project_update.unified_job_template.id', None)
def get_related(self, obj):
res = super(InventoryUpdateDetailSerializer, self).get_related(obj)
source_project_id = self.get_source_project_id(obj)
if source_project_id:
res['source_project'] = self.reverse('api:project_detail', kwargs={'pk': source_project_id})
return res
def get_summary_fields(self, obj):
summary_fields = super(InventoryUpdateDetailSerializer, self).get_summary_fields(obj)
summary_obj = self.get_source_project(obj)
if summary_obj:
summary_fields['source_project'] = {}
for field in SUMMARIZABLE_FK_FIELDS['project']:
value = getattr(summary_obj, field, None)
if value is not None:
summary_fields['source_project'][field] = value
return summary_fields
class InventoryUpdateListSerializer(InventoryUpdateSerializer, UnifiedJobListSerializer):
class Meta:
model = InventoryUpdate
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class InventoryUpdateCancelSerializer(InventoryUpdateSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class TeamSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
class Meta:
model = Team
fields = ('*', 'organization')
def get_related(self, obj):
res = super(TeamSerializer, self).get_related(obj)
res.update(dict(
projects = self.reverse('api:team_projects_list', kwargs={'pk': obj.pk}),
users = self.reverse('api:team_users_list', kwargs={'pk': obj.pk}),
credentials = self.reverse('api:team_credentials_list', kwargs={'pk': obj.pk}),
roles = self.reverse('api:team_roles_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:team_object_roles_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:team_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:team_access_list', kwargs={'pk': obj.pk}),
))
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def to_representation(self, obj):
ret = super(TeamSerializer, self).to_representation(obj)
if obj is not None and 'organization' in ret and not obj.organization:
ret['organization'] = None
return ret
class RoleSerializer(BaseSerializer):
class Meta:
model = Role
fields = ('*', '-created', '-modified')
read_only_fields = ('id', 'role_field', 'description', 'name')
def to_representation(self, obj):
ret = super(RoleSerializer, self).to_representation(obj)
if obj.object_id:
content_object = obj.content_object
if hasattr(content_object, 'username'):
ret['summary_fields']['resource_name'] = obj.content_object.username
if hasattr(content_object, 'name'):
ret['summary_fields']['resource_name'] = obj.content_object.name
content_model = obj.content_type.model_class()
ret['summary_fields']['resource_type'] = get_type_for_model(content_model)
ret['summary_fields']['resource_type_display_name'] = content_model._meta.verbose_name.title()
return ret
def get_related(self, obj):
ret = super(RoleSerializer, self).get_related(obj)
ret['users'] = self.reverse('api:role_users_list', kwargs={'pk': obj.pk})
ret['teams'] = self.reverse('api:role_teams_list', kwargs={'pk': obj.pk})
try:
if obj.content_object:
ret.update(reverse_gfk(obj.content_object, self.context.get('request')))
except AttributeError:
# AttributeErrors happen if our content_object is pointing at
# a model that no longer exists. This is dirty data and ideally
# doesn't exist, but in case it does, let's not puke.
pass
return ret
class RoleSerializerWithParentAccess(RoleSerializer):
show_capabilities = ['unattach']
class ResourceAccessListElementSerializer(UserSerializer):
show_capabilities = [] # Clear fields from UserSerializer parent class
def to_representation(self, user):
'''
With this method we derive "direct" and "indirect" access lists. Contained
in the direct access list are all the roles the user is a member of, and
all of the roles that are directly granted to any teams that the user is a
member of.
The indirect access list is a list of all of the roles that the user is
a member of that are ancestors of any roles that grant permissions to
the resource.
'''
ret = super(ResourceAccessListElementSerializer, self).to_representation(user)
obj = self.context['view'].get_parent_object()
if self.context['view'].request is not None:
requesting_user = self.context['view'].request.user
else:
requesting_user = None
if 'summary_fields' not in ret:
ret['summary_fields'] = {}
def format_role_perm(role):
role_dict = { 'id': role.id, 'name': role.name, 'description': role.description}
try:
role_dict['resource_name'] = role.content_object.name
role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())
role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))
except AttributeError:
pass
if role.content_type is not None:
role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(
Role, 'unattach', role, user, 'members', data={}, skip_sub_obj_read_check=False)}
else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False}
return { 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, role)}
def format_team_role_perm(naive_team_role, permissive_role_ids):
ret = []
team_role = naive_team_role
if naive_team_role.role_field == 'admin_role':
team_role = naive_team_role.content_object.member_role
for role in team_role.children.filter(id__in=permissive_role_ids).all():
role_dict = {
'id': role.id,
'name': role.name,
'description': role.description,
'team_id': team_role.object_id,
'team_name': team_role.content_object.name,
'team_organization_name': team_role.content_object.organization.name,
}
if role.content_type is not None:
role_dict['resource_name'] = role.content_object.name
role_dict['resource_type'] = get_type_for_model(role.content_type.model_class())
role_dict['related'] = reverse_gfk(role.content_object, self.context.get('request'))
role_dict['user_capabilities'] = {'unattach': requesting_user.can_access(
Role, 'unattach', role, team_role, 'parents', data={}, skip_sub_obj_read_check=False)}
else:
# Singleton roles should not be managed from this view, as per copy/edit rework spec
role_dict['user_capabilities'] = {'unattach': False}
ret.append({ 'role': role_dict, 'descendant_roles': get_roles_on_resource(obj, team_role)})
return ret
team_content_type = ContentType.objects.get_for_model(Team)
content_type = ContentType.objects.get_for_model(obj)
direct_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('id', flat=True)
all_permissive_role_ids = Role.objects.filter(content_type=content_type, object_id=obj.id).values_list('ancestors__id', flat=True)
direct_access_roles = user.roles \
.filter(id__in=direct_permissive_role_ids).all()
direct_team_roles = Role.objects \
.filter(content_type=team_content_type,
members=user,
children__in=direct_permissive_role_ids)
if content_type == team_content_type:
# When looking at the access list for a team, exclude the entries
# for that team. This exists primarily so we don't list the read role
# as a direct role when a user is a member or admin of a team
direct_team_roles = direct_team_roles.exclude(
children__content_type=team_content_type,
children__object_id=obj.id
)
indirect_team_roles = Role.objects \
.filter(content_type=team_content_type,
members=user,
children__in=all_permissive_role_ids) \
.exclude(id__in=direct_team_roles)
indirect_access_roles = user.roles \
.filter(id__in=all_permissive_role_ids) \
.exclude(id__in=direct_permissive_role_ids) \
.exclude(id__in=direct_team_roles) \
.exclude(id__in=indirect_team_roles)
ret['summary_fields']['direct_access'] \
= [format_role_perm(r) for r in direct_access_roles.distinct()] \
+ [y for x in (format_team_role_perm(r, direct_permissive_role_ids) for r in direct_team_roles.distinct()) for y in x] \
+ [y for x in (format_team_role_perm(r, all_permissive_role_ids) for r in indirect_team_roles.distinct()) for y in x]
ret['summary_fields']['indirect_access'] \
= [format_role_perm(r) for r in indirect_access_roles.distinct()]
return ret
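# Each entry in summary_fields['direct_access'] / ['indirect_access'] has the shape
# produced by format_role_perm() above (values hypothetical):
#   {"role": {"id": 7, "name": "Admin", "description": "...",
#             "user_capabilities": {"unattach": True}},
#    "descendant_roles": <roles the grantee holds on the resource>}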
class CredentialTypeSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete']
managed_by_tower = serializers.ReadOnlyField()
class Meta:
model = CredentialType
fields = ('*', 'kind', 'namespace', 'name', 'managed_by_tower', 'inputs',
'injectors')
def validate(self, attrs):
if self.instance and self.instance.managed_by_tower:
raise PermissionDenied(
detail=_("Modifications not allowed for managed credential types")
)
if self.instance and self.instance.credentials.exists():
if 'inputs' in attrs and attrs['inputs'] != self.instance.inputs:
raise PermissionDenied(
detail=_("Modifications to inputs are not allowed for credential types that are in use")
)
ret = super(CredentialTypeSerializer, self).validate(attrs)
if 'kind' in attrs and attrs['kind'] not in ('cloud', 'net'):
raise serializers.ValidationError({
"kind": _("Must be 'cloud' or 'net', not %s") % attrs['kind']
})
fields = attrs.get('inputs', {}).get('fields', [])
for field in fields:
if field.get('ask_at_runtime', False):
raise serializers.ValidationError({"inputs": _("'ask_at_runtime' is not supported for custom credentials.")})
return ret
def get_related(self, obj):
res = super(CredentialTypeSerializer, self).get_related(obj)
res['credentials'] = self.reverse(
'api:credential_type_credential_list',
kwargs={'pk': obj.pk}
)
res['activity_stream'] = self.reverse(
'api:credential_type_activity_stream_list',
kwargs={'pk': obj.pk}
)
return res
def to_representation(self, data):
value = super(CredentialTypeSerializer, self).to_representation(data)
# translate labels and help_text for credential fields "managed by Tower"
if value.get('managed_by_tower'):
value['name'] = _(value['name'])
for field in value.get('inputs', {}).get('fields', []):
field['label'] = _(field['label'])
if 'help_text' in field:
field['help_text'] = _(field['help_text'])
return value
def filter_field_metadata(self, fields, method):
# API-created/modified CredentialType kinds are limited to
# `cloud` and `net`
if method in ('PUT', 'POST'):
fields['kind']['choices'] = list(filter(
lambda choice: choice[0] in ('cloud', 'net'),
fields['kind']['choices']
))
return fields
# TODO: remove when API v1 is removed
class V1CredentialFields(BaseSerializer, metaclass=BaseSerializerMetaclass):
class Meta:
model = Credential
fields = ('*', 'kind', 'cloud', 'host', 'username',
'password', 'security_token', 'project', 'domain',
'ssh_key_data', 'ssh_key_unlock', 'become_method',
'become_username', 'become_password', 'vault_password',
'subscription', 'tenant', 'secret', 'client', 'authorize',
'authorize_password')
def build_field(self, field_name, info, model_class, nested_depth):
if field_name in V1Credential.FIELDS:
return self.build_standard_field(field_name,
V1Credential.FIELDS[field_name])
return super(V1CredentialFields, self).build_field(field_name, info, model_class, nested_depth)
class V2CredentialFields(BaseSerializer, metaclass=BaseSerializerMetaclass):
class Meta:
model = Credential
fields = ('*', 'credential_type', 'inputs')
extra_kwargs = {
'credential_type': {
'label': _('Credential Type'),
},
}
class CredentialSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete', 'copy', 'use']
capabilities_prefetch = ['admin', 'use']
class Meta:
model = Credential
fields = ('*', 'organization')
def get_fields(self):
fields = super(CredentialSerializer, self).get_fields()
# TODO: remove when API v1 is removed
if self.version == 1:
fields.update(V1CredentialFields().get_fields())
else:
fields.update(V2CredentialFields().get_fields())
return fields
def to_representation(self, data):
value = super(CredentialSerializer, self).to_representation(data)
# TODO: remove when API v1 is removed
if self.version == 1:
if value.get('kind') == 'vault':
value['kind'] = 'ssh'
for field in V1Credential.PASSWORD_FIELDS:
if field in value and force_text(value[field]).startswith('$encrypted$'):
value[field] = '$encrypted$'
if 'inputs' in value:
value['inputs'] = data.display_inputs()
return value
def get_related(self, obj):
res = super(CredentialSerializer, self).get_related(obj)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
res.update(dict(
activity_stream = self.reverse('api:credential_activity_stream_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:credential_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:credential_object_roles_list', kwargs={'pk': obj.pk}),
owner_users = self.reverse('api:credential_owner_users_list', kwargs={'pk': obj.pk}),
owner_teams = self.reverse('api:credential_owner_teams_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:credential_copy', kwargs={'pk': obj.pk})
res['input_sources'] = self.reverse('api:credential_input_source_sublist', kwargs={'pk': obj.pk})
# TODO: remove when API v1 is removed
if self.version > 1:
res.update(dict(
credential_type = self.reverse('api:credential_type_detail', kwargs={'pk': obj.credential_type.pk}),
))
parents = [role for role in obj.admin_role.parents.all() if role.object_id is not None]
if parents:
res.update({parents[0].content_type.name:parents[0].content_object.get_absolute_url(self.context.get('request'))})
elif len(obj.admin_role.members.all()) > 0:
user = obj.admin_role.members.all()[0]
res.update({'user': self.reverse('api:user_detail', kwargs={'pk': user.pk})})
return res
def get_summary_fields(self, obj):
summary_dict = super(CredentialSerializer, self).get_summary_fields(obj)
summary_dict['owners'] = []
for user in obj.admin_role.members.all():
summary_dict['owners'].append({
'id': user.pk,
'type': 'user',
'name': user.username,
'description': ' '.join([user.first_name, user.last_name]),
'url': self.reverse('api:user_detail', kwargs={'pk': user.pk}),
})
for parent in [role for role in obj.admin_role.parents.all() if role.object_id is not None]:
summary_dict['owners'].append({
'id': parent.content_object.pk,
'type': camelcase_to_underscore(parent.content_object.__class__.__name__),
'name': parent.content_object.name,
'description': parent.content_object.description,
'url': parent.content_object.get_absolute_url(self.context.get('request')),
})
return summary_dict
def get_validation_exclusions(self, obj=None):
# CredentialType is now part of validation; legacy v1 fields (e.g.,
# 'username', 'password') in JSON POST payloads use the
# CredentialType's inputs definition to determine their validity
ret = super(CredentialSerializer, self).get_validation_exclusions(obj)
for field in ('credential_type', 'inputs'):
if field in ret:
ret.remove(field)
return ret
def to_internal_value(self, data):
# TODO: remove when API v1 is removed
if 'credential_type' not in data and self.version == 1:
# If `credential_type` is not provided, assume the payload is a
# v1 credential payload that specifies a `kind` and a flat list
# of field values
#
# In this scenario, we should automatically detect the proper
# CredentialType based on the provided values
kind = data.get('kind', 'ssh')
credential_type = CredentialType.from_v1_kind(kind, data)
if credential_type is None:
raise serializers.ValidationError({"kind": _('"%s" is not a valid choice' % kind)})
data['credential_type'] = credential_type.pk
value = OrderedDict(
list({'credential_type': credential_type}.items()) +
list(super(CredentialSerializer, self).to_internal_value(data).items())
)
# Make a set of the keys in the POST/PUT payload
# - Subtract real fields (name, organization, inputs)
# - Subtract virtual v1 fields defined on the determined credential
# type (username, password, etc...)
# - Any leftovers are invalid for the determined credential type
valid_fields = set(super(CredentialSerializer, self).get_fields().keys())
valid_fields.update(V2CredentialFields().get_fields().keys())
valid_fields.update(['kind', 'cloud'])
for field in set(data.keys()) - valid_fields - set(credential_type.defined_fields):
if data.get(field):
raise serializers.ValidationError(
{"detail": _("'{field_name}' is not a valid field for {credential_type_name}").format(
field_name=field, credential_type_name=credential_type.name
)}
)
value.pop('kind', None)
return value
return super(CredentialSerializer, self).to_internal_value(data)
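# Illustrative v1-style payload handled by the branch above (values hypothetical):
#   {"name": "db box", "kind": "ssh", "username": "admin", "password": "secret"}
# The matching CredentialType is inferred from 'kind' plus the supplied fields, and any
# leftover keys not defined for that type are rejected.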
def validate_credential_type(self, credential_type):
if self.instance and credential_type.pk != self.instance.credential_type.pk:
for rel in (
'ad_hoc_commands',
'insights_inventories',
'unifiedjobs',
'unifiedjobtemplates',
'projects',
'projectupdates',
'workflowjobnodes'
):
if getattr(self.instance, rel).count() > 0:
raise ValidationError(
_('You cannot change the credential type of the credential, as it may break the functionality'
' of the resources using it.'),
)
return credential_type
class CredentialSerializerCreate(CredentialSerializer):
user = serializers.PrimaryKeyRelatedField(
queryset=User.objects.all(),
required=False, default=None, write_only=True, allow_null=True,
help_text=_('Write-only field used to add user to owner role. If provided, '
'do not give either team or organization. Only valid for creation.'))
team = serializers.PrimaryKeyRelatedField(
queryset=Team.objects.all(),
required=False, default=None, write_only=True, allow_null=True,
help_text=_('Write-only field used to add team to owner role. If provided, '
'do not give either user or organization. Only valid for creation.'))
organization = serializers.PrimaryKeyRelatedField(
queryset=Organization.objects.all(),
required=False, default=None, allow_null=True,
help_text=_('Inherit permissions from organization roles. If provided on creation, '
'do not give either user or team.'))
class Meta:
model = Credential
fields = ('*', 'user', 'team')
def validate(self, attrs):
owner_fields = set()
for field in ('user', 'team', 'organization'):
if field in attrs:
if attrs[field]:
owner_fields.add(field)
else:
attrs.pop(field)
if not owner_fields:
raise serializers.ValidationError({"detail": _("Missing 'user', 'team', or 'organization'.")})
if attrs.get('team'):
attrs['organization'] = attrs['team'].organization
try:
return super(CredentialSerializerCreate, self).validate(attrs)
except ValidationError as e:
# TODO: remove when API v1 is removed
# If we have an `inputs` error on `/api/v1/`:
# {'inputs': {'username': [...]}}
# ...instead, send back:
# {'username': [...]}
if self.version == 1 and isinstance(e.detail.get('inputs'), dict):
e.detail = e.detail['inputs']
raise e
else:
raise
def create(self, validated_data):
user = validated_data.pop('user', None)
team = validated_data.pop('team', None)
# If our payload contains v1 credential fields, translate to the new
# model
# TODO: remove when API v1 is removed
if self.version == 1:
for attr in (
set(V1Credential.FIELDS) & set(validated_data.keys()) # set intersection
):
validated_data.setdefault('inputs', {})
value = validated_data.pop(attr)
if value:
validated_data['inputs'][attr] = value
credential = super(CredentialSerializerCreate, self).create(validated_data)
if user:
credential.admin_role.members.add(user)
if team:
if not credential.organization or team.organization.id != credential.organization.id:
raise serializers.ValidationError({"detail": _("Credential organization must be set and match before assigning to a team")})
credential.admin_role.parents.add(team.admin_role)
credential.use_role.parents.add(team.member_role)
return credential
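# At least one of 'user', 'team', or 'organization' must be supplied when creating a
# credential through this serializer; supplying a team implies that team's organization,
# and create() above grants the team admin/use access via role parents.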
class CredentialInputSourceSerializer(BaseSerializer):
show_capabilities = ['delete']
class Meta:
model = CredentialInputSource
fields = (
'*',
'input_field_name',
'metadata',
'target_credential',
'source_credential',
'-name',
)
extra_kwargs = {
'input_field_name': {'required': True},
'target_credential': {'required': True},
'source_credential': {'required': True},
}
def get_related(self, obj):
res = super(CredentialInputSourceSerializer, self).get_related(obj)
res['source_credential'] = obj.source_credential.get_absolute_url(request=self.context.get('request'))
res['target_credential'] = obj.target_credential.get_absolute_url(request=self.context.get('request'))
return res
class UserCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-team', '-organization')
class TeamCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-user', '-organization')
class OrganizationCredentialSerializerCreate(CredentialSerializerCreate):
class Meta:
model = Credential
fields = ('*', '-user', '-team')
class LabelsListMixin(object):
def _summary_field_labels(self, obj):
label_list = [{'id': x.id, 'name': x.name} for x in obj.labels.all()[:10]]
if has_model_field_prefetched(obj, 'labels'):
label_ct = len(obj.labels.all())
else:
if len(label_list) < 10:
label_ct = len(label_list)
else:
label_ct = obj.labels.count()
return {'count': label_ct, 'results': label_list}
def get_summary_fields(self, obj):
res = super(LabelsListMixin, self).get_summary_fields(obj)
res['labels'] = self._summary_field_labels(obj)
return res
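# Illustrative 'labels' summary produced by the mixin above (values hypothetical):
#   {"count": 12, "results": [{"id": 1, "name": "prod"}, {"id": 2, "name": "web"}, ...]}
# Only the first 10 labels are listed in 'results'; 'count' reflects the full total.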
# TODO: remove when API v1 is removed
class V1JobOptionsSerializer(BaseSerializer, metaclass=BaseSerializerMetaclass):
class Meta:
model = Credential
fields = ('*', 'cloud_credential', 'network_credential')
V1_FIELDS = ('cloud_credential', 'network_credential',)
def build_field(self, field_name, info, model_class, nested_depth):
if field_name in self.V1_FIELDS:
return (DeprecatedCredentialField, {})
return super(V1JobOptionsSerializer, self).build_field(field_name, info, model_class, nested_depth)
class LegacyCredentialFields(BaseSerializer, metaclass=BaseSerializerMetaclass):
class Meta:
model = Credential
fields = ('*', 'credential', 'vault_credential')
LEGACY_FIELDS = ('credential', 'vault_credential',)
def build_field(self, field_name, info, model_class, nested_depth):
if field_name in self.LEGACY_FIELDS:
return (DeprecatedCredentialField, {})
return super(LegacyCredentialFields, self).build_field(field_name, info, model_class, nested_depth)
class JobOptionsSerializer(LabelsListMixin, BaseSerializer):
class Meta:
fields = ('*', 'job_type', 'inventory', 'project', 'playbook',
'forks', 'limit', 'verbosity', 'extra_vars', 'job_tags',
'force_handlers', 'skip_tags', 'start_at_task', 'timeout',
'use_fact_cache',)
def get_fields(self):
fields = super(JobOptionsSerializer, self).get_fields()
# TODO: remove when API v1 is removed
if self.version == 1:
fields.update(V1JobOptionsSerializer().get_fields())
fields.update(LegacyCredentialFields().get_fields())
return fields
def get_related(self, obj):
res = super(JobOptionsSerializer, self).get_related(obj)
res['labels'] = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk})
try:
if obj.inventory:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory.pk})
except ObjectDoesNotExist:
setattr(obj, 'inventory', None)
try:
if obj.project:
res['project'] = self.reverse('api:project_detail', kwargs={'pk': obj.project.pk})
except ObjectDoesNotExist:
setattr(obj, 'project', None)
try:
if obj.credential:
res['credential'] = self.reverse(
'api:credential_detail', kwargs={'pk': obj.credential}
)
except ObjectDoesNotExist:
setattr(obj, 'credential', None)
try:
if obj.vault_credential:
res['vault_credential'] = self.reverse(
'api:credential_detail', kwargs={'pk': obj.vault_credential}
)
except ObjectDoesNotExist:
setattr(obj, 'vault_credential', None)
if self.version > 1:
if isinstance(obj, UnifiedJobTemplate):
res['extra_credentials'] = self.reverse(
'api:job_template_extra_credentials_list',
kwargs={'pk': obj.pk}
)
res['credentials'] = self.reverse(
'api:job_template_credentials_list',
kwargs={'pk': obj.pk}
)
elif isinstance(obj, UnifiedJob):
res['extra_credentials'] = self.reverse('api:job_extra_credentials_list', kwargs={'pk': obj.pk})
res['credentials'] = self.reverse('api:job_credentials_list', kwargs={'pk': obj.pk})
else:
cloud_cred = obj.cloud_credential
if cloud_cred:
res['cloud_credential'] = self.reverse('api:credential_detail', kwargs={'pk': cloud_cred})
net_cred = obj.network_credential
if net_cred:
res['network_credential'] = self.reverse('api:credential_detail', kwargs={'pk': net_cred})
return res
def to_representation(self, obj):
ret = super(JobOptionsSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'inventory' in ret and not obj.inventory:
ret['inventory'] = None
if 'project' in ret and not obj.project:
ret['project'] = None
if 'playbook' in ret:
ret['playbook'] = ''
ret['credential'] = obj.credential
ret['vault_credential'] = obj.vault_credential
if self.version == 1:
ret['cloud_credential'] = obj.cloud_credential
ret['network_credential'] = obj.network_credential
return ret
def create(self, validated_data):
deprecated_fields = {}
for key in ('credential', 'vault_credential', 'cloud_credential', 'network_credential'):
if key in validated_data:
deprecated_fields[key] = validated_data.pop(key)
obj = super(JobOptionsSerializer, self).create(validated_data)
if deprecated_fields: # TODO: remove in 3.3
self._update_deprecated_fields(deprecated_fields, obj)
return obj
def update(self, obj, validated_data):
deprecated_fields = {}
for key in ('credential', 'vault_credential', 'cloud_credential', 'network_credential'):
if key in validated_data:
deprecated_fields[key] = validated_data.pop(key)
obj = super(JobOptionsSerializer, self).update(obj, validated_data)
if deprecated_fields: # TODO: remove in 3.3
self._update_deprecated_fields(deprecated_fields, obj)
return obj
def _update_deprecated_fields(self, fields, obj):
for key, existing in (
('credential', obj.credentials.filter(credential_type__kind='ssh')),
('vault_credential', obj.credentials.filter(credential_type__kind='vault')),
('cloud_credential', obj.cloud_credentials),
('network_credential', obj.network_credentials),
):
if key in fields:
new_cred = fields[key]
if new_cred not in existing:
for cred in existing:
obj.credentials.remove(cred)
if new_cred:
obj.credentials.add(new_cred)
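# Setting one of the deprecated single-credential fields through the logic above replaces
# any existing credential of that kind on the object's credentials relation; passing a
# null value simply clears credentials of that kind.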
def validate(self, attrs):
v1_credentials = {}
view = self.context.get('view', None)
for attr, kind, error in (
('cloud_credential', 'cloud', _('You must provide a cloud credential.')),
('network_credential', 'net', _('You must provide a network credential.')),
('credential', 'ssh', _('You must provide an SSH credential.')),
('vault_credential', 'vault', _('You must provide a vault credential.')),
):
if kind in ('cloud', 'net') and self.version > 1:
continue # cloud and net deprecated creds are v1 only
if attr in attrs:
v1_credentials[attr] = None
pk = attrs.pop(attr)
if pk:
cred = v1_credentials[attr] = Credential.objects.get(pk=pk)
if cred.credential_type.kind != kind:
raise serializers.ValidationError({attr: error})
if ((not self.instance or cred.pk != getattr(self.instance, attr)) and
view and view.request and view.request.user not in cred.use_role):
raise PermissionDenied()
if 'project' in self.fields and 'playbook' in self.fields:
project = attrs.get('project', self.instance and self.instance.project or None)
playbook = attrs.get('playbook', self.instance and self.instance.playbook or '')
if not project:
raise serializers.ValidationError({'project': _('This field is required.')})
if project and project.scm_type and playbook and force_text(playbook) not in project.playbook_files:
raise serializers.ValidationError({'playbook': _('Playbook not found for project.')})
if project and not project.scm_type and playbook and force_text(playbook) not in project.playbooks:
raise serializers.ValidationError({'playbook': _('Playbook not found for project.')})
if project and not playbook:
raise serializers.ValidationError({'playbook': _('Must select playbook for project.')})
ret = super(JobOptionsSerializer, self).validate(attrs)
ret.update(v1_credentials)
return ret
class JobTemplateMixin(object):
'''
Provide recent jobs and survey details in summary_fields
'''
def _recent_jobs(self, obj):
# Exclude "joblets", jobs that ran as part of a sliced workflow job
uj_qs = obj.unifiedjob_unified_jobs.exclude(job__job_slice_count__gt=1).order_by('-created')
# Would like to apply .only() here, but it does not play well with non_polymorphic:
# .only('id', 'status', 'finished', 'polymorphic_ctype_id')
optimized_qs = uj_qs.non_polymorphic()
return [{
'id': x.id, 'status': x.status, 'finished': x.finished,
# Make type consistent with API top-level key, for instance workflow_job
'type': x.get_real_instance_class()._meta.verbose_name.replace(' ', '_')
} for x in optimized_qs[:10]]
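# Illustrative 'recent_jobs' entry returned above (values hypothetical):
#   {"id": 42, "status": "successful", "finished": "2018-01-01T00:00:00Z", "type": "job"}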
def get_summary_fields(self, obj):
d = super(JobTemplateMixin, self).get_summary_fields(obj)
if obj.survey_spec is not None and ('name' in obj.survey_spec and 'description' in obj.survey_spec):
d['survey'] = dict(title=obj.survey_spec['name'], description=obj.survey_spec['description'])
d['recent_jobs'] = self._recent_jobs(obj)
# TODO: remove in 3.3
if self.version == 1 and 'vault_credential' in d:
if d['vault_credential'].get('kind','') == 'vault':
d['vault_credential']['kind'] = 'ssh'
return d
class JobTemplateSerializer(JobTemplateMixin, UnifiedJobTemplateSerializer, JobOptionsSerializer):
show_capabilities = ['start', 'schedule', 'copy', 'edit', 'delete']
capabilities_prefetch = [
'admin', 'execute',
{'copy': ['project.use', 'inventory.use']}
]
status = serializers.ChoiceField(choices=JobTemplate.JOB_TEMPLATE_STATUS_CHOICES, read_only=True, required=False)
class Meta:
model = JobTemplate
fields = ('*', 'host_config_key', 'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch', 'ask_inventory_on_launch',
'ask_credential_on_launch', 'survey_enabled', 'become_enabled', 'diff_mode',
'allow_simultaneous', 'custom_virtualenv', 'job_slice_count')
def get_related(self, obj):
res = super(JobTemplateSerializer, self).get_related(obj)
res.update(dict(
jobs = self.reverse('api:job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:job_template_schedules_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:job_template_activity_stream_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:job_template_launch', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:job_template_access_list', kwargs={'pk': obj.pk}),
survey_spec = self.reverse('api:job_template_survey_spec', kwargs={'pk': obj.pk}),
labels = self.reverse('api:job_template_label_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:job_template_object_roles_list', kwargs={'pk': obj.pk}),
instance_groups = self.reverse('api:job_template_instance_groups_list', kwargs={'pk': obj.pk}),
slice_workflow_jobs = self.reverse('api:job_template_slice_workflow_jobs_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:job_template_copy', kwargs={'pk': obj.pk})
if obj.host_config_key:
res['callback'] = self.reverse('api:job_template_callback', kwargs={'pk': obj.pk})
return res
def validate(self, attrs):
def get_field_from_model_or_attrs(fd):
return attrs.get(fd, self.instance and getattr(self.instance, fd) or None)
inventory = get_field_from_model_or_attrs('inventory')
project = get_field_from_model_or_attrs('project')
if get_field_from_model_or_attrs('host_config_key') and not inventory:
raise serializers.ValidationError({'host_config_key': _(
"Cannot enable provisioning callback without an inventory set."
)})
prompting_error_message = _("Must either set a default value or ask to prompt on launch.")
if project is None:
raise serializers.ValidationError({'project': _("Job Templates must have a project assigned.")})
elif inventory is None and not get_field_from_model_or_attrs('ask_inventory_on_launch'):
raise serializers.ValidationError({'inventory': prompting_error_message})
return super(JobTemplateSerializer, self).validate(attrs)
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def get_summary_fields(self, obj):
summary_fields = super(JobTemplateSerializer, self).get_summary_fields(obj)
all_creds = []
# Organize credential data into a multitude of deprecated fields
# TODO: remove most of this as v1 is removed
vault_credential = None
credential = None
extra_creds = []
if obj.pk:
for cred in obj.credentials.all():
summarized_cred = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'cloud': cred.credential_type.kind == 'cloud'
}
if self.version > 1:
summarized_cred['credential_type_id'] = cred.credential_type_id
all_creds.append(summarized_cred)
if cred.credential_type.kind in ('cloud', 'net'):
extra_creds.append(summarized_cred)
elif summarized_cred['kind'] == 'ssh':
credential = summarized_cred
elif summarized_cred['kind'] == 'vault':
vault_credential = summarized_cred
# Selectively apply those fields, depending on view details
if (self.is_detail_view or self.version == 1) and credential:
summary_fields['credential'] = credential
else:
# Credential could be an empty dictionary in this case
summary_fields.pop('credential', None)
if (self.is_detail_view or self.version == 1) and vault_credential:
summary_fields['vault_credential'] = vault_credential
else:
# vault credential could be empty dictionary
summary_fields.pop('vault_credential', None)
if self.version > 1:
if self.is_detail_view:
summary_fields['extra_credentials'] = extra_creds
summary_fields['credentials'] = all_creds
return summary_fields
class JobTemplateWithSpecSerializer(JobTemplateSerializer):
'''
Used for activity stream entries.
'''
class Meta:
model = JobTemplate
fields = ('*', 'survey_spec')
class JobSerializer(UnifiedJobSerializer, JobOptionsSerializer):
passwords_needed_to_start = serializers.ReadOnlyField()
artifacts = serializers.SerializerMethodField()
class Meta:
model = Job
fields = ('*', 'job_template', 'passwords_needed_to_start',
'allow_simultaneous', 'artifacts', 'scm_revision',
'instance_group', 'diff_mode', 'job_slice_number', 'job_slice_count')
def get_related(self, obj):
res = super(JobSerializer, self).get_related(obj)
res.update(dict(
job_events = self.reverse('api:job_job_events_list', kwargs={'pk': obj.pk}),
job_host_summaries = self.reverse('api:job_job_host_summaries_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:job_activity_stream_list', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:job_notifications_list', kwargs={'pk': obj.pk}),
labels = self.reverse('api:job_label_list', kwargs={'pk': obj.pk}),
))
try:
if obj.job_template:
res['job_template'] = self.reverse('api:job_template_detail',
kwargs={'pk': obj.job_template.pk})
except ObjectDoesNotExist:
setattr(obj, 'job_template', None)
if (obj.can_start or True) and self.version == 1: # TODO: remove in 3.3
res['start'] = self.reverse('api:job_start', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:job_cancel', kwargs={'pk': obj.pk})
try:
if obj.project_update:
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update.pk}
)
except ObjectDoesNotExist:
pass
if self.version > 1:
res['create_schedule'] = self.reverse('api:job_create_schedule', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:job_relaunch', kwargs={'pk': obj.pk})
return res
def get_artifacts(self, obj):
if obj:
return obj.display_artifacts()
return {}
def to_internal_value(self, data):
# When creating a new job and a job template is specified, populate any
# fields not provided in data from the job template.
if not self.instance and isinstance(data, dict) and data.get('job_template', False):
try:
job_template = JobTemplate.objects.get(pk=data['job_template'])
except JobTemplate.DoesNotExist:
raise serializers.ValidationError({'job_template': _('Invalid job template.')})
data.setdefault('name', job_template.name)
data.setdefault('description', job_template.description)
data.setdefault('job_type', job_template.job_type)
if job_template.inventory:
data.setdefault('inventory', job_template.inventory.pk)
if job_template.project:
data.setdefault('project', job_template.project.pk)
data.setdefault('playbook', job_template.playbook)
if job_template.credential:
data.setdefault('credential', job_template.credential)
data.setdefault('forks', job_template.forks)
data.setdefault('limit', job_template.limit)
data.setdefault('verbosity', job_template.verbosity)
data.setdefault('extra_vars', job_template.extra_vars)
data.setdefault('job_tags', job_template.job_tags)
data.setdefault('force_handlers', job_template.force_handlers)
data.setdefault('skip_tags', job_template.skip_tags)
data.setdefault('start_at_task', job_template.start_at_task)
return super(JobSerializer, self).to_internal_value(data)
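# The setdefault() calls above mean a POST of just {"job_template": <pk>} (illustrative)
# inherits name, inventory, project, playbook and the remaining run options from that
# template; anything supplied explicitly in the payload takes precedence.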
def to_representation(self, obj):
ret = super(JobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'job_template' in ret and not obj.job_template:
ret['job_template'] = None
if 'extra_vars' in ret:
ret['extra_vars'] = obj.display_extra_vars()
return ret
def get_summary_fields(self, obj):
summary_fields = super(JobSerializer, self).get_summary_fields(obj)
all_creds = []
# Organize credential data into a multitude of deprecated fields
# TODO: remove most of this as v1 is removed
vault_credential = None
credential = None
extra_creds = []
if obj.pk:
for cred in obj.credentials.all():
summarized_cred = {
'id': cred.pk,
'name': cred.name,
'description': cred.description,
'kind': cred.kind,
'cloud': cred.credential_type.kind == 'cloud'
}
if self.version > 1:
summarized_cred['credential_type_id'] = cred.credential_type_id
all_creds.append(summarized_cred)
if cred.credential_type.kind in ('cloud', 'net'):
extra_creds.append(summarized_cred)
elif summarized_cred['kind'] == 'ssh':
credential = summarized_cred
elif summarized_cred['kind'] == 'vault':
vault_credential = summarized_cred
# Selectively apply those fields, depending on view details
if (self.is_detail_view or self.version == 1) and credential:
summary_fields['credential'] = credential
else:
# Credential could be an empty dictionary in this case
summary_fields.pop('credential', None)
if (self.is_detail_view or self.version == 1) and vault_credential:
summary_fields['vault_credential'] = vault_credential
else:
# vault credential could be empty dictionary
summary_fields.pop('vault_credential', None)
if self.version > 1:
if self.is_detail_view:
summary_fields['extra_credentials'] = extra_creds
summary_fields['credentials'] = all_creds
return summary_fields
class JobDetailSerializer(JobSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
playbook_counts = serializers.SerializerMethodField(
help_text=_('A count of all plays and tasks for the job run.'),
)
custom_virtualenv = serializers.ReadOnlyField()
class Meta:
model = Job
fields = ('*', 'host_status_counts', 'playbook_counts', 'custom_virtualenv')
def get_playbook_counts(self, obj):
task_count = obj.job_events.filter(event='playbook_on_task_start').count()
play_count = obj.job_events.filter(event='playbook_on_play_start').count()
data = {'play_count': play_count, 'task_count': task_count}
return data
def get_host_status_counts(self, obj):
try:
counts = obj.job_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except JobEvent.DoesNotExist:
counts = {}
return counts
class JobCancelSerializer(BaseSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
model = Job
fields = ('can_cancel',)
class JobRelaunchSerializer(BaseSerializer):
passwords_needed_to_start = serializers.SerializerMethodField()
retry_counts = serializers.SerializerMethodField()
hosts = serializers.ChoiceField(
required=False, allow_null=True, default='all',
choices=[
('all', _('No change to job limit')),
('failed', _('All failed and unreachable hosts'))
],
write_only=True
)
credential_passwords = VerbatimField(required=True, write_only=True)
class Meta:
model = Job
fields = ('passwords_needed_to_start', 'retry_counts', 'hosts', 'credential_passwords',)
def validate_credential_passwords(self, value):
pnts = self.instance.passwords_needed_to_start
missing = set(pnts) - set(key for key in value if value[key])
if missing:
raise serializers.ValidationError(_(
'Missing passwords needed to start: {}'.format(', '.join(missing))
))
return value
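# Illustrative relaunch payload when the job's credentials require prompting (the key
# names depend on the credentials involved, e.g. 'ssh_password'):
#   {"credential_passwords": {"ssh_password": "secret"}}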
def to_representation(self, obj):
res = super(JobRelaunchSerializer, self).to_representation(obj)
view = self.context.get('view', None)
if hasattr(view, '_raw_data_form_marker'):
password_keys = dict([(p, u'') for p in self.get_passwords_needed_to_start(obj)])
res.update(password_keys)
return res
def get_passwords_needed_to_start(self, obj):
if obj:
return obj.passwords_needed_to_start
return ''
def get_retry_counts(self, obj):
if obj.status in ACTIVE_STATES:
return _('Relaunch by host status not available until job finishes running.')
data = OrderedDict([])
for status in self.fields['hosts'].choices.keys():
data[status] = obj.retry_qs(status).count()
return data
def get_validation_exclusions(self, *args, **kwargs):
r = super(JobRelaunchSerializer, self).get_validation_exclusions(*args, **kwargs)
r.append('credential_passwords')
return r
def validate(self, attrs):
obj = self.instance
if obj.project is None:
raise serializers.ValidationError(dict(errors=[_("Job Template Project is missing or undefined.")]))
if obj.inventory is None or obj.inventory.pending_deletion:
raise serializers.ValidationError(dict(errors=[_("Job Template Inventory is missing or undefined.")]))
attrs = super(JobRelaunchSerializer, self).validate(attrs)
return attrs
class JobCreateScheduleSerializer(BaseSerializer):
can_schedule = serializers.SerializerMethodField()
prompts = serializers.SerializerMethodField()
class Meta:
model = Job
fields = ('can_schedule', 'prompts',)
def get_can_schedule(self, obj):
'''
Need both a job template and job prompts to schedule
'''
return obj.can_schedule
@staticmethod
def _summarize(res_name, obj):
summary = {}
for field in SUMMARIZABLE_FK_FIELDS[res_name]:
summary[field] = getattr(obj, field, None)
return summary
def get_prompts(self, obj):
try:
config = obj.launch_config
ret = config.prompts_dict(display=True)
if 'inventory' in ret:
ret['inventory'] = self._summarize('inventory', ret['inventory'])
if 'credentials' in ret:
all_creds = [self._summarize('credential', cred) for cred in ret['credentials']]
ret['credentials'] = all_creds
return ret
except JobLaunchConfig.DoesNotExist:
return {'all': _('Unknown, job may have been run before launch configurations were saved.')}
class AdHocCommandSerializer(UnifiedJobSerializer):
class Meta:
model = AdHocCommand
fields = ('*', 'job_type', 'inventory', 'limit', 'credential',
'module_name', 'module_args', 'forks', 'verbosity', 'extra_vars',
'become_enabled', 'diff_mode', '-unified_job_template', '-description')
extra_kwargs = {
'name': {
'read_only': True,
},
}
def get_field_names(self, declared_fields, info):
field_names = super(AdHocCommandSerializer, self).get_field_names(declared_fields, info)
# Meta multiple inheritance and -field_name options don't seem to be
# taking effect above, so remove the undesired fields here.
return tuple(x for x in field_names if x not in ('unified_job_template', 'description'))
def build_standard_field(self, field_name, model_field):
field_class, field_kwargs = super(AdHocCommandSerializer, self).build_standard_field(field_name, model_field)
# Load module name choices dynamically from DB settings.
if field_name == 'module_name':
field_class = serializers.ChoiceField
module_name_choices = [(x, x) for x in settings.AD_HOC_COMMANDS]
module_name_default = 'command' if 'command' in [x[0] for x in module_name_choices] else ''
field_kwargs['choices'] = module_name_choices
field_kwargs['required'] = bool(not module_name_default)
field_kwargs['default'] = module_name_default or serializers.empty
field_kwargs['allow_blank'] = bool(module_name_default)
field_kwargs.pop('max_length', None)
return field_class, field_kwargs
def get_related(self, obj):
res = super(AdHocCommandSerializer, self).get_related(obj)
if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
if obj.credential_id:
res['credential'] = self.reverse('api:credential_detail', kwargs={'pk': obj.credential_id})
res.update(dict(
events = self.reverse('api:ad_hoc_command_ad_hoc_command_events_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:ad_hoc_command_activity_stream_list', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:ad_hoc_command_notifications_list', kwargs={'pk': obj.pk}),
))
res['cancel'] = self.reverse('api:ad_hoc_command_cancel', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:ad_hoc_command_relaunch', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(AdHocCommandSerializer, self).to_representation(obj)
if 'inventory' in ret and not obj.inventory_id:
ret['inventory'] = None
if 'credential' in ret and not obj.credential_id:
ret['credential'] = None
# For the UI, only module_name is returned for name, instead of the
# longer module name + module_args format.
if 'name' in ret:
ret['name'] = obj.module_name
return ret
def validate(self, attrs):
ret = super(AdHocCommandSerializer, self).validate(attrs)
return ret
def validate_extra_vars(self, value):
redacted_extra_vars, removed_vars = extract_ansible_vars(value)
if removed_vars:
raise serializers.ValidationError(_(
"{} are prohibited from use in ad hoc commands."
).format(", ".join(sorted(removed_vars, reverse=True))))
return vars_validate_or_raise(value)
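# Standalone sketch of how the module_name choices in build_standard_field above
# are derived; AD_HOC_MODULES is a hypothetical stand-in for settings.AD_HOC_COMMANDS.
AD_HOC_MODULES = ['command', 'shell', 'ping']
module_name_choices = [(x, x) for x in AD_HOC_MODULES]
module_name_default = 'command' if 'command' in [x[0] for x in module_name_choices] else ''
field_kwargs_sketch = {
    'choices': module_name_choices,
    'required': bool(not module_name_default),   # only required when no default exists
    'default': module_name_default or None,      # the real field uses serializers.empty here
    'allow_blank': bool(module_name_default),
}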
class AdHocCommandDetailSerializer(AdHocCommandSerializer):
host_status_counts = serializers.SerializerMethodField(
help_text=_('A count of hosts uniquely assigned to each status.'),
)
class Meta:
model = AdHocCommand
fields = ('*', 'host_status_counts',)
def get_host_status_counts(self, obj):
try:
counts = obj.ad_hoc_command_events.only('event_data').get(event='playbook_on_stats').get_host_status_counts()
except AdHocCommandEvent.DoesNotExist:
counts = {}
return counts
class AdHocCommandCancelSerializer(AdHocCommandSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class AdHocCommandRelaunchSerializer(AdHocCommandSerializer):
class Meta:
fields = ()
def to_representation(self, obj):
if obj:
return dict([(p, u'') for p in obj.passwords_needed_to_start])
else:
return {}
class SystemJobTemplateSerializer(UnifiedJobTemplateSerializer):
class Meta:
model = SystemJobTemplate
fields = ('*', 'job_type',)
def get_related(self, obj):
res = super(SystemJobTemplateSerializer, self).get_related(obj)
res.update(dict(
jobs = self.reverse('api:system_job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:system_job_template_schedules_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:system_job_template_launch', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:system_job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:system_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:system_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
))
return res
class SystemJobSerializer(UnifiedJobSerializer):
result_stdout = serializers.SerializerMethodField()
class Meta:
model = SystemJob
fields = ('*', 'system_job_template', 'job_type', 'extra_vars', 'result_stdout', '-controller_node',)
def get_related(self, obj):
res = super(SystemJobSerializer, self).get_related(obj)
if obj.system_job_template:
res['system_job_template'] = self.reverse('api:system_job_template_detail',
kwargs={'pk': obj.system_job_template.pk})
res['notifications'] = self.reverse('api:system_job_notifications_list', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:system_job_cancel', kwargs={'pk': obj.pk})
res['events'] = self.reverse('api:system_job_events_list', kwargs={'pk': obj.pk})
return res
def get_result_stdout(self, obj):
try:
return obj.result_stdout
except StdoutMaxBytesExceeded as e:
return _(
"Standard Output too large to display ({text_size} bytes), "
"only download supported for sizes over {supported_size} bytes.").format(
text_size=e.total, supported_size=e.supported
)
class SystemJobCancelSerializer(SystemJobSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class WorkflowJobTemplateSerializer(JobTemplateMixin, LabelsListMixin, UnifiedJobTemplateSerializer):
show_capabilities = ['start', 'schedule', 'edit', 'copy', 'delete']
capabilities_prefetch = [
'admin', 'execute',
{'copy': 'organization.workflow_admin'}
]
class Meta:
model = WorkflowJobTemplate
fields = ('*', 'extra_vars', 'organization', 'survey_enabled', 'allow_simultaneous',
'ask_variables_on_launch', 'inventory', 'ask_inventory_on_launch',)
def get_related(self, obj):
res = super(WorkflowJobTemplateSerializer, self).get_related(obj)
res.update(dict(
workflow_jobs = self.reverse('api:workflow_job_template_jobs_list', kwargs={'pk': obj.pk}),
schedules = self.reverse('api:workflow_job_template_schedules_list', kwargs={'pk': obj.pk}),
launch = self.reverse('api:workflow_job_template_launch', kwargs={'pk': obj.pk}),
workflow_nodes = self.reverse('api:workflow_job_template_workflow_nodes_list', kwargs={'pk': obj.pk}),
labels = self.reverse('api:workflow_job_template_label_list', kwargs={'pk': obj.pk}),
activity_stream = self.reverse('api:workflow_job_template_activity_stream_list', kwargs={'pk': obj.pk}),
notification_templates_any = self.reverse('api:workflow_job_template_notification_templates_any_list', kwargs={'pk': obj.pk}),
notification_templates_success = self.reverse('api:workflow_job_template_notification_templates_success_list', kwargs={'pk': obj.pk}),
notification_templates_error = self.reverse('api:workflow_job_template_notification_templates_error_list', kwargs={'pk': obj.pk}),
access_list = self.reverse('api:workflow_job_template_access_list', kwargs={'pk': obj.pk}),
object_roles = self.reverse('api:workflow_job_template_object_roles_list', kwargs={'pk': obj.pk}),
survey_spec = self.reverse('api:workflow_job_template_survey_spec', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:workflow_job_template_copy', kwargs={'pk': obj.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
class WorkflowJobTemplateWithSpecSerializer(WorkflowJobTemplateSerializer):
'''
Used for activity stream entries.
'''
class Meta:
model = WorkflowJobTemplate
fields = ('*', 'survey_spec')
class WorkflowJobSerializer(LabelsListMixin, UnifiedJobSerializer):
class Meta:
model = WorkflowJob
fields = ('*', 'workflow_job_template', 'extra_vars', 'allow_simultaneous',
'job_template', 'is_sliced_job',
'-execution_node', '-event_processing_finished', '-controller_node',
'inventory',)
def get_related(self, obj):
res = super(WorkflowJobSerializer, self).get_related(obj)
if obj.workflow_job_template:
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail',
kwargs={'pk': obj.workflow_job_template.pk})
res['notifications'] = self.reverse('api:workflow_job_notifications_list', kwargs={'pk': obj.pk})
if obj.job_template_id:
res['job_template'] = self.reverse('api:job_template_detail', kwargs={'pk': obj.job_template_id})
res['workflow_nodes'] = self.reverse('api:workflow_job_workflow_nodes_list', kwargs={'pk': obj.pk})
res['labels'] = self.reverse('api:workflow_job_label_list', kwargs={'pk': obj.pk})
res['activity_stream'] = self.reverse('api:workflow_job_activity_stream_list', kwargs={'pk': obj.pk})
res['relaunch'] = self.reverse('api:workflow_job_relaunch', kwargs={'pk': obj.pk})
if obj.can_cancel or True:
res['cancel'] = self.reverse('api:workflow_job_cancel', kwargs={'pk': obj.pk})
return res
def to_representation(self, obj):
ret = super(WorkflowJobSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'extra_vars' in ret:
ret['extra_vars'] = obj.display_extra_vars()
return ret
class WorkflowJobListSerializer(WorkflowJobSerializer, UnifiedJobListSerializer):
class Meta:
fields = ('*', '-execution_node', '-controller_node',)
class WorkflowJobCancelSerializer(WorkflowJobSerializer):
can_cancel = serializers.BooleanField(read_only=True)
class Meta:
fields = ('can_cancel',)
class LaunchConfigurationBaseSerializer(BaseSerializer):
job_type = serializers.ChoiceField(allow_blank=True, allow_null=True, required=False, default=None,
choices=NEW_JOB_TYPE_CHOICES)
job_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
limit = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
skip_tags = serializers.CharField(allow_blank=True, allow_null=True, required=False, default=None)
diff_mode = serializers.NullBooleanField(required=False, default=None)
verbosity = serializers.ChoiceField(allow_null=True, required=False, default=None,
choices=VERBOSITY_CHOICES)
exclude_errors = ()
class Meta:
fields = ('*', 'extra_data', 'inventory', # Saved launch-time config fields
'job_type', 'job_tags', 'skip_tags', 'limit', 'diff_mode', 'verbosity')
def get_related(self, obj):
res = super(LaunchConfigurationBaseSerializer, self).get_related(obj)
if obj.inventory_id:
res['inventory'] = self.reverse('api:inventory_detail', kwargs={'pk': obj.inventory_id})
res['credentials'] = self.reverse(
'api:{}_credentials_list'.format(get_type_for_model(self.Meta.model)),
kwargs={'pk': obj.pk}
)
return res
def _build_mock_obj(self, attrs):
mock_obj = self.Meta.model()
if self.instance:
for field in self.instance._meta.fields:
setattr(mock_obj, field.name, getattr(self.instance, field.name))
field_names = set(field.name for field in self.Meta.model._meta.fields)
for field_name, value in list(attrs.items()):
setattr(mock_obj, field_name, value)
if field_name not in field_names:
attrs.pop(field_name)
return mock_obj
def to_representation(self, obj):
ret = super(LaunchConfigurationBaseSerializer, self).to_representation(obj)
if obj is None:
return ret
if 'extra_data' in ret and obj.survey_passwords:
ret['extra_data'] = obj.display_extra_vars()
return ret
def get_summary_fields(self, obj):
summary_fields = super(LaunchConfigurationBaseSerializer, self).get_summary_fields(obj)
# Credential would be an empty dictionary in this case
summary_fields.pop('credential', None)
return summary_fields
def validate(self, attrs):
db_extra_data = {}
if self.instance:
db_extra_data = parse_yaml_or_json(self.instance.extra_data)
attrs = super(LaunchConfigurationBaseSerializer, self).validate(attrs)
ujt = None
if 'unified_job_template' in attrs:
ujt = attrs['unified_job_template']
elif self.instance:
ujt = self.instance.unified_job_template
# build additional field survey_passwords to track redacted variables
password_dict = {}
extra_data = parse_yaml_or_json(attrs.get('extra_data', {}))
if hasattr(ujt, 'survey_password_variables'):
# Prepare additional field survey_passwords for save
for key in ujt.survey_password_variables():
if key in extra_data:
password_dict[key] = REPLACE_STR
# Replace $encrypted$ submissions with db value if exists
if 'extra_data' in attrs:
if password_dict:
if not self.instance or password_dict != self.instance.survey_passwords:
attrs['survey_passwords'] = password_dict.copy()
# Force dict type (cannot preserve YAML formatting if passwords are involved)
# Encrypt the extra_data for save, only current password vars in JT survey
# but first, make a copy, or else this dict is referenced by request.data and the
# user could get the encrypted string back in the form data in the API browser
attrs['extra_data'] = extra_data.copy()
encrypt_dict(attrs['extra_data'], password_dict.keys())
# For any raw $encrypted$ string, either
# - replace with existing DB value
# - raise a validation error
# - ignore, if default present
for key in password_dict.keys():
if attrs['extra_data'].get(key, None) == REPLACE_STR:
if key not in db_extra_data:
element = ujt.pivot_spec(ujt.survey_spec)[key]
# NOTE: validation of the default values of password-type questions is not
# done here or on launch; doing so could leak info about the values, so it
# should not be added
if not ('default' in element and element['default']):
raise serializers.ValidationError(
{"extra_data": _('Provided variable {} has no database value to replace with.').format(key)})
else:
attrs['extra_data'][key] = db_extra_data[key]
# Build unsaved version of this config, use it to detect prompts errors
mock_obj = self._build_mock_obj(attrs)
accepted, rejected, errors = ujt._accept_or_ignore_job_kwargs(
_exclude_errors=self.exclude_errors, **mock_obj.prompts_dict())
# Remove all unprocessed $encrypted$ strings, indicating default usage
if 'extra_data' in attrs and password_dict:
for key, value in attrs['extra_data'].copy().items():
if value == REPLACE_STR:
if key in password_dict:
attrs['extra_data'].pop(key)
attrs.get('survey_passwords', {}).pop(key, None)
else:
errors.setdefault('extra_vars', []).append(
_('"$encrypted$ is a reserved keyword, may not be used for {var_name}."'.format(key))
)
# Launch configs call extra_vars extra_data for historical reasons
if 'extra_vars' in errors:
errors['extra_data'] = errors.pop('extra_vars')
if errors:
raise serializers.ValidationError(errors)
# Model `.save` needs the container dict, not the pseudo fields
if mock_obj.char_prompts:
attrs['char_prompts'] = mock_obj.char_prompts
return attrs
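# Plain-dict sketch of the survey password handling in validate() above: survey
# password answers are masked with the REPLACE_STR marker, and a submitted
# "$encrypted$" placeholder is swapped back for the stored value (or dropped so
# the survey default applies). REPLACE_STR is assumed here to be "$encrypted$".
REPLACE_STR_SKETCH = '$encrypted$'
def merge_survey_passwords(submitted, stored, password_keys):
    """Return extra_data with placeholder answers resolved from stored values."""
    merged = dict(submitted)
    for key in password_keys:
        if merged.get(key) == REPLACE_STR_SKETCH:
            if key in stored:
                merged[key] = stored[key]
            else:
                merged.pop(key)  # no stored value: fall back to the survey default
    return merged
# merge_survey_passwords({'vault_pass': '$encrypted$', 'region': 'us-east-1'},
#                        {'vault_pass': 'hunter2'}, ['vault_pass'])
# -> {'vault_pass': 'hunter2', 'region': 'us-east-1'}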
class WorkflowJobTemplateNodeSerializer(LaunchConfigurationBaseSerializer):
credential = DeprecatedCredentialField()
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
exclude_errors = ('required',) # required variables may be provided by WFJT or on launch
class Meta:
model = WorkflowJobTemplateNode
fields = ('*', 'credential', 'workflow_job_template', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',)
def get_related(self, obj):
res = super(WorkflowJobTemplateNodeSerializer, self).get_related(obj)
res['success_nodes'] = self.reverse('api:workflow_job_template_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_template_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_template_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
try:
res['workflow_job_template'] = self.reverse('api:workflow_job_template_detail', kwargs={'pk': obj.workflow_job_template.pk})
except WorkflowJobTemplate.DoesNotExist:
pass
return res
def build_field(self, field_name, info, model_class, nested_depth):
# have to special-case the field so that DRF will not automagically make it
# read-only because it's a property on the model.
if field_name == 'credential':
return self.build_standard_field(field_name,
self.credential)
return super(WorkflowJobTemplateNodeSerializer, self).build_field(field_name, info, model_class, nested_depth)
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(WorkflowJobTemplateNodeSerializer, self).build_relational_field(field_name, relation_info)
# workflow_job_template is read-only unless creating a new node.
if self.instance and field_name == 'workflow_job_template':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
def validate(self, attrs):
deprecated_fields = {}
if 'credential' in attrs: # TODO: remove when v2 API is deprecated
deprecated_fields['credential'] = attrs.pop('credential')
view = self.context.get('view')
attrs = super(WorkflowJobTemplateNodeSerializer, self).validate(attrs)
ujt_obj = None
if 'unified_job_template' in attrs:
ujt_obj = attrs['unified_job_template']
elif self.instance:
ujt_obj = self.instance.unified_job_template
if 'credential' in deprecated_fields: # TODO: remove when v2 API is deprecated
cred = deprecated_fields['credential']
attrs['credential'] = cred
if cred is not None:
if not ujt_obj.ask_credential_on_launch:
raise serializers.ValidationError({"credential": _(
"Related template is not configured to accept credentials on launch.")})
cred = Credential.objects.get(pk=cred)
view = self.context.get('view', None)
if (not view) or (not view.request) or (view.request.user not in cred.use_role):
raise PermissionDenied()
return attrs
def create(self, validated_data): # TODO: remove when v2 API is deprecated
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(WorkflowJobTemplateNodeSerializer, self).create(validated_data)
if 'credential' in deprecated_fields:
if deprecated_fields['credential']:
obj.credentials.add(deprecated_fields['credential'])
return obj
def update(self, obj, validated_data): # TODO: remove when v2 API is deprecated
deprecated_fields = {}
if 'credential' in validated_data:
deprecated_fields['credential'] = validated_data.pop('credential')
obj = super(WorkflowJobTemplateNodeSerializer, self).update(obj, validated_data)
if 'credential' in deprecated_fields:
existing = obj.credentials.filter(credential_type__kind='ssh')
new_cred = deprecated_fields['credential']
if new_cred not in existing:
for cred in existing:
obj.credentials.remove(cred)
if new_cred:
obj.credentials.add(new_cred)
return obj
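# Plain-data sketch of the deprecated single-credential update above: the one
# 'credential' value replaces whatever ssh-kind credentials the node already has.
# The strings below are hypothetical credential identifiers, not ORM objects.
def replace_ssh_credential(existing_ssh_creds, new_cred):
    creds = list(existing_ssh_creds)
    if new_cred not in creds:
        creds = []                      # remove the old ssh credentials
        if new_cred is not None:
            creds.append(new_cred)      # attach the replacement, if any
    return creds
# replace_ssh_credential(['machine-cred-a'], 'machine-cred-b') -> ['machine-cred-b']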
class WorkflowJobNodeSerializer(LaunchConfigurationBaseSerializer):
credential = DeprecatedCredentialField()
success_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
failure_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
always_nodes = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = WorkflowJobNode
fields = ('*', 'credential', 'job', 'workflow_job', '-name', '-description', 'id', 'url', 'related',
'unified_job_template', 'success_nodes', 'failure_nodes', 'always_nodes',
'do_not_run',)
def get_related(self, obj):
res = super(WorkflowJobNodeSerializer, self).get_related(obj)
res['success_nodes'] = self.reverse('api:workflow_job_node_success_nodes_list', kwargs={'pk': obj.pk})
res['failure_nodes'] = self.reverse('api:workflow_job_node_failure_nodes_list', kwargs={'pk': obj.pk})
res['always_nodes'] = self.reverse('api:workflow_job_node_always_nodes_list', kwargs={'pk': obj.pk})
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.job:
res['job'] = obj.job.get_absolute_url(self.context.get('request'))
if obj.workflow_job:
res['workflow_job'] = self.reverse('api:workflow_job_detail', kwargs={'pk': obj.workflow_job.pk})
return res
class WorkflowJobNodeListSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobNodeDetailSerializer(WorkflowJobNodeSerializer):
pass
class WorkflowJobTemplateNodeDetailSerializer(WorkflowJobTemplateNodeSerializer):
'''
Influence the api browser sample data to not include workflow_job_template
when editing a WorkflowNode.
Note: I was not able to accomplish this through the use of extra_kwargs.
Maybe something to do with workflow_job_template being a relational field?
'''
def build_relational_field(self, field_name, relation_info):
field_class, field_kwargs = super(WorkflowJobTemplateNodeDetailSerializer, self).build_relational_field(field_name, relation_info)
if self.instance and field_name == 'workflow_job_template':
field_kwargs['read_only'] = True
field_kwargs.pop('queryset', None)
return field_class, field_kwargs
class JobListSerializer(JobSerializer, UnifiedJobListSerializer):
pass
class AdHocCommandListSerializer(AdHocCommandSerializer, UnifiedJobListSerializer):
pass
class SystemJobListSerializer(SystemJobSerializer, UnifiedJobListSerializer):
class Meta:
model = SystemJob
fields = ('*', '-controller_node') # field removal undone by UJ serializer
class JobHostSummarySerializer(BaseSerializer):
class Meta:
model = JobHostSummary
fields = ('*', '-name', '-description', 'job', 'host', 'host_name', 'changed',
'dark', 'failures', 'ok', 'processed', 'skipped', 'failed',
'ignored', 'rescued')
def get_related(self, obj):
res = super(JobHostSummarySerializer, self).get_related(obj)
res.update(dict(
job=self.reverse('api:job_detail', kwargs={'pk': obj.job.pk})))
if obj.host is not None:
res.update(dict(
host=self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})
))
return res
def get_summary_fields(self, obj):
d = super(JobHostSummarySerializer, self).get_summary_fields(obj)
try:
d['job']['job_template_id'] = obj.job.job_template.id
d['job']['job_template_name'] = obj.job.job_template.name
except (KeyError, AttributeError):
pass
return d
class JobEventSerializer(BaseSerializer):
event_display = serializers.CharField(source='get_event_display2', read_only=True)
event_level = serializers.IntegerField(read_only=True)
class Meta:
model = JobEvent
fields = ('*', '-name', '-description', 'job', 'event', 'counter',
'event_display', 'event_data', 'event_level', 'failed',
'changed', 'uuid', 'parent_uuid', 'host', 'host_name', 'parent',
'playbook', 'play', 'task', 'role', 'stdout', 'start_line', 'end_line',
'verbosity')
def get_related(self, obj):
res = super(JobEventSerializer, self).get_related(obj)
res.update(dict(
job = self.reverse('api:job_detail', kwargs={'pk': obj.job_id}),
))
if obj.parent_id:
res['parent'] = self.reverse('api:job_event_detail', kwargs={'pk': obj.parent_id})
res['children'] = self.reverse('api:job_event_children_list', kwargs={'pk': obj.pk})
if obj.host_id:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host_id})
if obj.hosts.exists():
res['hosts'] = self.reverse('api:job_event_hosts_list', kwargs={'pk': obj.pk})
return res
def get_summary_fields(self, obj):
d = super(JobEventSerializer, self).get_summary_fields(obj)
try:
d['job']['job_template_id'] = obj.job.job_template.id
d['job']['job_template_name'] = obj.job.job_template.name
except (KeyError, AttributeError):
pass
return d
def to_representation(self, obj):
ret = super(JobEventSerializer, self).to_representation(obj)
# Show full stdout for event detail view, truncate only for list view.
if hasattr(self.context.get('view', None), 'retrieve'):
return ret
# Show full stdout for playbook_on_* events.
if obj and obj.event.startswith('playbook_on'):
return ret
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:
ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026'
set_count = 0
reset_count = 0
for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):
if m.string[m.start():m.end()] == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
ret['stdout'] += u'\u001b[0m' * (set_count - reset_count)
return ret
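# Minimal standalone sketch of the stdout truncation above, assuming an SGR regex
# equivalent to the module-level ANSI_SGR_PATTERN (not shown in this excerpt).
import re
ANSI_SGR_SKETCH = re.compile(r'\x1b\[[0-9;]*m')  # assumed shape of ANSI_SGR_PATTERN
def truncate_stdout(stdout, max_bytes):
    """Truncate stdout and close any ANSI color spans left open by the cut."""
    if max_bytes <= 0 or len(stdout) < max_bytes:
        return stdout
    out = stdout[:max_bytes - 1] + u'\u2026'
    set_count = reset_count = 0
    for m in ANSI_SGR_SKETCH.finditer(out):
        if m.group() == u'\u001b[0m':
            reset_count += 1
        else:
            set_count += 1
    return out + u'\u001b[0m' * (set_count - reset_count)
# truncate_stdout('\x1b[0;32mok: [host]\x1b[0m changed=0', 12) ends with a reset code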
class JobEventWebSocketSerializer(JobEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = JobEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'job_events'
class ProjectUpdateEventSerializer(JobEventSerializer):
stdout = serializers.SerializerMethodField()
event_data = serializers.SerializerMethodField()
class Meta:
model = ProjectUpdateEvent
fields = ('*', '-name', '-description', '-job', '-job_id',
'-parent_uuid', '-parent', '-host', 'project_update')
def get_related(self, obj):
res = super(JobEventSerializer, self).get_related(obj)
res['project_update'] = self.reverse(
'api:project_update_detail', kwargs={'pk': obj.project_update_id}
)
return res
def get_stdout(self, obj):
return UriCleaner.remove_sensitive(obj.stdout)
def get_event_data(self, obj):
try:
return json.loads(
UriCleaner.remove_sensitive(
json.dumps(obj.event_data)
)
)
except Exception:
logger.exception("Failed to sanitize event_data")
return {}
class ProjectUpdateEventWebSocketSerializer(ProjectUpdateEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = ProjectUpdateEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'project_update_events'
class AdHocCommandEventSerializer(BaseSerializer):
event_display = serializers.CharField(source='get_event_display', read_only=True)
class Meta:
model = AdHocCommandEvent
fields = ('*', '-name', '-description', 'ad_hoc_command', 'event',
'counter', 'event_display', 'event_data', 'failed',
'changed', 'uuid', 'host', 'host_name', 'stdout',
'start_line', 'end_line', 'verbosity')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res.update(dict(
ad_hoc_command = self.reverse('api:ad_hoc_command_detail', kwargs={'pk': obj.ad_hoc_command_id}),
))
if obj.host:
res['host'] = self.reverse('api:host_detail', kwargs={'pk': obj.host.pk})
return res
def to_representation(self, obj):
ret = super(AdHocCommandEventSerializer, self).to_representation(obj)
# Show full stdout for event detail view, truncate only for list view.
if hasattr(self.context.get('view', None), 'retrieve'):
return ret
max_bytes = settings.EVENT_STDOUT_MAX_BYTES_DISPLAY
if max_bytes > 0 and 'stdout' in ret and len(ret['stdout']) >= max_bytes:
ret['stdout'] = ret['stdout'][:(max_bytes - 1)] + u'\u2026'
set_count = 0
reset_count = 0
for m in ANSI_SGR_PATTERN.finditer(ret['stdout']):
if m.string[m.start():m.end()] == u'\u001b[0m':
reset_count += 1
else:
set_count += 1
ret['stdout'] += u'\u001b[0m' * (set_count - reset_count)
return ret
class AdHocCommandEventWebSocketSerializer(AdHocCommandEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = AdHocCommandEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'ad_hoc_command_events'
class InventoryUpdateEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = InventoryUpdateEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'inventory_update')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['inventory_update'] = self.reverse(
'api:inventory_update_detail', kwargs={'pk': obj.inventory_update_id}
)
return res
class InventoryUpdateEventWebSocketSerializer(InventoryUpdateEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = InventoryUpdateEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'inventory_update_events'
class SystemJobEventSerializer(AdHocCommandEventSerializer):
class Meta:
model = SystemJobEvent
fields = ('*', '-name', '-description', '-ad_hoc_command', '-host',
'-host_name', 'system_job')
def get_related(self, obj):
res = super(AdHocCommandEventSerializer, self).get_related(obj)
res['system_job'] = self.reverse(
'api:system_job_detail', kwargs={'pk': obj.system_job_id}
)
return res
class SystemJobEventWebSocketSerializer(SystemJobEventSerializer):
created = serializers.SerializerMethodField()
modified = serializers.SerializerMethodField()
event_name = serializers.CharField(source='event')
group_name = serializers.SerializerMethodField()
class Meta:
model = SystemJobEvent
fields = ('*', 'event_name', 'group_name',)
def get_created(self, obj):
return obj.created.isoformat()
def get_modified(self, obj):
return obj.modified.isoformat()
def get_group_name(self, obj):
return 'system_job_events'
class JobLaunchSerializer(BaseSerializer):
# Representational fields
passwords_needed_to_start = serializers.ReadOnlyField()
can_start_without_user_input = serializers.BooleanField(read_only=True)
variables_needed_to_start = serializers.ReadOnlyField()
credential_needed_to_start = serializers.SerializerMethodField()
inventory_needed_to_start = serializers.SerializerMethodField()
survey_enabled = serializers.SerializerMethodField()
job_template_data = serializers.SerializerMethodField()
defaults = serializers.SerializerMethodField()
# Accepted on launch fields
extra_vars = serializers.JSONField(required=False, write_only=True)
inventory = serializers.PrimaryKeyRelatedField(
queryset=Inventory.objects.all(),
required=False, write_only=True
)
credentials = serializers.PrimaryKeyRelatedField(
many=True, queryset=Credential.objects.all(),
required=False, write_only=True
)
credential_passwords = VerbatimField(required=False, write_only=True)
diff_mode = serializers.BooleanField(required=False, write_only=True)
job_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
job_type = serializers.ChoiceField(required=False, choices=NEW_JOB_TYPE_CHOICES, write_only=True)
skip_tags = serializers.CharField(required=False, write_only=True, allow_blank=True)
limit = serializers.CharField(required=False, write_only=True, allow_blank=True)
verbosity = serializers.ChoiceField(required=False, choices=VERBOSITY_CHOICES, write_only=True)
class Meta:
model = JobTemplate
fields = ('can_start_without_user_input', 'passwords_needed_to_start',
'extra_vars', 'inventory', 'limit', 'job_tags', 'skip_tags', 'job_type', 'verbosity', 'diff_mode',
'credentials', 'credential_passwords', 'ask_variables_on_launch', 'ask_tags_on_launch',
'ask_diff_mode_on_launch', 'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_limit_on_launch',
'ask_verbosity_on_launch', 'ask_inventory_on_launch', 'ask_credential_on_launch',
'survey_enabled', 'variables_needed_to_start', 'credential_needed_to_start',
'inventory_needed_to_start', 'job_template_data', 'defaults')
read_only_fields = (
'ask_diff_mode_on_launch', 'ask_variables_on_launch', 'ask_limit_on_launch', 'ask_tags_on_launch',
'ask_skip_tags_on_launch', 'ask_job_type_on_launch', 'ask_verbosity_on_launch',
'ask_inventory_on_launch', 'ask_credential_on_launch',)
def get_credential_needed_to_start(self, obj):
return False
def get_inventory_needed_to_start(self, obj):
return not (obj and obj.inventory)
def get_survey_enabled(self, obj):
if obj:
return obj.survey_enabled and 'spec' in obj.survey_spec
return False
def get_defaults(self, obj):
defaults_dict = {}
for field_name in JobTemplate.get_ask_mapping().keys():
if field_name == 'inventory':
defaults_dict[field_name] = dict(
name=getattrd(obj, '%s.name' % field_name, None),
id=getattrd(obj, '%s.pk' % field_name, None))
elif field_name == 'credentials':
if self.version > 1:
for cred in obj.credentials.all():
cred_dict = dict(
id=cred.id,
name=cred.name,
credential_type=cred.credential_type.pk,
passwords_needed=cred.passwords_needed
)
if cred.credential_type.managed_by_tower and 'vault_id' in cred.credential_type.defined_fields:
cred_dict['vault_id'] = cred.get_input('vault_id', default=None)
defaults_dict.setdefault(field_name, []).append(cred_dict)
else:
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
def get_job_template_data(self, obj):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate_extra_vars(self, value):
return vars_validate_or_raise(value)
def validate(self, attrs):
template = self.context.get('template')
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(
_exclude_errors=['prompts'], # make several error types non-blocking
**attrs)
self._ignored_fields = rejected
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Job Template is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
errors['inventory'] = _("The provided inventory is being deleted.")
# Prohibit providing multiple credentials of the same CredentialType.kind
# or multiples of same vault id
distinct_cred_kinds = []
for cred in accepted.get('credentials', []):
if cred.unique_hash() in distinct_cred_kinds:
errors.setdefault('credentials', []).append(_(
'Cannot assign multiple {} credentials.'
).format(cred.unique_hash(display=True)))
if cred.credential_type.kind not in ('ssh', 'vault', 'cloud', 'net'):
errors.setdefault('credentials', []).append(_(
'Cannot assign a Credential of kind `{}`'
).format(cred.credential_type.kind))
distinct_cred_kinds.append(cred.unique_hash())
# Prohibit removing credentials from the JT list (unsupported for now)
template_credentials = template.credentials.all()
if 'credentials' in attrs:
removed_creds = set(template_credentials) - set(attrs['credentials'])
provided_mapping = Credential.unique_dict(attrs['credentials'])
for cred in removed_creds:
if cred.unique_hash() in provided_mapping.keys():
continue # User replaced credential with new of same type
errors.setdefault('credentials', []).append(_(
'Removing {} credential at launch time without replacement is not supported. '
'Provided list lacked credential(s): {}.'
).format(cred.unique_hash(display=True), ', '.join([str(c) for c in removed_creds])))
# verify that credentials (either provided or existing) don't
# require launch-time passwords that have not been provided
if 'credentials' in accepted:
launch_credentials = accepted['credentials']
else:
launch_credentials = template_credentials
passwords = attrs.get('credential_passwords', {}) # get from original attrs
passwords_lacking = []
for cred in launch_credentials:
for p in cred.passwords_needed:
if p not in passwords:
passwords_lacking.append(p)
else:
accepted.setdefault('credential_passwords', {})
accepted['credential_passwords'][p] = passwords[p]
if len(passwords_lacking):
errors['passwords_needed_to_start'] = passwords_lacking
if errors:
raise serializers.ValidationError(errors)
if 'extra_vars' in accepted:
extra_vars_save = accepted['extra_vars']
else:
extra_vars_save = None
# Validate job against JobTemplate clean_ methods
accepted = super(JobLaunchSerializer, self).validate(accepted)
# Preserve extra_vars as dictionary internally
if extra_vars_save:
accepted['extra_vars'] = extra_vars_save
return accepted
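# Minimal sketch of the launch-time password check in validate() above; the
# Credential objects and their passwords_needed come from the AWX models, so
# plain dicts stand in for them here.
def find_missing_passwords(launch_credentials, provided_passwords):
    """Return password field names required by the credentials but not supplied."""
    missing = []
    for cred in launch_credentials:
        for field in cred['passwords_needed']:
            if field not in provided_passwords:
                missing.append(field)
    return missing
# find_missing_passwords([{'passwords_needed': ['ssh_password']}], {}) -> ['ssh_password']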
class WorkflowJobLaunchSerializer(BaseSerializer):
can_start_without_user_input = serializers.BooleanField(read_only=True)
defaults = serializers.SerializerMethodField()
variables_needed_to_start = serializers.ReadOnlyField()
survey_enabled = serializers.SerializerMethodField()
extra_vars = VerbatimField(required=False, write_only=True)
inventory = serializers.PrimaryKeyRelatedField(
queryset=Inventory.objects.all(),
required=False, write_only=True
)
workflow_job_template_data = serializers.SerializerMethodField()
class Meta:
model = WorkflowJobTemplate
fields = ('ask_inventory_on_launch', 'can_start_without_user_input', 'defaults', 'extra_vars',
'inventory', 'survey_enabled', 'variables_needed_to_start',
'node_templates_missing', 'node_prompts_rejected',
'workflow_job_template_data', 'ask_variables_on_launch')
read_only_fields = ('ask_inventory_on_launch', 'ask_variables_on_launch')
def get_survey_enabled(self, obj):
if obj:
return obj.survey_enabled and 'spec' in obj.survey_spec
return False
def get_defaults(self, obj):
defaults_dict = {}
for field_name in WorkflowJobTemplate.get_ask_mapping().keys():
if field_name == 'inventory':
defaults_dict[field_name] = dict(
name=getattrd(obj, '%s.name' % field_name, None),
id=getattrd(obj, '%s.pk' % field_name, None))
else:
defaults_dict[field_name] = getattr(obj, field_name)
return defaults_dict
def get_workflow_job_template_data(self, obj):
return dict(name=obj.name, id=obj.id, description=obj.description)
def validate(self, attrs):
template = self.instance
accepted, rejected, errors = template._accept_or_ignore_job_kwargs(**attrs)
self._ignored_fields = rejected
if template.inventory and template.inventory.pending_deletion is True:
errors['inventory'] = _("The inventory associated with this Workflow is being deleted.")
elif 'inventory' in accepted and accepted['inventory'].pending_deletion:
errors['inventory'] = _("The provided inventory is being deleted.")
if errors:
raise serializers.ValidationError(errors)
WFJT_extra_vars = template.extra_vars
WFJT_inventory = template.inventory
super(WorkflowJobLaunchSerializer, self).validate(attrs)
template.extra_vars = WFJT_extra_vars
template.inventory = WFJT_inventory
return accepted
class NotificationTemplateSerializer(BaseSerializer):
show_capabilities = ['edit', 'delete', 'copy']
capabilities_prefetch = [{'copy': 'organization.admin'}]
class Meta:
model = NotificationTemplate
fields = ('*', 'organization', 'notification_type', 'notification_configuration')
type_map = {"string": (str,),
"int": (int,),
"bool": (bool,),
"list": (list,),
"password": (str,),
"object": (dict, OrderedDict)}
def to_representation(self, obj):
ret = super(NotificationTemplateSerializer, self).to_representation(obj)
if 'notification_configuration' in ret:
ret['notification_configuration'] = obj.display_notification_configuration()
return ret
def get_related(self, obj):
res = super(NotificationTemplateSerializer, self).get_related(obj)
res.update(dict(
test = self.reverse('api:notification_template_test', kwargs={'pk': obj.pk}),
notifications = self.reverse('api:notification_template_notification_list', kwargs={'pk': obj.pk}),
))
if self.version > 1:
res['copy'] = self.reverse('api:notification_template_copy', kwargs={'pk': obj.pk})
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
def _recent_notifications(self, obj):
return [{'id': x.id, 'status': x.status, 'created': x.created} for x in obj.notifications.all().order_by('-created')[:5]]
def get_summary_fields(self, obj):
d = super(NotificationTemplateSerializer, self).get_summary_fields(obj)
d['recent_notifications'] = self._recent_notifications(obj)
return d
def validate(self, attrs):
from awx.api.views import NotificationTemplateDetail
notification_type = None
if 'notification_type' in attrs:
notification_type = attrs['notification_type']
elif self.instance:
notification_type = self.instance.notification_type
else:
notification_type = None
if not notification_type:
raise serializers.ValidationError(_('Missing required fields for Notification Configuration: notification_type'))
notification_class = NotificationTemplate.CLASS_FOR_NOTIFICATION_TYPE[notification_type]
missing_fields = []
incorrect_type_fields = []
error_list = []
if 'notification_configuration' not in attrs:
return attrs
if self.context['view'].kwargs and isinstance(self.context['view'], NotificationTemplateDetail):
object_actual = self.context['view'].get_object()
else:
object_actual = None
for field, params in notification_class.init_parameters.items():
if field not in attrs['notification_configuration']:
if 'default' in params:
attrs['notification_configuration'][field] = params['default']
else:
missing_fields.append(field)
continue
field_val = attrs['notification_configuration'][field]
field_type = params['type']
expected_types = self.type_map[field_type]
if not type(field_val) in expected_types:
incorrect_type_fields.append((field, field_type))
continue
if field_type == "list" and len(field_val) < 1:
error_list.append(_("No values specified for field '{}'").format(field))
continue
if field_type == "password" and field_val == "$encrypted$" and object_actual is not None:
attrs['notification_configuration'][field] = object_actual.notification_configuration[field]
if missing_fields:
error_list.append(_("Missing required fields for Notification Configuration: {}.").format(missing_fields))
if incorrect_type_fields:
for type_field_error in incorrect_type_fields:
error_list.append(_("Configuration field '{}' incorrect type, expected {}.").format(type_field_error[0],
type_field_error[1]))
if error_list:
raise serializers.ValidationError(error_list)
return super(NotificationTemplateSerializer, self).validate(attrs)
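# Minimal sketch of the notification_configuration validation loop above, using a
# hypothetical init_parameters spec; the real specs come from each notification
# backend class referenced by CLASS_FOR_NOTIFICATION_TYPE.
SKETCH_TYPE_MAP = {"string": (str,), "int": (int,), "bool": (bool,), "list": (list,)}
def check_notification_config(init_parameters, config):
    """Fill defaults and report missing or incorrectly typed configuration fields."""
    missing, wrong_type = [], []
    for field, params in init_parameters.items():
        if field not in config:
            if 'default' in params:
                config[field] = params['default']
            else:
                missing.append(field)
            continue
        if type(config[field]) not in SKETCH_TYPE_MAP[params['type']]:
            wrong_type.append((field, params['type']))
    return missing, wrong_type
# check_notification_config(
#     {'url': {'type': 'string'}, 'port': {'type': 'int', 'default': 443}},
#     {'url': 'https://hooks.example.invalid'})
# -> ([], [])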
class NotificationSerializer(BaseSerializer):
class Meta:
model = Notification
fields = ('*', '-name', '-description', 'notification_template', 'error', 'status', 'notifications_sent',
'notification_type', 'recipients', 'subject')
def get_related(self, obj):
res = super(NotificationSerializer, self).get_related(obj)
res.update(dict(
notification_template = self.reverse('api:notification_template_detail', kwargs={'pk': obj.notification_template.pk}),
))
return res
class LabelSerializer(BaseSerializer):
class Meta:
model = Label
fields = ('*', '-description', 'organization')
def get_related(self, obj):
res = super(LabelSerializer, self).get_related(obj)
if obj.organization:
res['organization'] = self.reverse('api:organization_detail', kwargs={'pk': obj.organization.pk})
return res
class SchedulePreviewSerializer(BaseSerializer):
class Meta:
model = Schedule
fields = ('rrule',)
# We reject rrules if:
# - DTSTART is not included
# - INTERVAL is not included
# - SECONDLY is used
# - TZID is used
# - BYDAY prefixed with a number (MO is good but not 20MO)
# - BYYEARDAY
# - BYWEEKNO
# - Multiple DTSTART or RRULE elements
# - Can't contain both COUNT and UNTIL
# - COUNT > 999
def validate_rrule(self, value):
rrule_value = value
multi_by_month_day = r".*?BYMONTHDAY[\:\=][0-9]+,-*[0-9]+"
multi_by_month = r".*?BYMONTH[\:\=][0-9]+,[0-9]+"
by_day_with_numeric_prefix = r".*?BYDAY[\:\=][0-9]+[a-zA-Z]{2}"
match_count = re.match(r".*?(COUNT\=[0-9]+)", rrule_value)
match_multiple_dtstart = re.findall(r".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", rrule_value)
match_native_dtstart = re.findall(r".*?(DTSTART:[0-9]+T[0-9]+) ", rrule_value)
match_multiple_rrule = re.findall(r".*?(RRULE\:)", rrule_value)
if not len(match_multiple_dtstart):
raise serializers.ValidationError(_('Valid DTSTART required in rrule. Value should start with: DTSTART:YYYYMMDDTHHMMSSZ'))
if len(match_native_dtstart):
raise serializers.ValidationError(_('DTSTART cannot be a naive datetime. Specify ;TZINFO= or YYYYMMDDTHHMMSSZZ.'))
if len(match_multiple_dtstart) > 1:
raise serializers.ValidationError(_('Multiple DTSTART is not supported.'))
if not len(match_multiple_rrule):
raise serializers.ValidationError(_('RRULE required in rrule.'))
if len(match_multiple_rrule) > 1:
raise serializers.ValidationError(_('Multiple RRULE is not supported.'))
if 'interval' not in rrule_value.lower():
raise serializers.ValidationError(_('INTERVAL required in rrule.'))
if 'secondly' in rrule_value.lower():
raise serializers.ValidationError(_('SECONDLY is not supported.'))
if re.match(multi_by_month_day, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHDAYs not supported.'))
if re.match(multi_by_month, rrule_value):
raise serializers.ValidationError(_('Multiple BYMONTHs not supported.'))
if re.match(by_day_with_numeric_prefix, rrule_value):
raise serializers.ValidationError(_("BYDAY with numeric prefix not supported."))
if 'byyearday' in rrule_value.lower():
raise serializers.ValidationError(_("BYYEARDAY not supported."))
if 'byweekno' in rrule_value.lower():
raise serializers.ValidationError(_("BYWEEKNO not supported."))
if 'COUNT' in rrule_value and 'UNTIL' in rrule_value:
raise serializers.ValidationError(_("RRULE may not contain both COUNT and UNTIL"))
if match_count:
count_val = match_count.groups()[0].strip().split("=")
if int(count_val[1]) > 999:
raise serializers.ValidationError(_("COUNT > 999 is unsupported."))
try:
Schedule.rrulestr(rrule_value)
except Exception as e:
raise serializers.ValidationError(_("rrule parsing failed validation: {}").format(e))
return value
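# Illustrative standalone check of a few of the rrule constraints listed above
# (DTSTART present, a single RRULE, COUNT limit), mirroring the validator's
# regexes on an example rrule string.
import re
EXAMPLE_RRULE = "DTSTART:20240101T120000Z RRULE:FREQ=DAILY;INTERVAL=1;COUNT=10"
assert re.findall(r".*?(DTSTART(;[^:]+)?\:[0-9]+T[0-9]+Z?)", EXAMPLE_RRULE)   # DTSTART present
assert len(re.findall(r".*?(RRULE\:)", EXAMPLE_RRULE)) == 1                   # exactly one RRULE
count_match = re.match(r".*?(COUNT\=[0-9]+)", EXAMPLE_RRULE)
assert count_match and int(count_match.groups()[0].split("=")[1]) <= 999      # COUNT within limit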
class ScheduleSerializer(LaunchConfigurationBaseSerializer, SchedulePreviewSerializer):
show_capabilities = ['edit', 'delete']
timezone = serializers.SerializerMethodField()
until = serializers.SerializerMethodField()
class Meta:
model = Schedule
fields = ('*', 'unified_job_template', 'enabled', 'dtstart', 'dtend', 'rrule', 'next_run', 'timezone',
'until')
def get_timezone(self, obj):
return obj.timezone
def get_until(self, obj):
return obj.until
def get_related(self, obj):
res = super(ScheduleSerializer, self).get_related(obj)
res.update(dict(
unified_jobs = self.reverse('api:schedule_unified_jobs_list', kwargs={'pk': obj.pk}),
))
if obj.unified_job_template:
res['unified_job_template'] = obj.unified_job_template.get_absolute_url(self.context.get('request'))
try:
if obj.unified_job_template.project:
res['project'] = obj.unified_job_template.project.get_absolute_url(self.context.get('request'))
except ObjectDoesNotExist:
pass
if obj.inventory:
res['inventory'] = obj.inventory.get_absolute_url(self.context.get('request'))
elif obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
res['inventory'] = obj.unified_job_template.inventory.get_absolute_url(self.context.get('request'))
return res
def get_summary_fields(self, obj):
summary_fields = super(ScheduleSerializer, self).get_summary_fields(obj)
if 'inventory' in summary_fields:
return summary_fields
inventory = None
if obj.unified_job_template and getattr(obj.unified_job_template, 'inventory', None):
inventory = obj.unified_job_template.inventory
else:
return summary_fields
summary_fields['inventory'] = dict()
for field in SUMMARIZABLE_FK_FIELDS['inventory']:
summary_fields['inventory'][field] = getattr(inventory, field, None)
return summary_fields
def validate_unified_job_template(self, value):
if type(value) == InventorySource and value.source not in SCHEDULEABLE_PROVIDERS:
raise serializers.ValidationError(_('Inventory Source must be a cloud resource.'))
elif type(value) == Project and value.scm_type == '':
raise serializers.ValidationError(_('Manual Project cannot have a schedule set.'))
elif type(value) == InventorySource and value.source == 'scm' and value.update_on_project_update:
raise serializers.ValidationError(_(
'Inventory sources with `update_on_project_update` cannot be scheduled. '
'Schedule its source project `{}` instead.').format(value.source_project.name))
return value
class InstanceSerializer(BaseSerializer):
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.IntegerField(
help_text=_('Count of jobs in the running or waiting state that '
'are targeted for this instance'),
read_only=True
)
jobs_total = serializers.IntegerField(
help_text=_('Count of all jobs that target this instance'),
read_only=True
)
class Meta:
model = Instance
read_only_fields = ('uuid', 'hostname', 'version')
fields = ("id", "type", "url", "related", "uuid", "hostname", "created", "modified", 'capacity_adjustment',
"version", "capacity", "consumed_capacity", "percent_capacity_remaining", "jobs_running", "jobs_total",
"cpu", "memory", "cpu_capacity", "mem_capacity", "enabled", "managed_by_policy")
def get_related(self, obj):
res = super(InstanceSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_unified_jobs_list', kwargs={'pk': obj.pk})
res['instance_groups'] = self.reverse('api:instance_instance_groups_list', kwargs={'pk': obj.pk})
return res
def get_consumed_capacity(self, obj):
return obj.consumed_capacity
def get_percent_capacity_remaining(self, obj):
if not obj.capacity or obj.consumed_capacity >= obj.capacity:
return 0.0
else:
return float("{0:.2f}".format(((float(obj.capacity) - float(obj.consumed_capacity)) / (float(obj.capacity))) * 100))
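# Standalone sketch of the percent_capacity_remaining computation above, using
# plain numbers in place of the Instance model fields.
def percent_capacity_remaining_sketch(capacity, consumed):
    """Remaining capacity as a percentage, rounded to two decimal places."""
    if not capacity or consumed >= capacity:
        return 0.0
    return float("{0:.2f}".format((float(capacity) - float(consumed)) / float(capacity) * 100))
# percent_capacity_remaining_sketch(100, 25) -> 75.0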
class InstanceGroupSerializer(BaseSerializer):
committed_capacity = serializers.SerializerMethodField()
consumed_capacity = serializers.SerializerMethodField()
percent_capacity_remaining = serializers.SerializerMethodField()
jobs_running = serializers.IntegerField(
help_text=_('Count of jobs in the running or waiting state that '
'are targeted for this instance group'),
read_only=True
)
jobs_total = serializers.IntegerField(
help_text=_('Count of all jobs that target this instance group'),
read_only=True
)
instances = serializers.SerializerMethodField()
is_controller = serializers.BooleanField(
help_text=_('Indicates whether instance group controls any other group'),
read_only=True
)
is_isolated = serializers.BooleanField(
help_text=_('Indicates whether instances in this group are isolated. '
'Isolated groups have a designated controller group.'),
read_only=True
)
# NOTE: help_text is duplicated from field definitions, no obvious way of
# both defining field details here and also getting the field's help_text
policy_instance_percentage = serializers.IntegerField(
default=0, min_value=0, max_value=100, required=False, initial=0,
label=_('Policy Instance Percentage'),
help_text=_("Minimum percentage of all instances that will be automatically assigned to "
"this group when new instances come online.")
)
policy_instance_minimum = serializers.IntegerField(
default=0, min_value=0, required=False, initial=0,
label=_('Policy Instance Minimum'),
help_text=_("Static minimum number of Instances that will be automatically assign to "
"this group when new instances come online.")
)
policy_instance_list = serializers.ListField(
child=serializers.CharField(), required=False,
label=_('Policy Instance List'),
help_text=_("List of exact-match Instances that will be assigned to this group")
)
class Meta:
model = InstanceGroup
fields = ("id", "type", "url", "related", "name", "created", "modified",
"capacity", "committed_capacity", "consumed_capacity",
"percent_capacity_remaining", "jobs_running", "jobs_total",
"instances", "controller", "is_controller", "is_isolated",
"policy_instance_percentage", "policy_instance_minimum", "policy_instance_list")
def get_related(self, obj):
res = super(InstanceGroupSerializer, self).get_related(obj)
res['jobs'] = self.reverse('api:instance_group_unified_jobs_list', kwargs={'pk': obj.pk})
res['instances'] = self.reverse('api:instance_group_instance_list', kwargs={'pk': obj.pk})
if obj.controller_id:
res['controller'] = self.reverse('api:instance_group_detail', kwargs={'pk': obj.controller_id})
return res
def validate_policy_instance_list(self, value):
for instance_name in value:
if value.count(instance_name) > 1:
raise serializers.ValidationError(_('Duplicate entry {}.').format(instance_name))
if not Instance.objects.filter(hostname=instance_name).exists():
raise serializers.ValidationError(_('{} is not a valid hostname of an existing instance.').format(instance_name))
if Instance.objects.get(hostname=instance_name).is_isolated():
raise serializers.ValidationError(_('Isolated instances may not be added or removed from instance groups via the API.'))
if self.instance and self.instance.controller_id is not None:
raise serializers.ValidationError(_('Isolated instance group membership may not be managed via the API.'))
return value
def validate_name(self, value):
if self.instance and self.instance.name == 'tower' and value != 'tower':
raise serializers.ValidationError(_('tower instance group name may not be changed.'))
return value
def get_capacity_dict(self):
# Store capacity values (globally computed) in the context
if 'capacity_map' not in self.context:
ig_qs = None
jobs_qs = UnifiedJob.objects.filter(status__in=('running', 'waiting'))
if self.parent: # Is ListView:
ig_qs = self.parent.instance
self.context['capacity_map'] = InstanceGroup.objects.capacity_values(
qs=ig_qs, tasks=jobs_qs, breakdown=True)
return self.context['capacity_map']
def get_consumed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['running_capacity']
def get_committed_capacity(self, obj):
return self.get_capacity_dict()[obj.name]['committed_capacity']
def get_percent_capacity_remaining(self, obj):
if not obj.capacity:
return 0.0
consumed = self.get_consumed_capacity(obj)
if consumed >= obj.capacity:
return 0.0
else:
return float("{0:.2f}".format(
((float(obj.capacity) - float(consumed)) / (float(obj.capacity))) * 100)
)
def get_instances(self, obj):
return obj.instances.count()
class ActivityStreamSerializer(BaseSerializer):
changes = serializers.SerializerMethodField()
object_association = serializers.SerializerMethodField()
@cached_property
def _local_summarizable_fk_fields(self):
summary_dict = copy.copy(SUMMARIZABLE_FK_FIELDS)
# Special requests
summary_dict['group'] = summary_dict['group'] + ('inventory_id',)
for key in summary_dict.keys():
if 'id' not in summary_dict[key]:
summary_dict[key] = summary_dict[key] + ('id',)
field_list = list(summary_dict.items())
# Needed related fields that are not in the default summary fields
field_list += [
('workflow_job_template_node', ('id', 'unified_job_template_id')),
('label', ('id', 'name', 'organization_id')),
('notification', ('id', 'status', 'notification_type', 'notification_template_id')),
('o_auth2_access_token', ('id', 'user_id', 'description', 'application_id', 'scope')),
('o_auth2_application', ('id', 'name', 'description')),
('credential_type', ('id', 'name', 'description', 'kind', 'managed_by_tower')),
('ad_hoc_command', ('id', 'name', 'status', 'limit'))
]
return field_list
class Meta:
model = ActivityStream
fields = ('*', '-name', '-description', '-created', '-modified',
'timestamp', 'operation', 'changes', 'object1', 'object2', 'object_association')
def get_fields(self):
ret = super(ActivityStreamSerializer, self).get_fields()
for key, field in list(ret.items()):
if key == 'changes':
field.help_text = _('A summary of the new and changed values when an object is created, updated, or deleted')
if key == 'object1':
field.help_text = _('For create, update, and delete events this is the object type that was affected. '
'For associate and disassociate events this is the object type associated or disassociated with object2.')
if key == 'object2':
field.help_text = _('Unpopulated for create, update, and delete events. For associate and disassociate '
'events this is the object type that object1 is being associated with.')
if key == 'operation':
field.help_text = _('The action taken with respect to the given object(s).')
return ret
def get_changes(self, obj):
if obj is None:
return {}
try:
return json.loads(obj.changes)
except Exception:
logger.warning("Error deserializing activity stream json changes")
return {}
def get_object_association(self, obj):
if not obj.object_relationship_type:
return ""
elif obj.object_relationship_type.endswith('_role'):
# roles: these values look like
# "awx.main.models.inventory.Inventory.admin_role"
# due to historical reasons the UI expects just "role" here
return "role"
# default case: these values look like
# "awx.main.models.organization.Organization_notification_templates_success"
# so instead of splitting on the period we take everything after the first underscore
try:
return obj.object_relationship_type.split(".")[-1].split("_", 1)[1]
except Exception:
logger.debug('Failed to parse activity stream relationship type {}'.format(obj.object_relationship_type))
return ""
def get_related(self, obj):
rel = {}
if obj.actor is not None:
rel['actor'] = self.reverse('api:user_detail', kwargs={'pk': obj.actor.pk})
for fk, __ in self._local_summarizable_fk_fields:
if not hasattr(obj, fk):
continue
m2m_list = self._get_rel(obj, fk)
if m2m_list:
rel[fk] = []
id_list = []
for thisItem in m2m_list:
if getattr(thisItem, 'id', None) in id_list:
continue
id_list.append(getattr(thisItem, 'id', None))
if hasattr(thisItem, 'get_absolute_url'):
rel_url = thisItem.get_absolute_url(self.context.get('request'))
else:
view_name = fk + '_detail'
rel_url = self.reverse('api:' + view_name, kwargs={'pk': thisItem.id})
rel[fk].append(rel_url)
if fk == 'schedule':
rel['unified_job_template'] = thisItem.unified_job_template.get_absolute_url(self.context.get('request'))
if obj.setting and obj.setting.get('category', None):
rel['setting'] = self.reverse(
'api:setting_singleton_detail',
kwargs={'category_slug': obj.setting['category']}
)
return rel
def _get_rel(self, obj, fk):
related_model = ActivityStream._meta.get_field(fk).related_model
related_manager = getattr(obj, fk)
if issubclass(related_model, PolymorphicModel) and hasattr(obj, '_prefetched_objects_cache'):
# HACK: manually fill PolymorphicModel caches to prevent running query multiple times
# unnecessary if django-polymorphic issue #68 is solved
if related_manager.prefetch_cache_name not in obj._prefetched_objects_cache:
obj._prefetched_objects_cache[related_manager.prefetch_cache_name] = list(related_manager.all())
return related_manager.all()
def get_summary_fields(self, obj):
summary_fields = OrderedDict()
for fk, related_fields in self._local_summarizable_fk_fields:
try:
if not hasattr(obj, fk):
continue
m2m_list = self._get_rel(obj, fk)
if m2m_list:
summary_fields[fk] = []
for thisItem in m2m_list:
if fk == 'job':
summary_fields['job_template'] = []
job_template_item = {}
job_template_fields = SUMMARIZABLE_FK_FIELDS['job_template']
job_template = getattr(thisItem, 'job_template', None)
if job_template is not None:
for field in job_template_fields:
fval = getattr(job_template, field, None)
if fval is not None:
job_template_item[field] = fval
summary_fields['job_template'].append(job_template_item)
if fk == 'workflow_job_template_node':
summary_fields['workflow_job_template'] = []
workflow_job_template_item = {}
workflow_job_template_fields = SUMMARIZABLE_FK_FIELDS['workflow_job_template']
workflow_job_template = getattr(thisItem, 'workflow_job_template', None)
if workflow_job_template is not None:
for field in workflow_job_template_fields:
fval = getattr(workflow_job_template, field, None)
if fval is not None:
workflow_job_template_item[field] = fval
summary_fields['workflow_job_template'].append(workflow_job_template_item)
if fk == 'schedule':
unified_job_template = getattr(thisItem, 'unified_job_template', None)
if unified_job_template is not None:
summary_fields[get_type_for_model(unified_job_template)] = {'id': unified_job_template.id,
'name': unified_job_template.name}
thisItemDict = {}
for field in related_fields:
fval = getattr(thisItem, field, None)
if fval is not None:
thisItemDict[field] = fval
summary_fields[fk].append(thisItemDict)
except ObjectDoesNotExist:
pass
if obj.actor is not None:
            summary_fields['actor'] = dict(id=obj.actor.id,
                                           username=obj.actor.username,
                                           first_name=obj.actor.first_name,
                                           last_name=obj.actor.last_name)
elif obj.deleted_actor:
summary_fields['actor'] = obj.deleted_actor.copy()
summary_fields['actor']['id'] = None
if obj.setting:
summary_fields['setting'] = [obj.setting]
return summary_fields
|
py | 1a5431d37ebda58e5b218f10d2b25e4f51243a49 | import pandas as pd
from bokeh.plotting import figure
from bokeh.sampledata.stocks import AAPL
def get_template():
df = pd.DataFrame(AAPL)
df['date'] = pd.to_datetime(df['date'])
import jinja2
from bokeh.embed import components
# IMPORTANT NOTE!! The version of BokehJS loaded in the template should match
# the version of Bokeh installed locally.
template = jinja2.Template("""
<!DOCTYPE html>
<html lang="en-US">
<link
href="http://cdn.pydata.org/bokeh/release/bokeh-0.12.11.min.css"
rel="stylesheet" type="text/css">
<link
href="http://cdn.pydata.org/bokeh/release/bokeh-widgets-0.12.11.min.css"
rel="stylesheet" type="text/css">
<link
href="http://cdn.pydata.org/bokeh/release/bokeh-tables-0.12.11.min.css"
rel="stylesheet" type="text/css">
<script src="http://cdn.pydata.org/bokeh/release/bokeh-0.12.11.min.js"></script>
<script src="http://cdn.pydata.org/bokeh/release/bokeh-widgets-0.12.11.min.js"></script>
<script src="http://cdn.pydata.org/bokeh/release/bokeh-tables-0.12.11.min.js"></script>
<body>
<h1>Hello Bokeh!</h1>
<p> Below is a simple plot of stock closing prices </p>
{{ script }}
{{ div }}
</body>
</html>
""")
p = figure(plot_width=500, plot_height=250, x_axis_type="datetime")
p.line(df['date'], df['close'], color='navy', alpha=0.5)
p.toolbar.logo = None
script, div = components(p)
return template.render(script=script, div=div)
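# Minimal usage sketch (an assumption, not part of the original module): render the
# page built above and write it to "stocks.html" in the current directory so it can
# be opened in a browser.
if __name__ == '__main__':
    with open('stocks.html', 'w') as f:
        f.write(get_template())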
|
py | 1a5432055252b8b054357c9191785fed28ceaf46 | class Solution:
# @param {character[][]} grid
# @return {integer}
def numIslands(self, grid):
if not grid:
return 0
m = len(grid)
n = len(grid[0])
count = 0
for i in range(m):
for j in range(n):
if grid[i][j] == '1':
count += 1
self.touch(grid, i,j,m,n)
return count
def touch(self,grid,i,j,m,n):
grid[i][j] = '0'
if i > 0 and grid[i-1][j] == '1': self.touch(grid, i-1,j,m,n)
if i < m -1 and grid[i+1][j] == '1': self.touch(grid, i+1,j,m,n)
if j > 0 and grid[i][j-1] == '1': self.touch(grid, i,j-1,m,n)
if j < n -1 and grid[i][j+1] == '1': self.touch(grid, i,j+1,m,n)
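    # The algorithm is a depth-first flood fill: each time an unvisited '1' is found,
    # the count is incremented and touch() sinks that whole island by overwriting its
    # cells with '0', so every cell is processed at most a constant number of times
    # (O(m*n) overall). Note that the grid is modified in place and its cells are
    # expected to be the characters '1'/'0'.
    # Hypothetical example:
    #   Solution().numIslands([['1', '1', '0'],
    #                          ['0', '1', '0'],
    #                          ['0', '0', '1']])  # -> 2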
|
py | 1a543384a85e44e55cfb5271e8c40276841f878d | #!/usr/bin/env python
# -*- coding: utf8 -*-
import leather
from agate import Table
from agate.data_types import Number, Text
class TestTableCharts(leather.LeatherTestCase):
def setUp(self):
self.rows = (
(1, 4, 'a'),
(2, 3, 'b'),
(None, 2, u'👍')
)
self.number_type = Number()
self.text_type = Text()
self.column_names = ['one', 'two', 'three']
self.column_types = [self.number_type, self.number_type, self.text_type]
self.table = Table(self.rows, self.column_names, self.column_types)
def test_bar_chart(self):
text = self.table.bar_chart(label='three', value='one')
svg = self.parse_svg(text)
self.assertElementCount(svg, '.axis', 2)
self.assertElementCount(svg, '.series', 1)
self.assertElementCount(svg, '.bars', 1)
self.assertElementCount(svg, 'rect', 3)
text2 = self.table.bar_chart(label=2, value=0)
self.assertEqual(text, text2)
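        # The equality check above shows that label/value accept either column names
        # or 0-based column indices and produce identical SVG output.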
def test_column_chart(self):
text = self.table.column_chart(label='three', value='one')
svg = self.parse_svg(text)
self.assertElementCount(svg, '.axis', 2)
self.assertElementCount(svg, '.series', 1)
self.assertElementCount(svg, '.columns', 1)
self.assertElementCount(svg, 'rect', 3)
text2 = self.table.column_chart(label=2, value=0)
self.assertEqual(text, text2)
def test_line_chart(self):
text = self.table.line_chart(x='one', y='two')
svg = self.parse_svg(text)
self.assertElementCount(svg, '.axis', 2)
self.assertElementCount(svg, '.series', 1)
self.assertElementCount(svg, 'path', 1)
text2 = self.table.line_chart(x=0, y=1)
self.assertEqual(text, text2)
def test_scatterplot(self):
text = self.table.scatterplot(x='one', y='two')
svg = self.parse_svg(text)
self.assertElementCount(svg, '.axis', 2)
self.assertElementCount(svg, '.series', 1)
self.assertElementCount(svg, '.dots', 1)
self.assertElementCount(svg, 'circle', 2)
text2 = self.table.scatterplot(x=0, y=1)
self.assertEqual(text, text2)
|
py | 1a54341bd0f0c73d02c3fc73ae3c3b33f2fe0dee | import librosa
import os
import numpy as np
import matplotlib.pyplot as plt
from scipy.fftpack import dct
from scipy.signal import spectrogram
import operator
import pickle
import time
import csv
from random import shuffle
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from sklearn import metrics
import logging
################################################# LOG FILE CONFIGURATION ##################################
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(levelname)s %(message)s',
filename='log_file_combined_dict_norm_all_examples.log',
filemode='w')
################################################# MEL PARAMETERS DEFINITION ##################################
class parameters:
def __init__(self):
# specify file path when executing
self.win_size = 1024
self.hop_size = 512
self.min_freq = 80
self.max_freq = 4000
self.num_mel_filts = 40
self.n_dct = 13
param = parameters()
logging.info(param.__dict__)
##################################### FUNCTIONS FOR MEL SPECTRUM CALCULATION ##################################
# converts frequency in Hz to Mel values
# pass a numpy array to the function
def hz2mel(hzval):
melval = 1127.01028*np.log(1+hzval/700)
return melval
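# Sanity check (a known property of this formula): hz2mel(np.array([1000.0])) is
# approximately 1000, i.e. 1000 Hz maps to roughly 1000 mel.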
# function tested with example
# converts Mel values to Hz
# pass a numpy array to the function
def mel2hz(melval):
hzval = 700*(np.exp(melval/1127.01028)-1)
return hzval
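# mel2hz is the inverse of hz2mel above, so mel2hz(hz2mel(x)) recovers x up to
# floating-point error.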
# function tested with example
# values: the frequencies to look up (here, the mel-spaced band edge frequencies in Hz)
# freq_list: the frequency grid to snap to (here, the FFT bin frequencies f)
def find_nearest(values,freq_list):
q_freq_ind=[]
for value in values.tolist():
ind = np.argmin(np.abs(value-freq_list))
q_freq_ind.append(ind)
return np.asarray(q_freq_ind)
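# compute_mfcc below uses these indices as the left corner, centre and right corner
# of each triangular mel filter.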
def compute_mfcc(filepath,win_size,hop_size,min_freq,max_freq,num_mel_filts,n_dct):
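    # Note: despite the name, this function returns a log-mel spectrogram; the dct
    # import and the n_dct argument are never used, so no cepstral (DCT) step is applied.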
melval = hz2mel(np.array([min_freq,max_freq]))
min_mel = melval[0]
max_mel = melval[1]
step = (max_mel-min_mel)/(num_mel_filts-1)
mel_freq_list = np.linspace(min_mel,max_mel,num_mel_filts)
mel_freq_list = np.concatenate(([mel_freq_list[0]-step],mel_freq_list,[mel_freq_list[-1]+step]))
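    # One extra mel point is prepended and appended so that each of the
    # num_mel_filts filter centres has a left and a right neighbour to form its triangle.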
hz_freq_list = mel2hz(mel_freq_list)
    nfft = win_size # number of FFT points for the spectrogram
# make sure librosa is imported
x,Fs = librosa.load(filepath,sr=16000)
f,t,Sxx = spectrogram(x,Fs,nperseg=win_size,noverlap=win_size-hop_size,nfft=nfft)
Sxx = np.square(np.abs(Sxx))
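    # Note (observation only, behaviour unchanged): scipy.signal.spectrogram returns a
    # power spectral density by default (mode='psd'), so Sxx is already a power estimate
    # and the squaring above raises the magnitude to the 4th power; since taking the log
    # of a squared quantity only doubles the dB values, the per-bin standardization
    # applied later absorbs this scaling.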
    # the spectrogram has to be plotted flipped up-down to make the lower frequencies show at the bottom
    fft_freq_indices = find_nearest(hz_freq_list,f)  # snap each mel-spaced band edge (in Hz) to the index of the nearest FFT bin frequency
# print(fft_freq_indices,'len=',fft_freq_indices.shape)
filt_bank = np.zeros((1,int(nfft/2) + 1))
    for i in range(1,fft_freq_indices.shape[0]-1):  # from the second element to the second-to-last element
a = fft_freq_indices[i-1]
b = fft_freq_indices[i]
c = fft_freq_indices[i+1]
t1 = (1/(b-a))*np.linspace(a-a,b-a,b-a+1)
t2 = (-1/(c-b))*np.linspace(b-c,c-c,c-b+1)
filt = np.concatenate((t1,t2[1:]))
filt = filt/(np.sum(filt))
filt_zero_pad = np.zeros((1,int(nfft/2)+1))
filt_zero_pad[0,a:c+1] = filt
filt_bank = np.concatenate((filt_bank,filt_zero_pad),axis=0)
filt_bank = filt_bank[1:,:]
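    # filt_bank now has shape (num_mel_filts, nfft/2 + 1); the all-zero row used to
    # seed the concatenation has just been dropped.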
mel_spec = np.dot(filt_bank,Sxx)
mel_spec = np.where(mel_spec == 0, np.finfo(float).eps, mel_spec) # for numerical stability
mel_spec = 20*np.log10(mel_spec)
fs_mfcc = mel_spec.shape[1]
return mel_spec,fs_mfcc # returning the mel_spectrum
############################# CREATING COMBINED DICT BY JOINING ANNOTATIONS+CLIP_INFO ##########################
# /Users/nitin/Documents/Music Info Retrieval/project/database/magnatagatune/data_from_trey
f = open('/scratch/nn1174/MIR/data_from_trey/annotations_final.txt', 'r')
reader = csv.reader(f, delimiter='\t')
tags = next(reader)
annotation_dict = {}
while True:
try:
values = next(reader)
        annotation_dict[values[0]] = {}  # annotation_dict is keyed by clip id (values[0]); each entry maps tag names to their values for that clip
for tagnames, value in zip(tags[1:], values[1:]):
annotation_dict[values[0]][tagnames] = value
except StopIteration:
logging.info('end tag annotations file')
break
ff = open('/scratch/nn1174/MIR/data_from_trey/clip_info_final.txt', 'r')
rreader = csv.reader(ff, delimiter='\t')
metadata = next(rreader)
clip_inf_dict = {}
while True:
try:
values = next(rreader)
# values1 = next(reader, None)
clip_inf_dict[values[0]] = {}
for metdat, val in zip(metadata[1:], values[1:]):
clip_inf_dict[values[0]][metdat] = val
except StopIteration:
logging.info('end clip info file')
break
combined_dict = {}
for key in annotation_dict.keys(): # you can list as many input dicts as you want here
combined_dict[key] = annotation_dict[key].copy()
combined_dict[key].update(clip_inf_dict[key])
# IMPORTANT: the list "keys" below fixes an ordering of the clip ids; the train/val/test index pickles loaded later index into this list
keys = list(combined_dict.keys())
logging.info('done combining the dictionaries')
logging.info(len(combined_dict.keys()))
logging.info(len(combined_dict['2'].keys()))
################################ LOADING ALL PICKLE FILES NEEDED FOR INDEXING ##################################
with open('train_ind.pickle','rb') as handle:
train_ind = pickle.load(handle)
with open('val_ind.pickle','rb') as handle:
val_ind = pickle.load(handle)
with open('test_ind.pickle','rb') as handle:
test_ind = pickle.load(handle)
### loading sorted tags
with open('sorted_tags.pickle', 'rb') as handle:
sorted_stats = pickle.load(handle)
################################## CALCULATING THE NORMALIZATION COEFFICIENTS ##################################
start_time = time.time()
spec_mat_train = np.zeros((len(train_ind),40,909))
datapath = '/scratch/nn1174/MIR/mp3_all'
logging.info('starting to create spec_mat_train to compute the normalization coefficients')
for i,ind in enumerate(train_ind):
if keys[ind]=='35644' or keys[ind]=='55753' or keys[ind]=='57881':
spec_mat_train[i,:,:] = np.zeros((40,909))
else:
songpath = os.path.join(datapath,combined_dict[keys[ind]]['mp3_path'])
spec,fs_spec = compute_mfcc(songpath,param.win_size,param.hop_size,param.min_freq,
param.max_freq,param.num_mel_filts,param.n_dct)
spec_mat_train[i,:,:] = spec
if i%20==0:
logging.info(i)
###### normalizing parameters
mn = np.mean(spec_mat_train,axis=0)
stdev = np.std(spec_mat_train,axis=0)
norm_coeff = [mn,stdev]
with open('norm_coeff.pickle','wb') as handle:
pickle.dump(norm_coeff,handle)
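# The normalization coefficients are per-(mel bin, frame) arrays of shape (40, 909),
# computed from the training split only and reused below for the validation and test
# splits, so no statistics leak from the held-out data.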
######
logging.info('got the mean and std')
########################## ADDING MEL SPECTRUM AND OUTPUT FIELDS IN DICTIONARY ##################################
logging.info('appending spectrum+output to validation set')
for i,ind in enumerate(val_ind):
if keys[ind]=='35644' or keys[ind]=='55753' or keys[ind]=='57881':
combined_dict[keys[ind]]['mel_spectrum'] = np.zeros((40,909))
combined_dict[keys[ind]]['output'] = np.zeros((50))
else:
songpath = os.path.join(datapath,combined_dict[keys[ind]]['mp3_path'])
spec,fs_spec = compute_mfcc(songpath,param.win_size,param.hop_size,param.min_freq,
param.max_freq,param.num_mel_filts,param.n_dct)
spec = (spec-mn)/stdev # normalize it
combined_dict[keys[ind]]['mel_spectrum'] = (spec,fs_spec)
output=[]
for j,tag in enumerate(sorted_stats):
if j>49:
break
else:
output.append(int(combined_dict[keys[ind]][tag[0]]))
output = np.array(output)
combined_dict[keys[ind]]['output'] = output
if i%20==0:
logging.info(i)
logging.info('appending spectrum+output to test set')
for i,ind in enumerate(test_ind):
if keys[ind]=='35644' or keys[ind]=='55753' or keys[ind]=='57881':
combined_dict[keys[ind]]['mel_spectrum'] = np.zeros((40,909))
combined_dict[keys[ind]]['output'] = np.zeros((50))
else:
songpath = os.path.join(datapath,combined_dict[keys[ind]]['mp3_path'])
spec,fs_spec = compute_mfcc(songpath,param.win_size,param.hop_size,param.min_freq,
param.max_freq,param.num_mel_filts,param.n_dct)
spec = (spec-mn)/stdev # normalize it
combined_dict[keys[ind]]['mel_spectrum'] = (spec,fs_spec)
output=[]
for j,tag in enumerate(sorted_stats):
if j>49:
break
else:
output.append(int(combined_dict[keys[ind]][tag[0]]))
output = np.array(output)
combined_dict[keys[ind]]['output'] = output
if i%20 == 0:
logging.info(i)
logging.info('appending spectrum+output to train set')
for i,ind in enumerate(train_ind):
if keys[ind]=='35644' or keys[ind]=='55753' or keys[ind]=='57881':
combined_dict[keys[ind]]['mel_spectrum'] = spec_mat_train[i,:,:]
combined_dict[keys[ind]]['output'] = np.zeros((50))
else:
spec = spec_mat_train[i,:,:] # using already calculated spectrograms
spec = (spec-mn)/stdev # normalize it
        combined_dict[keys[ind]]['mel_spectrum'] = (spec,909)  # frame count hard-coded to 909; this tuple element is never used downstream, so it doesn't matter
output=[]
for j,tag in enumerate(sorted_stats):
if j>49:
break
else:
output.append(int(combined_dict[keys[ind]][tag[0]]))
output = np.array(output)
combined_dict[keys[ind]]['output'] = output
if i%20 == 0:
logging.info(i)
logging.info('Done with creating the spec_matrices')
logging.info('done with generating the whole combined_dict')
with open('combined_dict_norm_all_examples.pickle', 'wb') as handle:
pickle.dump(combined_dict, handle)
logging.info('Done with Everything')
|