filename | text
---|---
the-stack_0_10025 | import ibm_boto3
from ibm_botocore.client import Config, ClientError
class CloudObjectStore:
DEFAULT_ENDPOINT = \
'https://s3.us-east.cloud-object-storage.appdomain.cloud'
DEFAULT_AUTH_ENDPOINT = \
'https://iam.cloud.ibm.com/identity/token'
'''
Interface to IBM Cloud Object Store, typical values:
bucket_name: name of your storage bucket
api_key: your API key
(go to Cloud Storage dashboard -> Service credentials)
resource_crn: your bucket CRN
(go to your bucket -> Configuration: Bucket instance CRN)
endpoint: for external access,
"https://s3.us-east.cloud-object-storage.appdomain.cloud"
endpoint: for internal access,
"https://s3.private.us-east.cloud-object-storage.appdomain.cloud"
auth_endpoint: "https://iam.cloud.ibm.com/identity/token"
'''
def __init__(self, bucket_name,
api_key,
resource_crn,
endpoint=DEFAULT_ENDPOINT,
auth_endpoint=DEFAULT_AUTH_ENDPOINT,
):
self.bucket_name = bucket_name
self.COS_API_KEY_ID = api_key
self.COS_RESOURCE_CRN = resource_crn
self.COS_ENDPOINT = endpoint
self.COS_AUTH_ENDPOINT = auth_endpoint
self.cos = ibm_boto3.resource(
"s3",
ibm_api_key_id=self.COS_API_KEY_ID,
ibm_service_instance_id=self.COS_RESOURCE_CRN,
ibm_auth_endpoint=self.COS_AUTH_ENDPOINT,
config=Config(signature_version="oauth"),
endpoint_url=self.COS_ENDPOINT
)
def get_bucket_contents(self):
try:
files = self.cos.Bucket(self.bucket_name).objects.all()
return [file.key for file in files]
except ClientError as be:
print("CLIENT ERROR: {0}\n".format(be))
except Exception as e:
print("Unable to retrieve bucket contents: {0}".format(e))
def get_item(self, item_name):
try:
file = self.cos.Object(self.bucket_name, item_name).get()
return file["Body"].read()
except ClientError as be:
print("CLIENT ERROR: {0}\n".format(be))
except Exception as e:
print("Unable to retrieve file contents: {0}".format(e))
def create_item(self, item_name, file_text):
print("Creating new item: {0}".format(item_name))
try:
self.cos.Object(self.bucket_name, item_name).put(
Body=file_text
)
except ClientError as be:
print("CLIENT ERROR: {0}\n".format(be))
except Exception as e:
print("Unable to create text file: {0}".format(e))
|
the-stack_0_10030 | # Copyright 2015 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import random
import netaddr
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from oslo_db import exception as db_exc
from oslo_log import log
from oslo_utils import uuidutils
from neutron._i18n import _, _LE
from neutron.ipam import driver as ipam_base
from neutron.ipam.drivers.neutrondb_ipam import db_api as ipam_db_api
from neutron.ipam import exceptions as ipam_exc
from neutron.ipam import requests as ipam_req
from neutron.ipam import subnet_alloc
from neutron.ipam import utils as ipam_utils
LOG = log.getLogger(__name__)
class NeutronDbSubnet(ipam_base.Subnet):
"""Manage IP addresses for Neutron DB IPAM driver.
This class implements the strategy for IP address allocation and
deallocation for the Neutron DB IPAM driver.
"""
@classmethod
def create_allocation_pools(cls, subnet_manager, context, pools, cidr):
for pool in pools:
# IPv6 addresses that start '::1', '::2', etc cause IP version
# ambiguity when converted to integers by pool.first and pool.last.
# Infer the IP version from the subnet cidr.
ip_version = cidr.version
subnet_manager.create_pool(
context,
netaddr.IPAddress(pool.first, ip_version).format(),
netaddr.IPAddress(pool.last, ip_version).format())
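# Illustration of the ambiguity noted above (standalone sketch using netaddr
# directly, outside this driver): the same integer maps to different addresses
# depending on the IP version, so the version must be passed explicitly.
#
#   >>> import netaddr
#   >>> str(netaddr.IPAddress(1, 4))
#   '0.0.0.1'
#   >>> str(netaddr.IPAddress(1, 6))
#   '::1'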
@classmethod
def create_from_subnet_request(cls, subnet_request, ctx):
ipam_subnet_id = uuidutils.generate_uuid()
subnet_manager = ipam_db_api.IpamSubnetManager(
ipam_subnet_id,
subnet_request.subnet_id)
# Create subnet resource
subnet_manager.create(ctx)
# If allocation pools are not specified, define them around
# the subnet's gateway IP
if not subnet_request.allocation_pools:
pools = ipam_utils.generate_pools(subnet_request.subnet_cidr,
subnet_request.gateway_ip)
else:
pools = subnet_request.allocation_pools
# Create IPAM allocation pools
cls.create_allocation_pools(subnet_manager, ctx, pools,
subnet_request.subnet_cidr)
return cls(ipam_subnet_id,
ctx,
cidr=subnet_request.subnet_cidr,
allocation_pools=pools,
gateway_ip=subnet_request.gateway_ip,
tenant_id=subnet_request.tenant_id,
subnet_id=subnet_request.subnet_id)
@classmethod
def load(cls, neutron_subnet_id, ctx):
"""Load an IPAM subnet from the database given its neutron ID.
:param neutron_subnet_id: neutron subnet identifier.
"""
ipam_subnet = ipam_db_api.IpamSubnetManager.load_by_neutron_subnet_id(
ctx, neutron_subnet_id)
if not ipam_subnet:
LOG.error(_LE("IPAM subnet referenced to "
"Neutron subnet %s does not exist"),
neutron_subnet_id)
raise n_exc.SubnetNotFound(subnet_id=neutron_subnet_id)
pools = []
for pool in ipam_subnet.allocation_pools:
pools.append(netaddr.IPRange(pool['first_ip'], pool['last_ip']))
neutron_subnet = cls._fetch_subnet(ctx, neutron_subnet_id)
return cls(ipam_subnet['id'],
ctx,
cidr=neutron_subnet['cidr'],
allocation_pools=pools,
gateway_ip=neutron_subnet['gateway_ip'],
tenant_id=neutron_subnet['tenant_id'],
subnet_id=neutron_subnet_id)
@classmethod
def _fetch_subnet(cls, context, id):
plugin = directory.get_plugin()
return plugin._get_subnet(context, id)
def __init__(self, internal_id, ctx, cidr=None,
allocation_pools=None, gateway_ip=None, tenant_id=None,
subnet_id=None):
# NOTE: In theory it could have been possible to grant the IPAM
# driver direct access to the database. While this is possible,
# it would have led to duplicate code and/or non-trivial
# refactorings in neutron.db.db_base_plugin_v2.
# This is because in the Neutron V2 plugin logic DB management is
# encapsulated within the plugin.
self._cidr = cidr
self._pools = allocation_pools
self._gateway_ip = gateway_ip
self._tenant_id = tenant_id
self._subnet_id = subnet_id
self.subnet_manager = ipam_db_api.IpamSubnetManager(internal_id,
self._subnet_id)
self._context = ctx
def _verify_ip(self, context, ip_address):
"""Verify whether IP address can be allocated on subnet.
:param context: neutron api request context
:param ip_address: String representing the IP address to verify
:raises: InvalidInput, IpAddressAlreadyAllocated
"""
# Ensure that the IPs are unique
if not self.subnet_manager.check_unique_allocation(context,
ip_address):
raise ipam_exc.IpAddressAlreadyAllocated(
subnet_id=self.subnet_manager.neutron_id,
ip=ip_address)
# Ensure that the IP is valid on the subnet
if not ipam_utils.check_subnet_ip(self._cidr, ip_address):
raise ipam_exc.InvalidIpForSubnet(
subnet_id=self.subnet_manager.neutron_id,
ip=ip_address)
def _generate_ip(self, context, prefer_next=False):
"""Generate an IP address from the set of available addresses."""
ip_allocations = netaddr.IPSet()
for ipallocation in self.subnet_manager.list_allocations(context):
ip_allocations.add(ipallocation.ip_address)
for ip_pool in self.subnet_manager.list_pools(context):
ip_set = netaddr.IPSet()
ip_set.add(netaddr.IPRange(ip_pool.first_ip, ip_pool.last_ip))
av_set = ip_set.difference(ip_allocations)
if av_set.size == 0:
continue
if prefer_next:
window = 1
else:
# Compute a value for the selection window
window = min(av_set.size, 30)
ip_index = random.randint(1, window)
candidate_ips = list(itertools.islice(av_set, ip_index))
allocated_ip = candidate_ips[
random.randint(0, len(candidate_ips) - 1)]
return str(allocated_ip), ip_pool.id
raise ipam_exc.IpAddressGenerationFailure(
subnet_id=self.subnet_manager.neutron_id)
def allocate(self, address_request):
# NOTE(pbondar): Ipam driver is always called in context of already
# running transaction, which is started on create_port or upper level.
# To be able to do rollback/retry actions correctly ipam driver
# should not create new nested transaction blocks.
all_pool_id = None
# NOTE(salv-orlando): It would probably better to have a simpler
# model for address requests and just check whether there is a
# specific IP address specified in address_request
if isinstance(address_request, ipam_req.SpecificAddressRequest):
# This handles both specific and automatic address requests
# Check availability of requested IP
ip_address = str(address_request.address)
self._verify_ip(self._context, ip_address)
else:
prefer_next = isinstance(address_request,
ipam_req.PreferNextAddressRequest)
ip_address, all_pool_id = self._generate_ip(self._context,
prefer_next)
# Create IP allocation request object
# The only defined status at this stage is 'ALLOCATED'.
# More states will be available in the future - e.g.: RECYCLABLE
try:
with self._context.session.begin(subtransactions=True):
# NOTE(kevinbenton): we use a subtransaction to force
# a flush here so we can capture DBReferenceErrors due
# to concurrent subnet deletions. (galera would deadlock
# later on final commit)
self.subnet_manager.create_allocation(self._context,
ip_address)
except db_exc.DBReferenceError:
raise n_exc.SubnetNotFound(
subnet_id=self.subnet_manager.neutron_id)
return ip_address
def deallocate(self, address):
# This is almost a no-op because the Neutron DB IPAM driver does not
# delete IPAllocation objects at every deallocation. The only
# operation it performs is to delete an IPRequest entry.
count = self.subnet_manager.delete_allocation(
self._context, address)
# count can hardly be greater than 1, but it can be 0...
if not count:
raise ipam_exc.IpAddressAllocationNotFound(
subnet_id=self.subnet_manager.neutron_id,
ip_address=address)
def _no_pool_changes(self, context, pools):
"""Check if pool updates in db are required."""
db_pools = self.subnet_manager.list_pools(context)
iprange_pools = [netaddr.IPRange(pool.first_ip, pool.last_ip)
for pool in db_pools]
return pools == iprange_pools
def update_allocation_pools(self, pools, cidr):
# Pools have already been validated in the subnet request object which
# was sent to the subnet pool driver. Further validation should not be
# required.
if self._no_pool_changes(self._context, pools):
return
self.subnet_manager.delete_allocation_pools(self._context)
self.create_allocation_pools(self.subnet_manager, self._context, pools,
cidr)
self._pools = pools
def get_details(self):
"""Return subnet data as a SpecificSubnetRequest"""
return ipam_req.SpecificSubnetRequest(
self._tenant_id, self.subnet_manager.neutron_id,
self._cidr, self._gateway_ip, self._pools)
class NeutronDbPool(subnet_alloc.SubnetAllocator):
"""Subnet pools backed by Neutron Database.
As this driver does not yet implement the subnet pool concept, most
operations are either trivial or no-ops.
"""
def get_subnet(self, subnet_id):
"""Retrieve an IPAM subnet.
:param subnet_id: Neutron subnet identifier
:returns: a NeutronDbSubnet instance
"""
return NeutronDbSubnet.load(subnet_id, self._context)
def allocate_subnet(self, subnet_request):
"""Create an IPAMSubnet object for the provided cidr.
This method does not actually do any operation in the driver, given
its simplified nature.
:param subnet_request: a subnet request object carrying the subnet's CIDR
:returns: a NeutronDbSubnet instance
"""
if self._subnetpool:
subnet = super(NeutronDbPool, self).allocate_subnet(subnet_request)
subnet_request = subnet.get_details()
# SubnetRequest must be an instance of SpecificSubnet
if not isinstance(subnet_request, ipam_req.SpecificSubnetRequest):
raise ipam_exc.InvalidSubnetRequestType(
subnet_type=type(subnet_request))
return NeutronDbSubnet.create_from_subnet_request(subnet_request,
self._context)
def update_subnet(self, subnet_request):
"""Update subnet info the in the IPAM driver.
The only update subnet information the driver needs to be aware of
are allocation pools.
"""
if not subnet_request.subnet_id:
raise ipam_exc.InvalidSubnetRequest(
reason=_("An identifier must be specified when updating "
"a subnet"))
if subnet_request.allocation_pools is None:
LOG.debug("Update subnet request for subnet %s did not specify "
"new allocation pools, there is nothing to do",
subnet_request.subnet_id)
return
subnet = NeutronDbSubnet.load(subnet_request.subnet_id, self._context)
cidr = netaddr.IPNetwork(subnet._cidr)
subnet.update_allocation_pools(subnet_request.allocation_pools, cidr)
return subnet
def remove_subnet(self, subnet_id):
"""Remove data structures for a given subnet.
IPAM-related data has no foreign key relationships to the neutron
subnet, so the IPAM subnet is removed manually.
"""
count = ipam_db_api.IpamSubnetManager.delete(self._context,
subnet_id)
if count < 1:
LOG.error(_LE("IPAM subnet referenced to "
"Neutron subnet %s does not exist"),
subnet_id)
raise n_exc.SubnetNotFound(subnet_id=subnet_id)
def needs_rollback(self):
return False
|
the-stack_0_10031 | import warnings
import numpy as np
from vispy.color import Colormap as VispyColormap
from vispy.scene.node import Node
from ..utils.translations import trans
from .image import Image as ImageNode
from .utils_gl import fix_data_dtype
from .vispy_base_layer import VispyBaseLayer
from .volume import Volume as VolumeNode
class ImageLayerNode:
def __init__(self, custom_node: Node = None):
self._custom_node = custom_node
self._image_node = ImageNode(None, method='auto')
self._volume_node = VolumeNode(
np.zeros((1, 1, 1), dtype=np.float32), clim=[0, 1]
)
def get_node(self, ndisplay: int) -> Node:
# Return custom node if we have one.
if self._custom_node is not None:
return self._custom_node
# Return Image or Volume node based on 2D or 3D.
if ndisplay == 2:
return self._image_node
return self._volume_node
class VispyImageLayer(VispyBaseLayer):
def __init__(self, layer, node=None):
# Use custom node from caller, or our standard image/volume nodes.
self._layer_node = ImageLayerNode(node)
# Default to 2D (image) node.
super().__init__(layer, self._layer_node.get_node(2))
self._array_like = True
self.layer.events.rendering.connect(self._on_rendering_change)
self.layer.events.interpolation.connect(self._on_interpolation_change)
self.layer.events.colormap.connect(self._on_colormap_change)
self.layer.events.contrast_limits.connect(
self._on_contrast_limits_change
)
self.layer.events.gamma.connect(self._on_gamma_change)
self.layer.events.iso_threshold.connect(self._on_iso_threshold_change)
self.layer.events.attenuation.connect(self._on_attenuation_change)
self.layer.experimental_slicing_plane.events.enabled.connect(
self._on_experimental_slicing_plane_enabled_change
)
self.layer.experimental_slicing_plane.events.position.connect(
self._on_experimental_slicing_plane_position_change
)
self.layer.experimental_slicing_plane.events.thickness.connect(
self._on_experimental_slicing_plane_thickness_change
)
self.layer.experimental_slicing_plane.events.normal.connect(
self._on_experimental_slicing_plane_normal_change
)
self._on_display_change()
self._on_data_change()
def _on_display_change(self, data=None):
parent = self.node.parent
self.node.parent = None
self.node = self._layer_node.get_node(self.layer._ndisplay)
if data is None:
data = np.zeros((1,) * self.layer._ndisplay)
if self.layer._empty:
self.node.visible = False
else:
self.node.visible = self.layer.visible
if self.layer.loaded:
self.node.set_data(data)
self.node.parent = parent
self.node.order = self.order
self.reset()
def _on_data_change(self, event=None):
if not self.layer.loaded:
# Do nothing if we are not yet loaded. Calling astype below could
# be very expensive. Let's not do it until our data has been loaded.
return
self._set_node_data(self.node, self.layer._data_view)
def _set_node_data(self, node, data):
"""Our self.layer._data_view has been updated, update our node."""
data = fix_data_dtype(data)
if self.layer._ndisplay == 3 and self.layer.ndim == 2:
data = np.expand_dims(data, axis=0)
# Check if data exceeds MAX_TEXTURE_SIZE and downsample
if self.MAX_TEXTURE_SIZE_2D is not None and self.layer._ndisplay == 2:
data = self.downsample_texture(data, self.MAX_TEXTURE_SIZE_2D)
elif (
self.MAX_TEXTURE_SIZE_3D is not None and self.layer._ndisplay == 3
):
data = self.downsample_texture(data, self.MAX_TEXTURE_SIZE_3D)
# Check if ndisplay has changed and the current node type needs updating
if (
self.layer._ndisplay == 3 and not isinstance(node, VolumeNode)
) or (self.layer._ndisplay == 2 and not isinstance(node, ImageNode)):
self._on_display_change(data)
else:
node.set_data(data)
if self.layer._empty:
node.visible = False
else:
node.visible = self.layer.visible
# Call to update order of translation values with new dims:
self._on_matrix_change()
node.update()
def _on_interpolation_change(self, event=None):
self.node.interpolation = self.layer.interpolation
def _on_rendering_change(self, event=None):
if isinstance(self.node, VolumeNode):
self.node.method = self.layer.rendering
self._on_attenuation_change()
self._on_iso_threshold_change()
def _on_colormap_change(self, event=None):
self.node.cmap = VispyColormap(*self.layer.colormap)
def _on_contrast_limits_change(self, event=None):
self.node.clim = self.layer.contrast_limits
def _on_gamma_change(self, event=None):
if len(self.node.shared_program.frag._set_items) > 0:
self.node.gamma = self.layer.gamma
def _on_iso_threshold_change(self, event=None):
if isinstance(self.node, VolumeNode):
self.node.threshold = self.layer.iso_threshold
def _on_attenuation_change(self, event=None):
if isinstance(self.node, VolumeNode):
self.node.attenuation = self.layer.attenuation
def _on_experimental_slicing_plane_enabled_change(self, event=None):
if isinstance(self.node, VolumeNode):
if self.layer.experimental_slicing_plane.enabled is True:
raycasting_mode = 'plane'
else:
raycasting_mode = 'volume'
self.node.raycasting_mode = raycasting_mode
def _on_experimental_slicing_plane_thickness_change(self, event=None):
if isinstance(self.node, VolumeNode):
self.node.plane_thickness = (
self.layer.experimental_slicing_plane.thickness
)
def _on_experimental_slicing_plane_position_change(self, event=None):
if isinstance(self.node, VolumeNode):
self.node.plane_position = (
self.layer.experimental_slicing_plane.position
)
def _on_experimental_slicing_plane_normal_change(self, event=None):
if isinstance(self.node, VolumeNode):
self.node.plane_normal = (
self.layer.experimental_slicing_plane.normal
)
def reset(self, event=None):
self._reset_base()
self._on_interpolation_change()
self._on_colormap_change()
self._on_contrast_limits_change()
self._on_gamma_change()
self._on_rendering_change()
self._on_experimental_slicing_plane_enabled_change()
self._on_experimental_slicing_plane_position_change()
self._on_experimental_slicing_plane_normal_change()
self._on_experimental_slicing_plane_thickness_change()
def downsample_texture(self, data, MAX_TEXTURE_SIZE):
"""Downsample data based on maximum allowed texture size.
Parameters
----------
data : array
Data to be downsampled if needed.
MAX_TEXTURE_SIZE : int
Maximum allowed texture size.
Returns
-------
data : array
Data that now fits inside texture.
"""
if np.any(np.greater(data.shape, MAX_TEXTURE_SIZE)):
if self.layer.multiscale:
raise ValueError(
trans._(
"Shape of in dividual tiles in multiscale {shape} cannot exceed GL_MAX_TEXTURE_SIZE {texture_size}. Rendering is currently in {ndisplay}D mode.",
deferred=True,
shape=data.shape,
texture_size=MAX_TEXTURE_SIZE,
ndisplay=self.layer._ndisplay,
)
)
warnings.warn(
trans._(
"data shape {shape} exceeds GL_MAX_TEXTURE_SIZE {texture_size} in at least one axis and will be downsampled. Rendering is currently in {ndisplay}D mode.",
deferred=True,
shape=data.shape,
texture_size=MAX_TEXTURE_SIZE,
ndisplay=self.layer._ndisplay,
)
)
downsample = np.ceil(
np.divide(data.shape, MAX_TEXTURE_SIZE)
).astype(int)
scale = np.ones(self.layer.ndim)
for i, d in enumerate(self.layer._dims_displayed):
scale[d] = downsample[i]
self.layer._transforms['tile2data'].scale = scale
self._on_matrix_change()
slices = tuple(slice(None, None, ds) for ds in downsample)
data = data[slices]
return data
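# Standalone sketch of the downsampling rule used in downsample_texture above
# (illustrative helper name; not part of the napari layer classes and omitting
# the tile2data scale bookkeeping): pick an integer stride per axis so every
# axis fits within the texture limit, then take a strided slice.
def _downsample_to_fit_sketch(data, max_texture_size):
    downsample = np.ceil(np.divide(data.shape, max_texture_size)).astype(int)
    slices = tuple(slice(None, None, int(ds)) for ds in downsample)
    return data[slices]

# e.g. _downsample_to_fit_sketch(np.zeros((5000, 3000)), 2048).shape -> (1667, 1500)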
|
the-stack_0_10033 | ###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
from __future__ import print_function
import random
from os import environ
from twisted.internet.defer import inlineCallbacks
from autobahn.twisted.util import sleep
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component that publishes events with no payload
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
counter = 0
while True:
print("publish: com.myapp.heartbeat")
self.publish(u'com.myapp.heartbeat')
yield sleep(1)
if __name__ == '__main__':
runner = ApplicationRunner(
environ.get("AUTOBAHN_DEMO_ROUTER", u"ws://127.0.0.1:8080/ws"),
u"realm1",
)
runner.run(Component)
|
the-stack_0_10034 | import unittest
from tri.delaunay.helpers import ToPointsAndSegments
from grassfire import calc_skel, calc_offsets
from grassfire.events import at_same_location
from grassfire.test.intersection import segments_intersecting
from grassfire.vectorops import dist
import fixtures
def all_tests():
"""Find all functions inside the *fixtures* module
and return a list with all function objects"""
import inspect
all_functions = inspect.getmembers(fixtures, inspect.isfunction)
return [fn for fn_nm, fn in sorted(all_functions)]
def make_test_cases(fixtures):
"""For all functions in the list, make
an entry in the cases dictionary, by
invoking the function.
"""
cases = {}
for i, f in enumerate(fixtures):
data, total, node, infinite, = f()
assert f.__name__ not in cases, "duplicate test name ({}) found".format(
f.__name__
)
cases[f.__name__] = (
"* {:>2d}: ".format(i) + str(f.__doc__),
data,
total,
node,
infinite,
)
return cases
EXPENSIVE_POST_CONDITION = True
CASES = make_test_cases(all_tests())
INTERACTIVE = False
# CASES = make_test_cases([all_tests()[48]])
# INTERACTIVE = True
# After: https://stackoverflow.com/a/20870875
class TestSequenceMeta(type):
"""A meta class for all our TestCases"""
def __new__(mcs, name, bases, dict):
def gen_test(description, data, total, node, infinite):
def test(self):
if INTERACTIVE:
skel = calc_skel(
data, pause=True, output=True, internal_only=False, shrink=True
# data, pause=False, output=False, internal_only=False, shrink=True
)
else:
skel = calc_skel(data)
# check the number of segments in the skeleton
self.assertEqual(len(skel.segments()), total)
# check the number of skeleton nodes
self.assertEqual(len(skel.sk_nodes), node)
# check the number of kinetic vertices that are (not) stopped
not_stopped = [v for v in skel.vertices if v.stops_at is None]
stopped = [v for v in skel.vertices if v.stops_at is not None and v.start_node is not v.stop_node]
self.assertEqual(len(not_stopped), infinite)
self.assertEqual(len(stopped), total - infinite)
# check cross relationship between kinetic vertices and skeleton nodes
for v in skel.vertices:
# exact same starting location
if abs(v.velocity[0]) < 100 and abs(v.velocity[1]) < 100: # check only 'slow' moving vertices
self.assertTrue(at_same_location([v.start_node, v], v.starts_at), "{} [{}] {} does not have correct start_node(!) position".format(id(v), v.info, v.velocity))
# quite close at the stop node (given the vertex + its direction/speed)
if True and v.stops_at is not None and not v.inf_fast and (abs(v.velocity[0]) < 100 and abs(v.velocity[1]) < 100):
d = dist(
v.stop_node.position_at(v.stops_at),
v.position_at(v.stops_at),
)
self.assertAlmostEqual(
d,
0.0,
2,
"{} [{}] velocity '{}' does not have correct stop_node position -- dist: {}".format(id(v), v.info, v.velocity, d)
)
# self.assertTrue(at_same_location([v.stop_node, v], v.stops_at),
# '{} != {}; {}'.format(v.stop_node.position_at(v.stops_at), v.position_at(v.stops_at),
# dist(v.stop_node.position_at(v.stops_at), v.position_at(v.stops_at)))
# )
if EXPENSIVE_POST_CONDITION == True:
# check that we do not have any self intersections between segments
self.assertFalse(
segments_intersecting(skel.segments()),
"intersection between straight skeleton segments found",
)
# offset segments should not intersect
# (FIXME: these use left_at of kinetic vertices, also check right_at)
last_evt_time = max(v.stops_at for v in skel.vertices if v.stops_at is not None)
offset_segments = [
(line[0], line[1]) for line in calc_offsets(skel, last_evt_time, 25)
]
self.assertFalse(
segments_intersecting(offset_segments),
"Intersection in offsets found",
)
# with open("/tmp/offsets.wkt", "w") as fh:
# for segment in offset_segments:
# s = "LINESTRING({0[0]} {0[1]}, {1[0]} {1[1]})".format(segment[0], segment[1])
# fh.write(s)
# fh.write("\n")
# set the docstring of the test function
test.__doc__ = description
return test
for tname in CASES:
test_name = "test_%s" % tname
dict[test_name] = gen_test(*CASES[tname])
return type.__new__(mcs, name, bases, dict)
class GrassfireTestCase(unittest.TestCase, metaclass=TestSequenceMeta):
pass
if __name__ == "__main__":
if INTERACTIVE:
import logging
import sys
root = logging.getLogger()
root.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s - %(message)s")
ch.setFormatter(formatter)
root.addHandler(ch)
# import cProfile
# command = """unittest.main(verbosity=10)"""
# cProfile.runctx( command, globals(), locals(), filename="/tmp/gf.profile" )
unittest.main()
|
the-stack_0_10035 | from typing import Callable, List, Tuple
from outdated_item_selection_strategy.no_update import *
from outdated_item_selection_strategy.oldest_chunks_update import *
from outdated_item_selection_strategy.last_n_chunks_update import *
from outdated_item_selection_strategy.regular_interval_update import *
from outdated_item_selection_strategy.binned_update import *
UPDATE_STRATEGY_LABELS = [
"no update",
"oldest n chunks",
"last n chunks",
"regular intervals",
"outdated bins"
]
def get_update_strategies(n_dims: int, n_chunks: int, max_age: int, n_bins: int) -> List[Tuple[str, Callable[[], OutdatedItemSelectionStrategy]]]:
return list(
map(
lambda label: (label, lambda: get_update_strategy(label, n_dims, n_chunks, max_age, n_bins)),
UPDATE_STRATEGY_LABELS
)
)
def get_update_strategy(label: str, n_dims: int, n_chunks: int, max_age: int, n_bins: int) -> OutdatedItemSelectionStrategy:
if label == "no update":
return NoUpdate(n_dims=n_dims, storage=None)
elif label == "oldest n chunks":
return OldestChunksUpdate(n_dims=n_dims, storage=None, max_age=max_age)
elif label == "last n chunks":
return LastNChunksUpdate(n_dims=n_dims, n_chunks=n_chunks, storage=None)
elif label == "regular intervals":
return RegularIntervalUpdate(n_dims=n_dims, n_chunks=n_chunks, storage=None, max_age=max_age)
elif label == "outdated bins":
return BinnedUpdate(n_dims=n_dims, storage=None, n_bins=n_bins)
|
the-stack_0_10037 | import pytest
from src.project.risks import Risk
from src.project.risks.helpers import RiskCounterMeasure, RiskImpact, RiskProbabilty, RiskScore
from tests.faker import faker
@pytest.fixture
def risk():
yield Risk(risk_name="Fake Risk Name", probability=50, impact=100)
def test_create_Risk_object_directly(monkeypatch, stakeholder, risk):
assert risk.risk_name == "Fake Risk Name"
assert risk.impact == 100
assert risk.probability == 50
assert risk.risk_owner is None
assert risk.description is None
assert risk.counter_measure is None
assert risk.get_risk_score() == RiskScore.HIGH
assert isinstance(risk.get_risk_score(), RiskScore)
monkeypatch.setattr(risk, "risk_owner", stakeholder, raising=True)
assert risk.risk_owner == stakeholder
monkeypatch.setattr(risk, "description", "Fake Description", raising=True)
assert risk.description == "Fake Description"
def test_cannot_set_unallowed_counter_measure(risk):
with pytest.raises(AssertionError):
setattr(risk, "counter_measure", "Fake counter measure")
assert risk.counter_measure is None
@pytest.mark.parametrize("counter_measure", ["ReDuce", "prevenT", "aCCepT", "transfer"])
def test_can_set_allowed_counter_measures_and_value_is_case_insensitive(counter_measure, risk):
setattr(risk, "counter_measure", counter_measure)
assert isinstance(risk.counter_measure, RiskCounterMeasure)
assert risk.counter_measure == RiskCounterMeasure(counter_measure.upper())
@pytest.mark.parametrize("wrong_classmethod", ["not_a_real_classmethod", "maybe_probability_high_impact"])
def test_access_to_dynamic_classmethod_not_matched_by_regex_raises(wrong_classmethod):
with pytest.raises(AttributeError):
getattr(Risk, wrong_classmethod)
@pytest.mark.parametrize(
"class_method",
[
"rare_probability_high_impact",
"rare_probability_medium_impact",
"rare_probability_low_impact",
"unlikely_probability_high_impact",
"unlikely_probability_medium_impact",
"unlikely_probability_low_impact",
"moderate_probability_high_impact",
"moderate_probability_medium_impact",
"moderate_probability_low_impact",
"likely_probability_high_impact",
"likely_probability_medium_impact",
"likely_probability_low_impact",
"certain_probability_high_impact",
"certain_probability_medium_impact",
"certain_probability_low_impact",
],
)
def test_access_to_dynamic_classmethod_matched_by_regex_will_not_raise(class_method):
name = faker.unique.name()
instance = getattr(Risk, class_method)(risk_name=name)
assert instance.risk_name == name
assert isinstance(instance.get_risk_score(), RiskScore)
probability, _, impact, _ = class_method.split("_")
probability_value = RiskProbabilty[probability.upper()].value
impact_value = RiskImpact[impact.upper()].value
assert instance.get_risk_score() == RiskScore.get_risk_score(int(probability_value / 100 * impact_value))
|
the-stack_0_10042 | # Author: Hamzeh Alsalhi <[email protected]>
#
# License: BSD 3 clause
import numpy as np
import scipy.sparse as sp
import array
from . import check_random_state
from ._random import sample_without_replacement
__all__ = ["sample_without_replacement"]
def _random_choice_csc(n_samples, classes, class_probability=None, random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of \
shape (n_classes,), default=None
Class distribution of each column. If None, uniform distribution is
assumed.
random_state : int, RandomState instance or None, default=None
Controls the randomness of the sampled classes.
See :term:`Glossary <random_state>`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array("i")
indices = array.array("i")
indptr = array.array("i", [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != "i":
raise ValueError("class dtype %s is not supported" % classes[j].dtype)
classes[j] = classes[j].astype(np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
if not np.isclose(np.sum(class_prob_j), 1.0):
raise ValueError(
"Probability array at index {0} does not sum to one".format(j)
)
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError(
"classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(
j, classes[j].shape[0], class_prob_j.shape[0]
)
)
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(
n_population=n_samples, n_samples=nnz, random_state=random_state
)
indices.extend(ind_sample)
# Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = class_probability_nz / np.sum(
class_probability_nz
)
classes_ind = np.searchsorted(
class_probability_nz_norm.cumsum(), rng.rand(nnz)
)
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr), (n_samples, len(classes)), dtype=int)
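# Usage sketch (illustrative values; this helper is private to the module):
#
#   >>> classes = [np.array([0, 1]), np.array([0, 2, 3])]
#   >>> probs = [np.array([0.5, 0.5]), np.array([0.6, 0.1, 0.3])]
#   >>> Y = _random_choice_csc(10, classes, probs, random_state=42)
#   >>> Y.shape
#   (10, 2)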
|
the-stack_0_10043 | # Development specific settings
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'postgres',
'USER': 'postgres',
'HOST': 'db',
'PORT': 5432,
}
}
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
ALLOWED_HOSTS = ['0.0.0.0']
|
the-stack_0_10046 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
post_ongoing_update.py
Script to post an ongoing books update on tumblr.
"""
import datetime
import json
import random
import sys
import traceback
from optparse import OptionParser
from twitter import TwitterHTTPError
from gluon import *
from applications.zcomx.modules.creators import Creator
from applications.zcomx.modules.stickon.dal import RecordGenerator
from applications.zcomx.modules.facebook import \
Authenticator as FbAuthenticator, \
FacebookAPIError, \
Poster as FbPoster, \
TextDataPreparer as FbTextDataPreparer
from applications.zcomx.modules.social_media import OngoingPost
from applications.zcomx.modules.tumblr import \
Authenticator, \
Poster, \
TextDataPreparer, \
postable_activity_log_ids
from applications.zcomx.modules.tweeter import \
Authenticator as TwAuthenticator, \
Poster as TwPoster, \
TextDataPreparer as TwTextDataPreparer, \
creators_in_ongoing_post
from applications.zcomx.modules.zco import \
IN_PROGRESS, \
SITE_NAME
from applications.zcomx.modules.logger import set_cli_logging
VERSION = 'Version 0.1'
def post_on_facebook(ongoing_post):
"""Post on facebook
Args:
ongoing_post: OngoingPost instance
Returns:
str, facebook post id
"""
LOG.debug(
'Creating facebook posting for date: %s', str(ongoing_post.post_date))
settings = current.app.local_settings
credentials = {
'email': settings.facebook_email,
'password': settings.facebook_password,
'client_id': settings.facebook_client_id,
'redirect_uri': settings.facebook_redirect_uri,
'page_name': settings.facebook_page_name
}
client = FbAuthenticator(credentials).authenticate()
poster = FbPoster(client)
facebook_data = {'tumblr_post_id': ongoing_post.tumblr_post_id}
text_data = FbTextDataPreparer(facebook_data).data()
error = None
try:
result = poster.post_text(text_data)
except FacebookAPIError as err:
error = err
result = {}
if 'id' not in result:
LOG.error(
'Facebook post failed for ongoing_post: %s', ongoing_post.id
)
LOG.error(
'Fix: post_ongoing_update.py --facebook %s', str(ongoing_post.date)
)
if error:
LOG.error(err)
return
post_id = result['id']
LOG.debug('post_id: %s', post_id)
return post_id
def post_on_tumblr(ongoing_post):
"""Post on tumblr
Args:
ongoing_post: OngoingPost instance
Returns:
str, tumblr posting id
"""
LOG.debug(
'Creating tumblr posting for date: %s', str(ongoing_post.post_date))
settings = current.app.local_settings
credentials = {
'consumer_key': settings.tumblr_consumer_key,
'consumer_secret': settings.tumblr_consumer_secret,
'oauth_token': settings.tumblr_oauth_token,
'oauth_secret': settings.tumblr_oauth_secret,
}
client = Authenticator(credentials).authenticate()
poster = Poster(client)
query = (db.activity_log.ongoing_post_id == ongoing_post.id)
generator = RecordGenerator(query)
text_data = TextDataPreparer(ongoing_post.post_date, generator).data()
if settings.tumblr_post_state:
text_data['state'] = settings.tumblr_post_state
result = poster.post_text(settings.tumblr_username, text_data)
if 'id' not in result:
LOG.error(
'Tumblr ongoing post failed for date: %s',
str(ongoing_post.post_date)
)
# Try to get an error message.
if 'meta' in result:
if 'status' in result['meta'] and 'msg' in result['meta']:
LOG.error(
'Status: %s, msg: %s',
result['meta']['status'],
result['meta']['msg']
)
if 'response' in result and 'errors' in result['response']:
for error in result['response']['errors']:
LOG.error(error)
return
post_id = result['id']
LOG.debug('post_id: %s', post_id)
return post_id
def post_on_twitter(ongoing_post):
"""Post on twitter
Args:
ongoing_post: OngoingPost instance
Returns:
str, twitter posting id
"""
LOG.debug(
'Creating twitter posting for date: %s', str(ongoing_post.post_date))
settings = current.app.local_settings
credentials = {
'consumer_key': settings.twitter_consumer_key,
'consumer_secret': settings.twitter_consumer_secret,
'oauth_token': settings.twitter_oauth_token,
'oauth_secret': settings.twitter_oauth_secret,
}
client = TwAuthenticator(credentials).authenticate()
poster = TwPoster(client)
creators = [] # [{'name': 'Joe Smoe', 'twitter': '@joesmoe'},...]
for creator_id in creators_in_ongoing_post(ongoing_post):
try:
creator = Creator.from_id(creator_id)
except LookupError:
LOG.error('Creator not found, id: %s', creator_id)
continue
creators.append({
'name': creator.name,
'twitter': creator.twitter,
})
# Shuffle creators so there is no alphabetical bias
random.shuffle(creators)
twitter_data = {
'ongoing_post': {
'creators': creators,
'tumblr_post_id': ongoing_post.tumblr_post_id,
},
'site': {'name': SITE_NAME},
}
text_data = TwTextDataPreparer(twitter_data).data()
error = None
try:
result = poster.post_text(text_data)
except TwitterHTTPError as err:
error = err
result = {}
if 'id' not in result:
LOG.error(
'Twitter post failed for ongoing_post: %s', ongoing_post.id
)
if error:
response_data = json.loads(error.response_data)
if 'errors' in response_data and response_data['errors']:
code = response_data['errors'][0]['code']
msg = response_data['errors'][0]['message']
LOG.error('Code: %s, msg: %s', code, msg)
return
post_id = result['id']
LOG.debug('post_id: %s', post_id)
return post_id
def get_ongoing_post(date, create=True):
"""Get the ongoing_post record for the given date.
Args:
date: datetime.date instance
create: If true, create an ongoing_post record if not found.
Returns:
OngoingPost instance
"""
key = dict(post_date=date)
try:
ongoing_post = OngoingPost.from_key(key)
except LookupError:
ongoing_post = None
if not ongoing_post and create:
ongoing_post = OngoingPost.from_add(key)
return ongoing_post
def man_page():
"""Print manual page-like help"""
print("""
USAGE
post_ongoing_update.py [OPTIONS] yyyy-mm-dd
OPTIONS
-f, --force
Post regardless of whether the ongoing_post record indicates a post has
already been made (i.e. ongoing_post.tumblr_post_id and
ongoing_post.twitter_post_id are set)
--facebook
Post only on facebook.
-h, --help
Print a brief help.
--man
Print man page-like help.
-p, --process-activity-logs
By default posts are made for existing ongoing_post records only
(matched on date) and no activity_log records are processed.
With this option an ongoing_post is created for the date if necessary,
and all activity_log records not yet associated with an ongoing_post
are associated with the new ongoing_post.
--tumblr
Post only on tumblr.
--twitter
Post only on twitter.
-v, --verbose
Print information messages to stdout.
--vv,
More verbose. Print debug messages to stdout.
""")
def main():
"""Main processing."""
usage = '%prog [options] YYYY-MM-DD'
parser = OptionParser(usage=usage, version=VERSION)
parser.add_option(
'-f', '--force',
action='store_true', dest='force', default=False,
help='Post regardless of whether ongoing post_ids exist.',
)
parser.add_option(
'--facebook',
action='store_true', dest='facebook', default=False,
help='Post only on facebook.',
)
parser.add_option(
'--man',
action='store_true', dest='man', default=False,
help='Display manual page-like help and exit.',
)
parser.add_option(
'-p', '--process-activity-logs',
action='store_true', dest='process_activity_logs', default=False,
help='Process activity_log records.',
)
parser.add_option(
'--tumblr',
action='store_true', dest='tumblr', default=False,
help='Post only on tumblr.',
)
parser.add_option(
'--twitter',
action='store_true', dest='twitter', default=False,
help='Post only on twitter.',
)
parser.add_option(
'-v', '--verbose',
action='store_true', dest='verbose', default=False,
help='Print messages to stdout.',
)
parser.add_option(
'--vv',
action='store_true', dest='vv', default=False,
help='More verbose.',
)
(options, args) = parser.parse_args()
if options.man:
man_page()
quit(0)
set_cli_logging(LOG, options.verbose, options.vv)
if len(args) != 1:
parser.print_help()
exit(1)
LOG.debug('Starting')
try:
date = datetime.datetime.strptime(args[0], '%Y-%m-%d').date()
except ValueError as err:
LOG.error('Invalid date: %s, %s', args[0], err)
exit(1)
if options.process_activity_logs:
activity_log_ids = postable_activity_log_ids()
if not activity_log_ids:
LOG.info('There are no postable activity_log records')
LOG.info('Nothing to do. Aborting')
exit(0)
ongoing_post = get_ongoing_post(date)
for activity_log_id in activity_log_ids:
query = (db.activity_log.id == activity_log_id)
db(query).update(ongoing_post_id=ongoing_post.id)
else:
ongoing_post = get_ongoing_post(date, create=False)
if not ongoing_post:
LOG.error('Ongoing post not found, date: %s', str(date))
exit(1)
services = []
if options.facebook:
services.append('facebook')
if options.tumblr:
services.append('tumblr')
if options.twitter:
services.append('twitter')
if not options.facebook and not options.tumblr and not options.twitter:
services = ['facebook', 'tumblr', 'twitter']
if 'tumblr' in services:
if ongoing_post.tumblr_post_id \
and ongoing_post.tumblr_post_id != IN_PROGRESS \
and not options.force:
LOG.warn(
'Ongoing_post has tumblr_post_id: %s',
ongoing_post.tumblr_post_id
)
LOG.warn('Refusing to post to tumblr without --force')
else:
tumblr_post_id = post_on_tumblr(ongoing_post)
if tumblr_post_id:
ongoing_post = OngoingPost.from_updated(
ongoing_post, dict(tumblr_post_id=tumblr_post_id))
if 'twitter' in services:
if ongoing_post.twitter_post_id \
and ongoing_post.twitter_post_id != IN_PROGRESS \
and not options.force:
LOG.warn(
'Ongoing_post has twitter_post_id: %s',
ongoing_post.twitter_post_id
)
LOG.warn('Refusing to post to twitter without --force')
else:
twitter_post_id = post_on_twitter(ongoing_post)
if twitter_post_id:
ongoing_post = OngoingPost.from_updated(
ongoing_post, dict(twitter_post_id=twitter_post_id))
if 'facebook' in services:
if not ongoing_post.tumblr_post_id \
or ongoing_post.tumblr_post_id == IN_PROGRESS:
LOG.error('Unable to post to facebook without a tumblr_post_id')
elif ongoing_post.facebook_post_id \
and ongoing_post.facebook_post_id != IN_PROGRESS \
and not options.force:
LOG.warn(
'Ongoing_post has facebook_post_id: %s',
ongoing_post.facebook_post_id
)
LOG.warn('Refusing to post to facebook without --force')
else:
facebook_post_id = post_on_facebook(ongoing_post)
if facebook_post_id:
ongoing_post = OngoingPost.from_updated(
ongoing_post, dict(facebook_post_id=facebook_post_id))
LOG.debug('Done')
if __name__ == '__main__':
# pylint: disable=broad-except
try:
main()
except SystemExit:
pass
except Exception:
traceback.print_exc(file=sys.stderr)
exit(1)
|
the-stack_0_10047 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from datetime import date
from workalendar.core import WesternCalendar, ChristianMixin
from ..registry_tools import iso_register
@iso_register('NL')
class Netherlands(WesternCalendar, ChristianMixin):
'Netherlands'
include_good_friday = True
include_easter_sunday = True
include_easter_monday = True
include_ascension = True
include_whit_sunday = True
include_whit_monday = True
include_boxing_day = True
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(5, 5, "Liberation Day"),
)
def get_king_queen_day(self, year):
"""27 April unless this is a Sunday in which case it is the 26th
Before 2013 it was called Queensday, falling on
30 April, unless this is a Sunday in which case it is the 29th.
"""
if year > 2013:
if date(year, 4, 27).weekday() != 6:
return date(year, 4, 27), "King's day"
else:
return date(year, 4, 26), "King's day"
else:
if date(year, 4, 30).weekday() != 6:
return date(year, 4, 30), "Queen's day"
else:
return date(year, 4, 29), "Queen's day"
def get_variable_days(self, year):
days = super(Netherlands, self).get_variable_days(year)
days.append(self.get_king_queen_day(year))
return days
|
the-stack_0_10048 | """I/O format for MongoDB
This plugin is designed with data monitoring in mind, to put smaller
amounts of extracted data into a database for quick access. However
it should work with any plugin.
Note that there is no check to make sure the 16MB document size
limit is respected!
"""
import strax
import numpy as np
from pymongo import MongoClient, DESCENDING
from strax import StorageFrontend, StorageBackend, Saver
from datetime import datetime
from pytz import utc as py_utc
from warnings import warn
from sys import getsizeof
export, __all__ = strax.exporter()
# Some data is stored in the buffer. Delete when either of these values
# are exceeded
DEFAULT_MONGO_BACKEND_BUFFER_MB = 200
DEFAULT_MONGO_BACKEND_BUFFER_NRUNS = 5
@export
class MongoBackend(StorageBackend):
"""Mongo storage backend"""
def __init__(self, uri, database, col_name=None):
"""
Backend for reading/writing data from Mongo
:param uri: Mongo url (with pw and username)
:param database: name of database (str)
:param col_name: collection name (str) to look for data
"""
self.client = MongoClient(uri)
self.db = self.client[database]
self.col_name = col_name
# Attributes for the chunks-buffer
self.chunks_registry = {}
self._buffered_backend_keys = []
self._buff_mb = DEFAULT_MONGO_BACKEND_BUFFER_MB
self._buff_nruns = DEFAULT_MONGO_BACKEND_BUFFER_NRUNS
def _read_chunk(self, backend_key, chunk_info, dtype, compressor):
"""See strax.Backend"""
chunk_i = chunk_info["chunk_i"]
registry_key = backend_key + str(chunk_i)
# Build the chunk-registry if not done already, also rebuild if
# the key is not in the registry (will fail below if also not
# there on rebuild).
if registry_key not in self.chunks_registry.keys():
self._build_chunk_registry(backend_key)
# Unpack info about this chunk from the query. Return empty if
# not available. Use a *string* in the registry to lookup the
# chunk-data (like we do in _build_chunk_registry).
doc = self.chunks_registry.get(registry_key, None)
if doc is None:
# Did not find the data. NB: can be that the query is off in
# the _build_chunk_registry. In case you end up here but did
# not expect that, double check that self.chunks_registry is
# not an empty dict!
raise ValueError(
f'Metadata claims chunk{chunk_i} exists but it is unknown to '
f'the chunks_registry')
else:
chunk_doc = doc.get('data', None)
if chunk_doc is None:
raise ValueError(
f'Doc for chunk_{chunk_i} in wrong format:\n{doc}')
# Convert JSON to numpy
chunk_len = len(chunk_doc)
result = np.zeros(chunk_len, dtype=dtype)
for i in range(chunk_len):
for key in np.dtype(dtype).names:
result[i][key] = chunk_doc[i][key]
return result
def _saver(self, key, metadata):
"""See strax.Backend"""
# Use the key to make a collection otherwise, use the backend-key
col = self.db[self.col_name if self.col_name is not None else str(key)]
return MongoSaver(key, metadata, col)
def get_metadata(self, key):
"""See strax.Backend"""
query = backend_key_to_query(key)
# Make sure to get the last of the meta-data docs. Otherwise we
# might be getting a previously failed document. Sort argument
# should be obsolete (due to the self.col.delete_many in the
# MongoSaver) but rather safe than sorry.
doc = self.db[self.col_name].find_one({
**query, 'metadata': {"$exists": True}},
# **query, 'provides_meta': True}, <-change to this after TTL has flushed
sort=[('write_time', DESCENDING)])
if doc and 'metadata' in doc:
return doc['metadata']
raise strax.DataNotAvailable
def _build_chunk_registry(self, backend_key):
"""
Build chunk info in a single registry using only one query to
the database. This is much faster as one does not have to do
n-chunk queries to the database. Just one will do. As the
document size is limited to 16 MB, it's unlikely that we will
run into memory issues (that we otherwise would not run into).
:param backend_key: strax.DataKey to query the collection for
"""
query = backend_key_to_query(backend_key)
chunks_registry = self.db[self.col_name].find(
{**query, 'chunk_i': {'$exists': True}},
# {**query, 'provides_meta': False}, <-change to this after TTL has flushed
{"chunk_i": 1, "data": 1})
# We are going to convert this to a dictionary as that is
# easier to lookup
for doc in chunks_registry:
chunk_key = doc.get('chunk_i', None)
if chunk_key is None:
# Should not happen because of the projection in find
# but let's double check:
raise ValueError(
f'Projection failed, got doc with no "chunk_i":\n{doc}')
# Update our registry with this chunks info. Use chunk_i as
# chunk_key. Make it a *string* to avoid potential key-error
# issues or json-encoding headaches.
self.chunks_registry[backend_key + str(chunk_key)] = doc.copy()
# Some bookkeeping to make sure we don't buffer too much in this
# backend. We still need to return at least one hence the 'and'.
# See: https://github.com/AxFoundation/strax/issues/346
if backend_key not in self._buffered_backend_keys:
self._buffered_backend_keys.append(backend_key)
while ((getsizeof(self.chunks_registry) / 1e6 > self._buff_mb
and len(self._buffered_backend_keys) > 1)
or len(self._buffered_backend_keys) > self._buff_nruns):
self._clean_first_key_from_registry()
def _clean_first_key_from_registry(self):
"""
Remove the first item in the self.buffered_keys and all the
associated keys in the self.chunks_registry to limit RAM-usage
"""
# only clean the first entry from the list
to_clean = self._buffered_backend_keys[0]
for registry_key in list(self.chunks_registry.keys()):
if to_clean in registry_key:
del self.chunks_registry[registry_key]
del self._buffered_backend_keys[0]
@export
class MongoFrontend(StorageFrontend):
"""MongoDB storage frontend"""
def __init__(self, uri, database, col_name=None, *args, **kwargs):
"""
MongoFrontend for reading/writing data from Mongo
:param uri: Mongo url (with pw and username)
:param database: name of database (str)
:param col_name: collection name (str) to look for data
:param args: init for StorageFrontend
:param kwargs: init for StorageFrontend
"""
super().__init__(*args, **kwargs)
self.client = MongoClient(uri)
self.db = self.client[database]
self.backends = [MongoBackend(uri, database, col_name=col_name)]
self.col_name = col_name
def _find(self, key, write, allow_incomplete, fuzzy_for,
fuzzy_for_options):
"""See strax.Frontend"""
if write:
return self.backends[0].__class__.__name__, str(key)
query = backend_key_to_query(str(key))
if self.db[self.col_name].count_documents(query):
self.log.debug(f"{key} is in cache.")
return self.backends[0].__class__.__name__, str(key)
self.log.debug(f"{key} is NOT in cache.")
raise strax.DataNotAvailable
@export
class MongoSaver(Saver):
allow_rechunk = False
def __init__(self, key, metadata, col):
"""
Mongo saver
:param key: strax.Datakey
:param metadata: metadata to save belonging to data
:param col: collection (NB! pymongo collection object) of mongo
instance to write to
"""
super().__init__(metadata)
self.col = col
# All meta_documents should have the key to query against
basic_meta = backend_key_to_query(key).copy()
# Start with a clean sheet, we are just going to overwrite
self.col.delete_many(basic_meta)
# Add datetime objects as candidates for TTL collections. Either
# can be used according to the preference of the user to index.
# Two entries can be used:
# 1. The time of writing.
# 2. The time of data taking.
basic_meta['write_time'] = datetime.now(py_utc)
# The run_start_time below is a placeholder and will be updated
# in the _save_chunk_metadata for the first chunk. Nevertheless
# we need an object in case there e.g. is no chunk.
basic_meta['run_start_time'] = datetime.now(py_utc)
# Add flag to doc that we are providing the metadata
basic_meta['provides_meta'] = True
# If available later update with this value:
self.run_start = None
# This info should be added to all of the associated documents
self.basic_md = basic_meta
# For the metadata copy this too:
meta_data = basic_meta.copy()
meta_data['metadata'] = self.md
# Save object_ids for fast querying and updates
self.id_md = self.col.insert_one(meta_data).inserted_id
# Also save all the chunks
self.ids_chunk = {}
def _save_chunk(self, data, chunk_info, executor=None):
"""see strax.Saver"""
chunk_i = chunk_info['chunk_i']
if getattr(data, 'nbytes') > 10_000_000:
warn('Inserting documents of size > 10 MB, this is getting '
'close to the 16 MB document size in mongo',
UserWarning)
aggregate_data = []
# Remove the numpy structures and parse the data. The dtype
# information is saved with the metadata so don't worry
for row in data:
ins = {}
for key in list(data.dtype.names):
ins[key] = row[key]
ins = remove_np(ins)
aggregate_data.append(ins)
# Get the document to update, if none available start a new one
# for this chunk
chunk_id = self.ids_chunk.get(chunk_i, None)
# We can fail here if the document is too large to be written
# out to mongo. One could do a try: except
# pymongo.errors.WriteError: pass, but that potentially leads to
# abuse of a Mongo instance going unnoticed.
if chunk_id is not None:
# In principle this should not end up here as each chunk
# should be it's own document unless you re-chunk
self.col.update_one({'_id': chunk_id},
{'$push': {f'data': aggregate_data}})
else:
# Start a new document, update it with the proper information
doc = self.basic_md.copy()
doc['write_time'] = datetime.now(py_utc)
doc['chunk_i'] = chunk_i
doc["data"] = aggregate_data
doc['provides_meta'] = False
chunk_id = self.col.insert_one(doc).inserted_id
self.ids_chunk[chunk_i] = chunk_id
return dict(), None
def _save_chunk_metadata(self, chunk_info):
"""see strax.Saver"""
# For the first chunk we get the run_start_time and update the
# run-metadata file
if int(chunk_info['chunk_i']) == 0:
self.run_start = datetime.fromtimestamp(
chunk_info['start']/1e9).replace(tzinfo=py_utc)
self.col.update_one({'_id': self.id_md},
{'$addToSet': {'metadata.chunks': chunk_info}})
def _close(self):
"""see strax.Saver"""
# First update the run-starts of all of the chunk-documents as
# this is a TTL index-candidate
if self.run_start is not None:
update = {'run_start_time': self.run_start}
query = {k: v for k, v in self.basic_md.items()
if k in ('number', 'data_type', 'lineage_hash')}
self.col.update_many(query, {'$set': update})
# Update the metadata
update = {f'metadata.{k}': v
for k, v in self.md.items()
if k in ('writing_ended', 'exception')}
# Also update all of the chunk-documents with the run_start_time
self.col.update_one({'_id': self.id_md}, {'$set': update})
def backend_key_to_query(backend_key):
"""Convert backend key to queryable dictionary"""
n, d, l = backend_key.split('-')
return {'number': int(n), 'data_type': d, 'lineage_hash': l}
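# Illustrative example (made-up key following the "<run>-<data_type>-<lineage_hash>"
# convention used throughout this backend):
#   backend_key_to_query('7158-peak_basics-abc123')
#   -> {'number': 7158, 'data_type': 'peak_basics', 'lineage_hash': 'abc123'}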
def remove_np(dictin):
"""Remove numpy types from a dict so it can be inserted into
mongo."""
if isinstance(dictin, dict):
result = {}
for k in dictin.keys():
result[k] = remove_np(dictin[k])
elif isinstance(dictin, (np.ndarray, list)):
result = []
for k in dictin:
result.append(remove_np(k))
elif isinstance(dictin, np.integer):
return int(dictin)
elif isinstance(dictin, np.floating):
return float(dictin)
else:
return dictin
return result
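# Illustrative behaviour of the two helpers above (values are hypothetical):
#   backend_key_to_query('012882-peaklets-abc123')
#       -> {'number': 12882, 'data_type': 'peaklets', 'lineage_hash': 'abc123'}
#   remove_np({'time': np.int64(7), 'area': np.float32(1.5), 'ids': np.array([1, 2])})
#       -> {'time': 7, 'area': 1.5, 'ids': [1, 2]}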
|
the-stack_0_10049 | import numpy as np
import matplotlib.pyplot as plt
print("Running plot_runtime script..")
maindir = 'res/runtime/'
filename = 'runtime_results.csv'
file = maindir + filename
print("Reading input data from " + file + "..")
data = np.genfromtxt(file, delimiter=',', skip_header=1)
print("Input completed..")
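# Assumed layout of runtime_results.csv, inferred from the indexing below
# (columns are 0-indexed after np.genfromtxt):
#   column 1 -> schedule kind (1 = static, 2 = dynamic, 3 = guided)
#   column 2 -> chunksize
#   column 5 -> loop1 time in seconds, column 7 -> loop2 time in seconds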
kind = ['static', 'dynamic', 'guided']
chunksize = [1, 2, 4, 8, 16, 32, 64]
avg_time_loop1 = np.zeros((len(chunksize),len(kind)))
avg_time_loop2 = np.zeros((len(chunksize),len(kind)))
for num, label in enumerate(kind):
# read kind type column
blocks = data[:,1]
# get data that only match the current kind type
kind_data = data[blocks==num+1,:]
for idx in range(len(chunksize)):
# get data that only match the current chunksize
blocks = kind_data[:,2]
chunk_data = kind_data[blocks==chunksize[idx],:]
# take the average time for each chunksize
avg_time_loop1[idx,num] = np.mean(chunk_data[:,5])
avg_time_loop2[idx,num] = np.mean(chunk_data[:,7])
#time vs chunksize
plt.figure()
plt.plot(chunksize, avg_time_loop1[:,0], '-*', label='Static')
plt.plot(chunksize, avg_time_loop1[:,1], '-^', label='Dynamic')
plt.plot(chunksize, avg_time_loop1[:,2], '-<', label='Guided')
plt.xlabel('Chunksize')
plt.ylabel('Time (s)')
# plt.legend(loc=2)
plt.legend()
plt.grid(True)
plt.savefig(maindir + 'runtime_loop1.eps', format='eps', dpi=1000)
plt.close()
print("Execution time for plot for loop1 completed..")
#time vs chunksize
plt.figure()
plt.plot(chunksize, avg_time_loop2[:,0], '-*', label='Static')
plt.plot(chunksize, avg_time_loop2[:,1], '-^', label='Dynamic')
plt.plot(chunksize, avg_time_loop2[:,2], '-<', label='Guided')
plt.xlabel('Chunksize')
plt.ylabel('Time (s)')
# plt.legend(loc=2)
plt.legend()
plt.grid(True)
plt.savefig(maindir + 'runtime_loop2.eps', format='eps', dpi=1000)
plt.close()
print("Execution time for plot for loop2 completed..")
print("plot_runtime script completed..")
|
the-stack_0_10050 | """Gunicorn configuration file."""
import multiprocessing
import environ
from koku.feature_flags import UNLEASH_CLIENT
from koku.probe_server import BasicProbeServer
from koku.probe_server import start_probe_server
ENVIRONMENT = environ.Env()
SOURCES = ENVIRONMENT.bool("SOURCES", default=False)
CLOWDER_PORT = "8000"
if ENVIRONMENT.bool("CLOWDER_ENABLED", default=False):
from app_common_python import LoadedConfig
CLOWDER_PORT = LoadedConfig.publicPort
if ENVIRONMENT.bool("MASU", default=False) or ENVIRONMENT.bool("SOURCES", default=False):
CLOWDER_PORT = LoadedConfig.privatePort
bind = f"0.0.0.0:{CLOWDER_PORT}"
cpu_resources = ENVIRONMENT.int("POD_CPU_LIMIT", default=multiprocessing.cpu_count())
workers = 1 if SOURCES else cpu_resources * 2 + 1
timeout = ENVIRONMENT.int("TIMEOUT", default=90)
loglevel = ENVIRONMENT.get_value("GUNICORN_LOG_LEVEL", default="INFO")
graceful_timeout = ENVIRONMENT.int("GRACEFUL_TIMEOUT", default=180)
gunicorn_threads = ENVIRONMENT.bool("GUNICORN_THREADS", default=False)
if gunicorn_threads:
threads = cpu_resources * 2 + 1
# Server Hooks
def on_starting(server):
"""Called just before the main process is initialized."""
httpd = start_probe_server(BasicProbeServer, server.log)
httpd.RequestHandlerClass.ready = True
def post_fork(server, worker):
"""Called just after a worker has been forked."""
UNLEASH_CLIENT.unleash_instance_id += f"_pid_{worker.pid}"
worker.log.info("Initializing UNLEASH_CLIENT for gunicorn worker.")
UNLEASH_CLIENT.initialize_client()
def worker_exit(server, worker):
"""Called just after a worker has been exited, in the worker process."""
worker.log.info("Shutting down UNLEASH_CLIENT for gunicorn worker.")
UNLEASH_CLIENT.destroy()
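# Illustrative launch command; the WSGI module path and config file name below
# are assumptions, not taken from the project:
#   gunicorn koku.wsgi:application -c ./gunicorn_conf.py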
|
the-stack_0_10051 | import requests
import time
class Facebook:
def __init__(self, config, permutations_list):
        # rate_limit is configured in milliseconds; convert it to a delay in seconds
self.delay = config['plateform']['facebook']['rate_limit'] / 1000
# https://facebook.com/{username}
self.format = config['plateform']['facebook']['format']
# facebook usernames are not case sensitive
self.permutations_list = [perm.lower() for perm in permutations_list]
# social
self.type = config['plateform']['facebook']['type']
# Generate all potential facebook usernames
def possible_usernames(self):
possible_usernames = []
for permutation in self.permutations_list:
possible_usernames.append(self.format.format(
permutation = permutation,
))
return possible_usernames
def search(self):
facebook_usernames = {
"type": self.type,
"accounts": []
}
possible_usernames_list = self.possible_usernames()
for username in possible_usernames_list:
            try:
                r = requests.get(username, timeout=5)
            except requests.ConnectionError:
                print("failed to connect to facebook")
                continue  # without this, `r` is undefined when checked below
# If the account exists
if r.status_code == 200:
facebook_usernames["accounts"].append({"value": username})
time.sleep(self.delay)
        return facebook_usernames
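# Minimal usage sketch. The config dict below is hypothetical; real values come
# from the caller's configuration, and the format string must use the
# {permutation} placeholder consumed by possible_usernames().
if __name__ == "__main__":
    demo_config = {
        "plateform": {
            "facebook": {
                "rate_limit": 1000,  # milliseconds between requests
                "format": "https://facebook.com/{permutation}",
                "type": "social",
            }
        }
    }
    checker = Facebook(demo_config, ["John.Doe", "JohnDoe99"])
    # possible_usernames() only builds URLs; search() would issue live requests.
    print(checker.possible_usernames())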
the-stack_0_10052 | # TODO nits:
# Get rid of asserts that are the caller's fault.
# Docstrings (e.g. ABCs).
import abc
from abc import abstractmethod, abstractproperty
import collections
import functools
import re as stdlib_re # Avoid confusion with the re we export.
import sys
import types
try:
import collections.abc as collections_abc
except ImportError:
import collections as collections_abc # Fallback for PY3.2.
# Please keep __all__ alphabetized within each category.
__all__ = [
# Super-special typing primitives.
'Any',
'Callable',
'Generic',
'Optional',
'TypeVar',
'Union',
'Tuple',
# ABCs (from collections.abc).
'AbstractSet', # collections.abc.Set.
'ByteString',
'Container',
'Hashable',
'ItemsView',
'Iterable',
'Iterator',
'KeysView',
'Mapping',
'MappingView',
'MutableMapping',
'MutableSequence',
'MutableSet',
'Sequence',
'Sized',
'ValuesView',
# Structural checks, a.k.a. protocols.
'Reversible',
'SupportsAbs',
'SupportsFloat',
'SupportsInt',
'SupportsRound',
# Concrete collection types.
'Dict',
'List',
'Set',
'NamedTuple', # Not really a type.
'Generator',
# One-off things.
'AnyStr',
'cast',
'get_type_hints',
'no_type_check',
'no_type_check_decorator',
'overload',
# Submodules.
'io',
're',
]
def _qualname(x):
if sys.version_info[:2] >= (3, 3):
return x.__qualname__
else:
# Fall back to just name.
return x.__name__
class TypingMeta(type):
"""Metaclass for every type defined below.
This overrides __new__() to require an extra keyword parameter
'_root', which serves as a guard against naive subclassing of the
typing classes. Any legitimate class defined using a metaclass
derived from TypingMeta (including internal subclasses created by
e.g. Union[X, Y]) must pass _root=True.
This also defines a dummy constructor (all the work is done in
__new__) and a nicer repr().
"""
_is_protocol = False
def __new__(cls, name, bases, namespace, *, _root=False):
if not _root:
raise TypeError("Cannot subclass %s" %
(', '.join(map(_type_repr, bases)) or '()'))
return super().__new__(cls, name, bases, namespace)
def __init__(self, *args, **kwds):
pass
def _eval_type(self, globalns, localns):
"""Override this in subclasses to interpret forward references.
For example, Union['C'] is internally stored as
Union[_ForwardRef('C')], which should evaluate to _Union[C],
where C is an object found in globalns or localns (searching
localns first, of course).
"""
return self
def _has_type_var(self):
return False
def __repr__(self):
return '%s.%s' % (self.__module__, _qualname(self))
class Final:
"""Mix-in class to prevent instantiation."""
__slots__ = ()
def __new__(self, *args, **kwds):
raise TypeError("Cannot instantiate %r" % self.__class__)
class _ForwardRef(TypingMeta):
"""Wrapper to hold a forward reference."""
def __new__(cls, arg):
if not isinstance(arg, str):
raise TypeError('ForwardRef must be a string -- got %r' % (arg,))
try:
code = compile(arg, '<string>', 'eval')
except SyntaxError:
raise SyntaxError('ForwardRef must be an expression -- got %r' %
(arg,))
self = super().__new__(cls, arg, (), {}, _root=True)
self.__forward_arg__ = arg
self.__forward_code__ = code
self.__forward_evaluated__ = False
self.__forward_value__ = None
typing_globals = globals()
frame = sys._getframe(1)
while frame is not None and frame.f_globals is typing_globals:
frame = frame.f_back
assert frame is not None
self.__forward_frame__ = frame
return self
def _eval_type(self, globalns, localns):
if not isinstance(localns, dict):
raise TypeError('ForwardRef localns must be a dict -- got %r' %
(localns,))
if not isinstance(globalns, dict):
raise TypeError('ForwardRef globalns must be a dict -- got %r' %
(globalns,))
if not self.__forward_evaluated__:
if globalns is None and localns is None:
globalns = localns = {}
elif globalns is None:
globalns = localns
elif localns is None:
localns = globalns
self.__forward_value__ = _type_check(
eval(self.__forward_code__, globalns, localns),
"Forward references must evaluate to types.")
self.__forward_evaluated__ = True
return self.__forward_value__
def __instancecheck__(self, obj):
raise TypeError("Forward references cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if not self.__forward_evaluated__:
globalns = self.__forward_frame__.f_globals
localns = self.__forward_frame__.f_locals
try:
self._eval_type(globalns, localns)
except NameError:
return False # Too early.
return issubclass(cls, self.__forward_value__)
def __repr__(self):
return '_ForwardRef(%r)' % (self.__forward_arg__,)
class _TypeAlias:
"""Internal helper class for defining generic variants of concrete types.
Note that this is not a type; let's call it a pseudo-type. It can
be used in instance and subclass checks, e.g. isinstance(m, Match)
or issubclass(type(m), Match). However, it cannot be itself the
target of an issubclass() call; e.g. issubclass(Match, C) (for
some arbitrary class C) raises TypeError rather than returning
False.
"""
__slots__ = ('name', 'type_var', 'impl_type', 'type_checker')
def __new__(cls, *args, **kwds):
"""Constructor.
This only exists to give a better error message in case
someone tries to subclass a type alias (not a good idea).
"""
if (len(args) == 3 and
isinstance(args[0], str) and
isinstance(args[1], tuple)):
# Close enough.
raise TypeError("A type alias cannot be subclassed")
return object.__new__(cls)
def __init__(self, name, type_var, impl_type, type_checker):
"""Initializer.
Args:
name: The name, e.g. 'Pattern'.
type_var: The type parameter, e.g. AnyStr, or the
specific type, e.g. str.
impl_type: The implementation type.
type_checker: Function that takes an impl_type instance.
and returns a value that should be a type_var instance.
"""
assert isinstance(name, str), repr(name)
assert isinstance(type_var, type), repr(type_var)
assert isinstance(impl_type, type), repr(impl_type)
assert not isinstance(impl_type, TypingMeta), repr(impl_type)
self.name = name
self.type_var = type_var
self.impl_type = impl_type
self.type_checker = type_checker
def __repr__(self):
return "%s[%s]" % (self.name, _type_repr(self.type_var))
def __getitem__(self, parameter):
assert isinstance(parameter, type), repr(parameter)
if not isinstance(self.type_var, TypeVar):
raise TypeError("%s cannot be further parameterized." % self)
if self.type_var.__constraints__:
if not issubclass(parameter, Union[self.type_var.__constraints__]):
raise TypeError("%s is not a valid substitution for %s." %
(parameter, self.type_var))
return self.__class__(self.name, parameter,
self.impl_type, self.type_checker)
def __instancecheck__(self, obj):
raise TypeError("Type aliases cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if cls is Any:
return True
if isinstance(cls, _TypeAlias):
# Covariance. For now, we compare by name.
return (cls.name == self.name and
issubclass(cls.type_var, self.type_var))
else:
# Note that this is too lenient, because the
# implementation type doesn't carry information about
# whether it is about bytes or str (for example).
return issubclass(cls, self.impl_type)
def _has_type_var(t):
return t is not None and isinstance(t, TypingMeta) and t._has_type_var()
def _eval_type(t, globalns, localns):
if isinstance(t, TypingMeta):
return t._eval_type(globalns, localns)
else:
return t
def _type_check(arg, msg):
"""Check that the argument is a type, and return it.
As a special case, accept None and return type(None) instead.
Also, _TypeAlias instances (e.g. Match, Pattern) are acceptable.
The msg argument is a human-readable error message, e.g.
"Union[arg, ...]: arg should be a type."
We append the repr() of the actual value (truncated to 100 chars).
"""
if arg is None:
return type(None)
if isinstance(arg, str):
arg = _ForwardRef(arg)
if not isinstance(arg, (type, _TypeAlias)):
raise TypeError(msg + " Got %.100r." % (arg,))
return arg
def _type_repr(obj):
"""Return the repr() of an object, special-casing types.
If obj is a type, we return a shorter version than the default
type.__repr__, based on the module and qualified name, which is
typically enough to uniquely identify a type. For everything
else, we fall back on repr(obj).
"""
if isinstance(obj, type) and not isinstance(obj, TypingMeta):
if obj.__module__ == 'builtins':
return _qualname(obj)
else:
return '%s.%s' % (obj.__module__, _qualname(obj))
else:
return repr(obj)
class AnyMeta(TypingMeta):
"""Metaclass for Any."""
def __new__(cls, name, bases, namespace, _root=False):
self = super().__new__(cls, name, bases, namespace, _root=_root)
return self
def __instancecheck__(self, obj):
raise TypeError("Any cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if not isinstance(cls, type):
return super().__subclasscheck__(cls) # To TypeError.
return True
class Any(Final, metaclass=AnyMeta, _root=True):
"""Special type indicating an unconstrained type.
- Any object is an instance of Any.
- Any class is a subclass of Any.
- As a special case, Any and object are subclasses of each other.
"""
__slots__ = ()
class TypeVar(TypingMeta, metaclass=TypingMeta, _root=True):
"""Type variable.
Usage::
T = TypeVar('T') # Can be anything
A = TypeVar('A', str, bytes) # Must be str or bytes
Type variables exist primarily for the benefit of static type
checkers. They serve as the parameters for generic types as well
as for generic function definitions. See class Generic for more
information on generic types. Generic functions work as follows:
def repeat(x: T, n: int) -> Sequence[T]:
'''Return a list containing n references to x.'''
return [x]*n
def longest(x: A, y: A) -> A:
'''Return the longest of two strings.'''
return x if len(x) >= len(y) else y
The latter example's signature is essentially the overloading
of (str, str) -> str and (bytes, bytes) -> bytes. Also note
that if the arguments are instances of some subclass of str,
the return type is still plain str.
At runtime, isinstance(x, T) will raise TypeError. However,
issubclass(C, T) is true for any class C, and issubclass(str, A)
and issubclass(bytes, A) are true, and issubclass(int, A) is
false.
Type variables may be marked covariant or contravariant by passing
covariant=True or contravariant=True. See PEP 484 for more
details. By default type variables are invariant.
Type variables can be introspected. e.g.:
T.__name__ == 'T'
T.__constraints__ == ()
T.__covariant__ == False
T.__contravariant__ = False
A.__constraints__ == (str, bytes)
"""
def __new__(cls, name, *constraints, bound=None,
covariant=False, contravariant=False):
self = super().__new__(cls, name, (Final,), {}, _root=True)
if covariant and contravariant:
raise ValueError("Bivariant type variables are not supported.")
self.__covariant__ = bool(covariant)
self.__contravariant__ = bool(contravariant)
if constraints and bound is not None:
raise TypeError("Constraints cannot be combined with bound=...")
if constraints and len(constraints) == 1:
raise TypeError("A single constraint is not allowed")
msg = "TypeVar(name, constraint, ...): constraints must be types."
self.__constraints__ = tuple(_type_check(t, msg) for t in constraints)
if bound:
self.__bound__ = _type_check(bound, "Bound must be a type.")
else:
self.__bound__ = None
return self
def _has_type_var(self):
return True
def __repr__(self):
if self.__covariant__:
prefix = '+'
elif self.__contravariant__:
prefix = '-'
else:
prefix = '~'
return prefix + self.__name__
def __instancecheck__(self, instance):
raise TypeError("Type variables cannot be used with isinstance().")
def __subclasscheck__(self, cls):
# TODO: Make this raise TypeError too?
if cls is self:
return True
if cls is Any:
return True
if self.__bound__ is not None:
return issubclass(cls, self.__bound__)
if self.__constraints__:
return any(issubclass(cls, c) for c in self.__constraints__)
return True
# Some unconstrained type variables. These are used by the container types.
T = TypeVar('T') # Any type.
KT = TypeVar('KT') # Key type.
VT = TypeVar('VT') # Value type.
T_co = TypeVar('T_co', covariant=True) # Any type covariant containers.
V_co = TypeVar('V_co', covariant=True) # Any type covariant containers.
VT_co = TypeVar('VT_co', covariant=True) # Value type covariant containers.
T_contra = TypeVar('T_contra', contravariant=True) # Ditto contravariant.
# A useful type variable with constraints. This represents string types.
# TODO: What about bytearray, memoryview?
AnyStr = TypeVar('AnyStr', bytes, str)
class UnionMeta(TypingMeta):
"""Metaclass for Union."""
def __new__(cls, name, bases, namespace, parameters=None, _root=False):
if parameters is None:
return super().__new__(cls, name, bases, namespace, _root=_root)
if not isinstance(parameters, tuple):
raise TypeError("Expected parameters=<tuple>")
# Flatten out Union[Union[...], ...] and type-check non-Union args.
params = []
msg = "Union[arg, ...]: each arg must be a type."
for p in parameters:
if isinstance(p, UnionMeta):
params.extend(p.__union_params__)
else:
params.append(_type_check(p, msg))
# Weed out strict duplicates, preserving the first of each occurrence.
all_params = set(params)
if len(all_params) < len(params):
new_params = []
for t in params:
if t in all_params:
new_params.append(t)
all_params.remove(t)
params = new_params
assert not all_params, all_params
# Weed out subclasses.
# E.g. Union[int, Employee, Manager] == Union[int, Employee].
# If Any or object is present it will be the sole survivor.
# If both Any and object are present, Any wins.
# Never discard type variables, except against Any.
# (In particular, Union[str, AnyStr] != AnyStr.)
all_params = set(params)
for t1 in params:
if t1 is Any:
return Any
if isinstance(t1, TypeVar):
continue
if any(issubclass(t1, t2)
for t2 in all_params - {t1} if not isinstance(t2, TypeVar)):
all_params.remove(t1)
# It's not a union if there's only one type left.
if len(all_params) == 1:
return all_params.pop()
# Create a new class with these params.
self = super().__new__(cls, name, bases, {}, _root=True)
self.__union_params__ = tuple(t for t in params if t in all_params)
self.__union_set_params__ = frozenset(self.__union_params__)
return self
def _eval_type(self, globalns, localns):
p = tuple(_eval_type(t, globalns, localns)
for t in self.__union_params__)
if p == self.__union_params__:
return self
else:
return self.__class__(self.__name__, self.__bases__, {},
p, _root=True)
def _has_type_var(self):
if self.__union_params__:
for t in self.__union_params__:
if _has_type_var(t):
return True
return False
def __repr__(self):
r = super().__repr__()
if self.__union_params__:
r += '[%s]' % (', '.join(_type_repr(t)
for t in self.__union_params__))
return r
def __getitem__(self, parameters):
if self.__union_params__ is not None:
raise TypeError(
"Cannot subscript an existing Union. Use Union[u, t] instead.")
if parameters == ():
raise TypeError("Cannot take a Union of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
return self.__class__(self.__name__, self.__bases__,
dict(self.__dict__), parameters, _root=True)
def __eq__(self, other):
if not isinstance(other, UnionMeta):
return NotImplemented
return self.__union_set_params__ == other.__union_set_params__
def __hash__(self):
return hash(self.__union_set_params__)
def __instancecheck__(self, obj):
raise TypeError("Unions cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if cls is Any:
return True
if self.__union_params__ is None:
return isinstance(cls, UnionMeta)
elif isinstance(cls, UnionMeta):
if cls.__union_params__ is None:
return False
return all(issubclass(c, self) for c in (cls.__union_params__))
elif isinstance(cls, TypeVar):
if cls in self.__union_params__:
return True
if cls.__constraints__:
return issubclass(Union[cls.__constraints__], self)
return False
else:
return any(issubclass(cls, t) for t in self.__union_params__)
class Union(Final, metaclass=UnionMeta, _root=True):
"""Union type; Union[X, Y] means either X or Y.
To define a union, use e.g. Union[int, str]. Details:
- The arguments must be types and there must be at least one.
- None as an argument is a special case and is replaced by
type(None).
- Unions of unions are flattened, e.g.::
Union[Union[int, str], float] == Union[int, str, float]
- Unions of a single argument vanish, e.g.::
Union[int] == int # The constructor actually returns int
- Redundant arguments are skipped, e.g.::
Union[int, str, int] == Union[int, str]
- When comparing unions, the argument order is ignored, e.g.::
Union[int, str] == Union[str, int]
- When two arguments have a subclass relationship, the least
derived argument is kept, e.g.::
class Employee: pass
class Manager(Employee): pass
Union[int, Employee, Manager] == Union[int, Employee]
Union[Manager, int, Employee] == Union[int, Employee]
Union[Employee, Manager] == Employee
- Corollary: if Any is present it is the sole survivor, e.g.::
Union[int, Any] == Any
- Similar for object::
Union[int, object] == object
- To cut a tie: Union[object, Any] == Union[Any, object] == Any.
- You cannot subclass or instantiate a union.
- You cannot write Union[X][Y] (what would it mean?).
- You can use Optional[X] as a shorthand for Union[X, None].
"""
# Unsubscripted Union type has params set to None.
__union_params__ = None
__union_set_params__ = None
class OptionalMeta(TypingMeta):
"""Metaclass for Optional."""
def __new__(cls, name, bases, namespace, _root=False):
return super().__new__(cls, name, bases, namespace, _root=_root)
def __getitem__(self, arg):
arg = _type_check(arg, "Optional[t] requires a single type.")
return Union[arg, type(None)]
class Optional(Final, metaclass=OptionalMeta, _root=True):
"""Optional type.
Optional[X] is equivalent to Union[X, type(None)].
"""
__slots__ = ()
class TupleMeta(TypingMeta):
"""Metaclass for Tuple."""
def __new__(cls, name, bases, namespace, parameters=None,
use_ellipsis=False, _root=False):
self = super().__new__(cls, name, bases, namespace, _root=_root)
self.__tuple_params__ = parameters
self.__tuple_use_ellipsis__ = use_ellipsis
return self
def _has_type_var(self):
if self.__tuple_params__:
for t in self.__tuple_params__:
if _has_type_var(t):
return True
return False
def _eval_type(self, globalns, localns):
tp = self.__tuple_params__
if tp is None:
return self
p = tuple(_eval_type(t, globalns, localns) for t in tp)
if p == self.__tuple_params__:
return self
else:
return self.__class__(self.__name__, self.__bases__, {},
p, _root=True)
def __repr__(self):
r = super().__repr__()
if self.__tuple_params__ is not None:
params = [_type_repr(p) for p in self.__tuple_params__]
if self.__tuple_use_ellipsis__:
params.append('...')
r += '[%s]' % (
', '.join(params))
return r
def __getitem__(self, parameters):
if self.__tuple_params__ is not None:
raise TypeError("Cannot re-parameterize %r" % (self,))
if not isinstance(parameters, tuple):
parameters = (parameters,)
if len(parameters) == 2 and parameters[1] == Ellipsis:
parameters = parameters[:1]
use_ellipsis = True
msg = "Tuple[t, ...]: t must be a type."
else:
use_ellipsis = False
msg = "Tuple[t0, t1, ...]: each t must be a type."
parameters = tuple(_type_check(p, msg) for p in parameters)
return self.__class__(self.__name__, self.__bases__,
dict(self.__dict__), parameters,
use_ellipsis=use_ellipsis, _root=True)
def __eq__(self, other):
if not isinstance(other, TupleMeta):
return NotImplemented
return self.__tuple_params__ == other.__tuple_params__
def __hash__(self):
return hash(self.__tuple_params__)
def __instancecheck__(self, obj):
raise TypeError("Tuples cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if cls is Any:
return True
if not isinstance(cls, type):
return super().__subclasscheck__(cls) # To TypeError.
if issubclass(cls, tuple):
return True # Special case.
if not isinstance(cls, TupleMeta):
return super().__subclasscheck__(cls) # False.
if self.__tuple_params__ is None:
return True
if cls.__tuple_params__ is None:
return False # ???
if cls.__tuple_use_ellipsis__ != self.__tuple_use_ellipsis__:
return False
# Covariance.
return (len(self.__tuple_params__) == len(cls.__tuple_params__) and
all(issubclass(x, p)
for x, p in zip(cls.__tuple_params__,
self.__tuple_params__)))
class Tuple(Final, metaclass=TupleMeta, _root=True):
"""Tuple type; Tuple[X, Y] is the cross-product type of X and Y.
Example: Tuple[T1, T2] is a tuple of two elements corresponding
to type variables T1 and T2. Tuple[int, float, str] is a tuple
of an int, a float and a string.
To specify a variable-length tuple of homogeneous type, use Sequence[T].
"""
__slots__ = ()
class CallableMeta(TypingMeta):
"""Metaclass for Callable."""
def __new__(cls, name, bases, namespace, _root=False,
args=None, result=None):
if args is None and result is None:
pass # Must be 'class Callable'.
else:
if args is not Ellipsis:
if not isinstance(args, list):
raise TypeError("Callable[args, result]: "
"args must be a list."
" Got %.100r." % (args,))
msg = "Callable[[arg, ...], result]: each arg must be a type."
args = tuple(_type_check(arg, msg) for arg in args)
msg = "Callable[args, result]: result must be a type."
result = _type_check(result, msg)
self = super().__new__(cls, name, bases, namespace, _root=_root)
self.__args__ = args
self.__result__ = result
return self
def _has_type_var(self):
if self.__args__:
for t in self.__args__:
if _has_type_var(t):
return True
return _has_type_var(self.__result__)
def _eval_type(self, globalns, localns):
if self.__args__ is None and self.__result__ is None:
return self
if self.__args__ is Ellipsis:
args = self.__args__
else:
args = [_eval_type(t, globalns, localns) for t in self.__args__]
result = _eval_type(self.__result__, globalns, localns)
if args == self.__args__ and result == self.__result__:
return self
else:
return self.__class__(self.__name__, self.__bases__, {},
args=args, result=result, _root=True)
def __repr__(self):
r = super().__repr__()
if self.__args__ is not None or self.__result__ is not None:
if self.__args__ is Ellipsis:
args_r = '...'
else:
args_r = '[%s]' % ', '.join(_type_repr(t)
for t in self.__args__)
r += '[%s, %s]' % (args_r, _type_repr(self.__result__))
return r
def __getitem__(self, parameters):
if self.__args__ is not None or self.__result__ is not None:
raise TypeError("This Callable type is already parameterized.")
if not isinstance(parameters, tuple) or len(parameters) != 2:
raise TypeError(
"Callable must be used as Callable[[arg, ...], result].")
args, result = parameters
return self.__class__(self.__name__, self.__bases__,
dict(self.__dict__), _root=True,
args=args, result=result)
def __eq__(self, other):
if not isinstance(other, CallableMeta):
return NotImplemented
return (self.__args__ == other.__args__ and
self.__result__ == other.__result__)
def __hash__(self):
return hash(self.__args__) ^ hash(self.__result__)
def __instancecheck__(self, obj):
# For unparametrized Callable we allow this, because
# typing.Callable should be equivalent to
# collections.abc.Callable.
if self.__args__ is None and self.__result__ is None:
return isinstance(obj, collections_abc.Callable)
else:
raise TypeError("Callable[] cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if cls is Any:
return True
if not isinstance(cls, CallableMeta):
return super().__subclasscheck__(cls)
if self.__args__ is None and self.__result__ is None:
return True
# We're not doing covariance or contravariance -- this is *invariance*.
return self == cls
class Callable(Final, metaclass=CallableMeta, _root=True):
"""Callable type; Callable[[int], str] is a function of (int) -> str.
The subscription syntax must always be used with exactly two
values: the argument list and the return type. The argument list
must be a list of types; the return type must be a single type.
There is no syntax to indicate optional or keyword arguments,
such function types are rarely used as callback types.
"""
__slots__ = ()
def _gorg(a):
"""Return the farthest origin of a generic class."""
assert isinstance(a, GenericMeta)
while a.__origin__ is not None:
a = a.__origin__
return a
def _geqv(a, b):
"""Return whether two generic classes are equivalent.
The intention is to consider generic class X and any of its
parameterized forms (X[T], X[int], etc.) as equivalent.
However, X is not equivalent to a subclass of X.
The relation is reflexive, symmetric and transitive.
"""
assert isinstance(a, GenericMeta) and isinstance(b, GenericMeta)
# Reduce each to its origin.
return _gorg(a) is _gorg(b)
class GenericMeta(TypingMeta, abc.ABCMeta):
"""Metaclass for generic types."""
# TODO: Constrain more how Generic is used; only a few
# standard patterns should be allowed.
# TODO: Use a more precise rule than matching __name__ to decide
# whether two classes are the same. Also, save the formal
# parameters. (These things are related! A solution lies in
# using origin.)
__extra__ = None
def __new__(cls, name, bases, namespace,
parameters=None, origin=None, extra=None):
if parameters is None:
# Extract parameters from direct base classes. Only
# direct bases are considered and only those that are
# themselves generic, and parameterized with type
# variables. Don't use bases like Any, Union, Tuple,
# Callable or type variables.
params = None
for base in bases:
if isinstance(base, TypingMeta):
if not isinstance(base, GenericMeta):
raise TypeError(
"You cannot inherit from magic class %s" %
repr(base))
if base.__parameters__ is None:
continue # The base is unparameterized.
for bp in base.__parameters__:
if _has_type_var(bp) and not isinstance(bp, TypeVar):
raise TypeError(
"Cannot inherit from a generic class "
"parameterized with "
"non-type-variable %s" % bp)
if params is None:
params = []
if bp not in params:
params.append(bp)
if params is not None:
parameters = tuple(params)
self = super().__new__(cls, name, bases, namespace, _root=True)
self.__parameters__ = parameters
if extra is not None:
self.__extra__ = extra
# Else __extra__ is inherited, eventually from the
# (meta-)class default above.
self.__origin__ = origin
return self
def _has_type_var(self):
if self.__parameters__:
for t in self.__parameters__:
if _has_type_var(t):
return True
return False
def __repr__(self):
r = super().__repr__()
if self.__parameters__ is not None:
r += '[%s]' % (
', '.join(_type_repr(p) for p in self.__parameters__))
return r
def __eq__(self, other):
if not isinstance(other, GenericMeta):
return NotImplemented
return (_geqv(self, other) and
self.__parameters__ == other.__parameters__)
def __hash__(self):
return hash((self.__name__, self.__parameters__))
def __getitem__(self, params):
if not isinstance(params, tuple):
params = (params,)
if not params:
raise TypeError("Cannot have empty parameter list")
msg = "Parameters to generic types must be types."
params = tuple(_type_check(p, msg) for p in params)
if self.__parameters__ is None:
for p in params:
if not isinstance(p, TypeVar):
raise TypeError("Initial parameters must be "
"type variables; got %s" % p)
if len(set(params)) != len(params):
raise TypeError(
"All type variables in Generic[...] must be distinct.")
else:
if len(params) != len(self.__parameters__):
raise TypeError("Cannot change parameter count from %d to %d" %
(len(self.__parameters__), len(params)))
for new, old in zip(params, self.__parameters__):
if isinstance(old, TypeVar):
if not old.__constraints__:
# Substituting for an unconstrained TypeVar is OK.
continue
if issubclass(new, Union[old.__constraints__]):
# Specializing a constrained type variable is OK.
continue
if not issubclass(new, old):
raise TypeError(
"Cannot substitute %s for %s in %s" %
(_type_repr(new), _type_repr(old), self))
return self.__class__(self.__name__, self.__bases__,
dict(self.__dict__),
parameters=params,
origin=self,
extra=self.__extra__)
def __instancecheck__(self, instance):
# Since we extend ABC.__subclasscheck__ and
# ABC.__instancecheck__ inlines the cache checking done by the
# latter, we must extend __instancecheck__ too. For simplicity
# we just skip the cache check -- instance checks for generic
# classes are supposed to be rare anyways.
return self.__subclasscheck__(instance.__class__)
def __subclasscheck__(self, cls):
if cls is Any:
return True
if isinstance(cls, GenericMeta):
# For a class C(Generic[T]) where T is co-variant,
# C[X] is a subclass of C[Y] iff X is a subclass of Y.
origin = self.__origin__
if origin is not None and origin is cls.__origin__:
assert len(self.__parameters__) == len(origin.__parameters__)
assert len(cls.__parameters__) == len(origin.__parameters__)
for p_self, p_cls, p_origin in zip(self.__parameters__,
cls.__parameters__,
origin.__parameters__):
if isinstance(p_origin, TypeVar):
if p_origin.__covariant__:
# Covariant -- p_cls must be a subclass of p_self.
if not issubclass(p_cls, p_self):
break
elif p_origin.__contravariant__:
# Contravariant. I think it's the opposite. :-)
if not issubclass(p_self, p_cls):
break
else:
# Invariant -- p_cls and p_self must equal.
if p_self != p_cls:
break
else:
# If the origin's parameter is not a typevar,
# insist on invariance.
if p_self != p_cls:
break
else:
return True
# If we break out of the loop, the superclass gets a chance.
if super().__subclasscheck__(cls):
return True
if self.__extra__ is None or isinstance(cls, GenericMeta):
return False
return issubclass(cls, self.__extra__)
class Generic(metaclass=GenericMeta):
"""Abstract base class for generic types.
A generic type is typically declared by inheriting from an
instantiation of this class with one or more type variables.
For example, a generic mapping type might be defined as::
class Mapping(Generic[KT, VT]):
def __getitem__(self, key: KT) -> VT:
...
# Etc.
This class can then be used as follows::
      def lookup_name(mapping: Mapping[KT, VT], key: KT, default: VT) -> VT:
try:
return mapping[key]
except KeyError:
return default
For clarity the type variables may be redefined, e.g.::
X = TypeVar('X')
Y = TypeVar('Y')
def lookup_name(mapping: Mapping[X, Y], key: X, default: Y) -> Y:
# Same body as above.
"""
__slots__ = ()
def __new__(cls, *args, **kwds):
next_in_mro = object
# Look for the last occurrence of Generic or Generic[...].
for i, c in enumerate(cls.__mro__[:-1]):
if isinstance(c, GenericMeta) and _gorg(c) is Generic:
next_in_mro = cls.__mro__[i+1]
return next_in_mro.__new__(_gorg(cls))
def cast(typ, val):
"""Cast a value to a type.
This returns the value unchanged. To the type checker this
signals that the return value has the designated type, but at
runtime we intentionally don't check anything (we want this
to be as fast as possible).
"""
return val
def _get_defaults(func):
"""Internal helper to extract the default arguments, by name."""
code = func.__code__
pos_count = code.co_argcount
kw_count = code.co_kwonlyargcount
arg_names = code.co_varnames
kwarg_names = arg_names[pos_count:pos_count + kw_count]
arg_names = arg_names[:pos_count]
defaults = func.__defaults__ or ()
kwdefaults = func.__kwdefaults__
res = dict(kwdefaults) if kwdefaults else {}
pos_offset = pos_count - len(defaults)
for name, value in zip(arg_names[pos_offset:], defaults):
assert name not in res
res[name] = value
return res
def get_type_hints(obj, globalns=None, localns=None):
"""Return type hints for a function or method object.
This is often the same as obj.__annotations__, but it handles
forward references encoded as string literals, and if necessary
adds Optional[t] if a default value equal to None is set.
BEWARE -- the behavior of globalns and localns is counterintuitive
(unless you are familiar with how eval() and exec() work). The
search order is locals first, then globals.
- If no dict arguments are passed, an attempt is made to use the
globals from obj, and these are also used as the locals. If the
object does not appear to have globals, an exception is raised.
- If one dict argument is passed, it is used for both globals and
locals.
- If two dict arguments are passed, they specify globals and
locals, respectively.
"""
if getattr(obj, '__no_type_check__', None):
return {}
if globalns is None:
globalns = getattr(obj, '__globals__', {})
if localns is None:
localns = globalns
elif localns is None:
localns = globalns
defaults = _get_defaults(obj)
hints = dict(obj.__annotations__)
for name, value in hints.items():
if isinstance(value, str):
value = _ForwardRef(value)
value = _eval_type(value, globalns, localns)
if name in defaults and defaults[name] is None:
value = Optional[value]
hints[name] = value
return hints
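# Illustrative example of the behaviour documented above: for
#     def greet(name: 'str', polite: bool = None) -> str: ...
# get_type_hints(greet) evaluates the forward reference and wraps the
# None-defaulted argument, giving
#     {'name': str, 'polite': Optional[bool], 'return': str}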
# TODO: Also support this as a class decorator.
def no_type_check(arg):
"""Decorator to indicate that annotations are not type hints.
The argument must be a class or function; if it is a class, it
applies recursively to all methods defined in that class (but not
to methods defined in its superclasses or subclasses).
This mutates the function(s) in place.
"""
if isinstance(arg, type):
for obj in arg.__dict__.values():
if isinstance(obj, types.FunctionType):
obj.__no_type_check__ = True
else:
arg.__no_type_check__ = True
return arg
def no_type_check_decorator(decorator):
"""Decorator to give another decorator the @no_type_check effect.
This wraps the decorator with something that wraps the decorated
function in @no_type_check.
"""
@functools.wraps(decorator)
def wrapped_decorator(*args, **kwds):
func = decorator(*args, **kwds)
func = no_type_check(func)
return func
return wrapped_decorator
def overload(func):
raise RuntimeError("Overloading is only supported in library stubs")
class _ProtocolMeta(GenericMeta):
"""Internal metaclass for _Protocol.
This exists so _Protocol classes can be generic without deriving
from Generic.
"""
def __instancecheck__(self, obj):
raise TypeError("Protocols cannot be used with isinstance().")
def __subclasscheck__(self, cls):
if not self._is_protocol:
# No structural checks since this isn't a protocol.
return NotImplemented
if self is _Protocol:
# Every class is a subclass of the empty protocol.
return True
# Find all attributes defined in the protocol.
attrs = self._get_protocol_attrs()
for attr in attrs:
if not any(attr in d.__dict__ for d in cls.__mro__):
return False
return True
def _get_protocol_attrs(self):
# Get all Protocol base classes.
protocol_bases = []
for c in self.__mro__:
if getattr(c, '_is_protocol', False) and c.__name__ != '_Protocol':
protocol_bases.append(c)
# Get attributes included in protocol.
attrs = set()
for base in protocol_bases:
for attr in base.__dict__.keys():
# Include attributes not defined in any non-protocol bases.
for c in self.__mro__:
if (c is not base and attr in c.__dict__ and
not getattr(c, '_is_protocol', False)):
break
else:
if (not attr.startswith('_abc_') and
attr != '__abstractmethods__' and
attr != '_is_protocol' and
attr != '__dict__' and
attr != '__slots__' and
attr != '_get_protocol_attrs' and
attr != '__parameters__' and
attr != '__origin__' and
attr != '__module__'):
attrs.add(attr)
return attrs
class _Protocol(metaclass=_ProtocolMeta):
"""Internal base class for protocol classes.
This implements a simple-minded structural isinstance check
(similar but more general than the one-offs in collections.abc
such as Hashable).
"""
__slots__ = ()
_is_protocol = True
# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
Hashable = collections_abc.Hashable # Not generic.
class Iterable(Generic[T_co], extra=collections_abc.Iterable):
__slots__ = ()
class Iterator(Iterable[T_co], extra=collections_abc.Iterator):
__slots__ = ()
class SupportsInt(_Protocol):
__slots__ = ()
@abstractmethod
def __int__(self) -> int:
pass
class SupportsFloat(_Protocol):
__slots__ = ()
@abstractmethod
def __float__(self) -> float:
pass
class SupportsComplex(_Protocol):
__slots__ = ()
@abstractmethod
def __complex__(self) -> complex:
pass
class SupportsBytes(_Protocol):
__slots__ = ()
@abstractmethod
def __bytes__(self) -> bytes:
pass
class SupportsAbs(_Protocol[T_co]):
__slots__ = ()
@abstractmethod
def __abs__(self) -> T_co:
pass
class SupportsRound(_Protocol[T_co]):
__slots__ = ()
@abstractmethod
def __round__(self, ndigits: int = 0) -> T_co:
pass
class Reversible(_Protocol[T_co]):
__slots__ = ()
@abstractmethod
def __reversed__(self) -> 'Iterator[T_co]':
pass
Sized = collections_abc.Sized # Not generic.
class Container(Generic[T_co], extra=collections_abc.Container):
__slots__ = ()
# Callable was defined earlier.
class AbstractSet(Sized, Iterable[T_co], Container[T_co],
extra=collections_abc.Set):
pass
class MutableSet(AbstractSet[T], extra=collections_abc.MutableSet):
pass
# NOTE: Only the value type is covariant.
class Mapping(Sized, Iterable[KT], Container[KT], Generic[VT_co],
extra=collections_abc.Mapping):
pass
class MutableMapping(Mapping[KT, VT], extra=collections_abc.MutableMapping):
pass
class Sequence(Sized, Iterable[T_co], Container[T_co],
extra=collections_abc.Sequence):
pass
class MutableSequence(Sequence[T], extra=collections_abc.MutableSequence):
pass
class ByteString(Sequence[int], extra=collections_abc.ByteString):
pass
ByteString.register(type(memoryview(b'')))
class List(list, MutableSequence[T]):
def __new__(cls, *args, **kwds):
if _geqv(cls, List):
raise TypeError("Type List cannot be instantiated; "
"use list() instead")
return list.__new__(cls, *args, **kwds)
class Set(set, MutableSet[T]):
def __new__(cls, *args, **kwds):
if _geqv(cls, Set):
raise TypeError("Type Set cannot be instantiated; "
"use set() instead")
return set.__new__(cls, *args, **kwds)
class _FrozenSetMeta(GenericMeta):
"""This metaclass ensures set is not a subclass of FrozenSet.
Without this metaclass, set would be considered a subclass of
FrozenSet, because FrozenSet.__extra__ is collections.abc.Set, and
set is a subclass of that.
"""
def __subclasscheck__(self, cls):
if issubclass(cls, Set):
return False
return super().__subclasscheck__(cls)
class FrozenSet(frozenset, AbstractSet[T_co], metaclass=_FrozenSetMeta):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, FrozenSet):
raise TypeError("Type FrozenSet cannot be instantiated; "
"use frozenset() instead")
return frozenset.__new__(cls, *args, **kwds)
class MappingView(Sized, Iterable[T_co], extra=collections_abc.MappingView):
pass
class KeysView(MappingView[KT], AbstractSet[KT],
extra=collections_abc.KeysView):
pass
# TODO: Enable Set[Tuple[KT, VT_co]] instead of Generic[KT, VT_co].
class ItemsView(MappingView, Generic[KT, VT_co],
extra=collections_abc.ItemsView):
pass
class ValuesView(MappingView[VT_co], extra=collections_abc.ValuesView):
pass
class Dict(dict, MutableMapping[KT, VT]):
def __new__(cls, *args, **kwds):
if _geqv(cls, Dict):
raise TypeError("Type Dict cannot be instantiated; "
"use dict() instead")
return dict.__new__(cls, *args, **kwds)
# Determine what base class to use for Generator.
if hasattr(collections_abc, 'Generator'):
# Sufficiently recent versions of 3.5 have a Generator ABC.
_G_base = collections_abc.Generator
else:
# Fall back on the exact type.
_G_base = types.GeneratorType
class Generator(Iterator[T_co], Generic[T_co, T_contra, V_co],
extra=_G_base):
__slots__ = ()
def __new__(cls, *args, **kwds):
if _geqv(cls, Generator):
raise TypeError("Type Generator cannot be instantiated; "
"create a subclass instead")
return super().__new__(cls, *args, **kwds)
def NamedTuple(typename, fields):
"""Typed version of namedtuple.
Usage::
      Employee = typing.NamedTuple('Employee', [('name', str), ('id', int)])
This is equivalent to::
Employee = collections.namedtuple('Employee', ['name', 'id'])
The resulting class has one extra attribute: _field_types,
giving a dict mapping field names to types. (The field names
are in the _fields attribute, which is part of the namedtuple
API.)
"""
fields = [(n, t) for n, t in fields]
cls = collections.namedtuple(typename, [n for n, t in fields])
cls._field_types = dict(fields)
return cls
class IO(Generic[AnyStr]):
"""Generic base class for TextIO and BinaryIO.
This is an abstract, generic version of the return of open().
NOTE: This does not distinguish between the different possible
classes (text vs. binary, read vs. write vs. read/write,
append-only, unbuffered). The TextIO and BinaryIO subclasses
below capture the distinctions between text vs. binary, which is
pervasive in the interface; however we currently do not offer a
way to track the other distinctions in the type system.
"""
__slots__ = ()
@abstractproperty
def mode(self) -> str:
pass
@abstractproperty
def name(self) -> str:
pass
@abstractmethod
def close(self) -> None:
pass
@abstractmethod
def closed(self) -> bool:
pass
@abstractmethod
def fileno(self) -> int:
pass
@abstractmethod
def flush(self) -> None:
pass
@abstractmethod
def isatty(self) -> bool:
pass
@abstractmethod
def read(self, n: int = -1) -> AnyStr:
pass
@abstractmethod
def readable(self) -> bool:
pass
@abstractmethod
def readline(self, limit: int = -1) -> AnyStr:
pass
@abstractmethod
def readlines(self, hint: int = -1) -> List[AnyStr]:
pass
@abstractmethod
def seek(self, offset: int, whence: int = 0) -> int:
pass
@abstractmethod
def seekable(self) -> bool:
pass
@abstractmethod
def tell(self) -> int:
pass
@abstractmethod
def truncate(self, size: int = None) -> int:
pass
@abstractmethod
def writable(self) -> bool:
pass
@abstractmethod
def write(self, s: AnyStr) -> int:
pass
@abstractmethod
def writelines(self, lines: List[AnyStr]) -> None:
pass
@abstractmethod
def __enter__(self) -> 'IO[AnyStr]':
pass
@abstractmethod
def __exit__(self, type, value, traceback) -> None:
pass
class BinaryIO(IO[bytes]):
"""Typed version of the return of open() in binary mode."""
__slots__ = ()
@abstractmethod
def write(self, s: Union[bytes, bytearray]) -> int:
pass
@abstractmethod
def __enter__(self) -> 'BinaryIO':
pass
class TextIO(IO[str]):
"""Typed version of the return of open() in text mode."""
__slots__ = ()
@abstractproperty
def buffer(self) -> BinaryIO:
pass
@abstractproperty
def encoding(self) -> str:
pass
@abstractproperty
def errors(self) -> str:
pass
@abstractproperty
def line_buffering(self) -> bool:
pass
@abstractproperty
def newlines(self) -> Any:
pass
@abstractmethod
def __enter__(self) -> 'TextIO':
pass
class io:
"""Wrapper namespace for IO generic classes."""
__all__ = ['IO', 'TextIO', 'BinaryIO']
IO = IO
TextIO = TextIO
BinaryIO = BinaryIO
io.__name__ = __name__ + '.io'
sys.modules[io.__name__] = io
Pattern = _TypeAlias('Pattern', AnyStr, type(stdlib_re.compile('')),
lambda p: p.pattern)
Match = _TypeAlias('Match', AnyStr, type(stdlib_re.match('', '')),
lambda m: m.re.pattern)
class re:
"""Wrapper namespace for re type aliases."""
__all__ = ['Pattern', 'Match']
Pattern = Pattern
Match = Match
re.__name__ = __name__ + '.re'
sys.modules[re.__name__] = re
|
the-stack_0_10053 | # -*- coding: utf-8 -*-
#
# John C. Thomas 2021 gpSTS
import torch
import torch.nn as nn
import torch.utils.data as dataloader
import torchvision
from torchvision.datasets import DatasetFolder
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import Config
import Config as conf
def make_predictions(model, device, test_loader):
# Set model to eval mode to notify all layers.
model.eval()
targets = []
preds = []
# Set torch.no_grad() to disable gradient computation and backpropagation
with torch.no_grad():
for sample in test_loader:
data, target = sample
data, target = data.to(device), target.to(device)
# Predict for data by doing forward pass
output = model(data)
pred = output.max(1, keepdim=True)[1]
preds.append(pred.cpu().numpy())
targets.append(target.cpu().numpy())
targets = [np.hstack(y) for y in targets]
preds = [np.hstack(y) for y in preds]
targets = np.hstack(targets)
preds = np.hstack(preds)
return targets, preds
def progbar(curr, total, full_progbar, epoch, num_epochs, loss, accuracy):
frac = curr/total
filled_progbar = round(frac*full_progbar)
print('\r',
'#'*filled_progbar + '-'*(full_progbar-filled_progbar),
f'Epoch [{epoch}/{num_epochs}]',
f'Step [{curr}/{total}]',
'Loss: {:.6f}'.format(loss),
'Accuracy: [{:>7.2%}]'.format(accuracy),
end='')
def specnorm(data):
dmin = np.min(data)
dmax = np.max(data)
out = np.zeros(data.shape[0])
for i in range(0,data.shape[0]):
out[i] = (data[i] - dmin)/(dmax-dmin)
return out
def np_loader(path):
with open(path, 'rb') as f:
data = np.load(f,allow_pickle=True)
dnp = data[0]
dnp = specnorm(dnp)
dout = torch.from_numpy(dnp).float()
return torch.reshape(dout,(1,len(dnp)))
def spec_loader(data):
dnp = data
dnp = specnorm(dnp)
dout = torch.from_numpy(dnp).float()
return torch.reshape(dout,(1,1,len(dnp)))
def dplot(imagein, title='Interpolated'):
fig, ax = plt.subplots()
z_min, z_max = imagein.min(), imagein.max()
xx, yy = np.meshgrid(np.linspace(1, imagein.shape[0], imagein.shape[0]), np.linspace(1, imagein.shape[1], imagein.shape[1]))
x = xx[::1]
y = yy[::1]
cout = ax.pcolormesh(x, y, imagein, cmap='bwr', vmin=z_min, vmax=z_max)
ax.set_title(title)
ax.axis([x.min(), x.max(), y.min(), y.max()])
ax.get_xaxis().set_ticks([])
ax.get_yaxis().set_ticks([])
plt.axis('scaled')
fig.tight_layout()
plt.show()
def dscplot(dx,dy,di, title='Collected Points'):
fig = plt.figure(1, clear=True)
plt.scatter(dx, dy, c=di, cmap='viridis') #0,1
plt.title(title)
plt.colorbar()
plt.axis('scaled')
fig.tight_layout()
plt.show()
# Convolutional neural network
class Conv1d(nn.Module):
def __init__(self,num_classes=4):
super(Conv1d, self).__init__()
self.layer1 = nn.Sequential(
nn.Conv1d(1, 64, kernel_size=3, stride=1, padding=1),
nn.BatchNorm1d(64),
nn.ReLU(),
nn.MaxPool1d(2, stride=1))
self.layer2 = nn.Sequential(
nn.Conv1d(64, 128, kernel_size=3, stride=1, padding=1),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.MaxPool1d(2, stride=1))
self.layer3 = nn.Dropout(p=0.2)
self.fc = nn.Linear(self.getinput(), num_classes)
def size_postopt(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
return out.size()
def getinput(self):
        size = self.size_postopt(torch.rand(1,1,conf.nanonis_config['Nanonis_Settings']['NumSpectralPoints'])) # dummy input: batch 1, 1 channel, NumSpectralPoints samples
m = 1
for i in size:
m *= i
return int(m)
def forward(self, x):
out = self.layer1(x)
out = self.layer2(out)
out = self.layer3(out)
out = out.reshape(out.size(0), -1)
out = self.fc(out)
        return out
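# Minimal usage sketch: run one random spectrum through the (untrained) network.
# Assumes Config provides the same NumSpectralPoints entry used by getinput() above.
if __name__ == '__main__':
    n_points = conf.nanonis_config['Nanonis_Settings']['NumSpectralPoints']
    demo_model = Conv1d(num_classes=4)
    demo_spectrum = spec_loader(np.random.rand(n_points))  # shape (1, 1, n_points)
    demo_logits = demo_model(demo_spectrum)
    print(demo_logits.shape)  # expected: torch.Size([1, 4])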
the-stack_0_10054 | import os
import numpy as np
from matplotlib import pyplot as plt
import figlatex
import hist2d
import colormap
commands = [
'-m 100000 -L 15250 -U 15850 darksidehd/merged_000886.root:53',
'-m 100000 -L 750 -v 750 -l 8900 darksidehd/nuvhd_lf_3x_tile53_77K_64V_6VoV_1.wav',
'-m 100000 -L 750 -v 750 -l 8900 darksidehd/nuvhd_lf_3x_tile53_77K_66V_7VoV_1.wav',
]
###########################
figs = []
cmap = colormap.uniform()
for ifile, cmd in enumerate(commands):
figname = f'fighist2dtile53-{ifile}'
fig = plt.figure(num=figname, clear=True, figsize=[9, 4])
save = f'figthesis/{figname}.npz'
if not os.path.exists(save):
hist = hist2d.Hist2D(cmd.split())
print(f'save {save}...')
hist.save(save, compress=True)
print(f'load {save}...')
hist = hist2d.Hist2D.load(save)
hist.hist2d(fig, cmap=cmap)
figs.append(fig)
for fig in figs:
figlatex.save(fig)
for fig in figs:
fig.show()
|
the-stack_0_10056 | ### Team6 main.py ###
### author: tanahashi, kurita, ito ###
import os
import eel
import csv
import datetime
from datetime import datetime as dt
import numpy
import random
import matplotlib.pyplot as plt
import japanize_matplotlib # needed to render Japanese text in the plots
from typing import Counter
# import importer
# import exporter
# The initial password for P000 is 000b
print("404 Not Found エラーが出た場合、VSCodeでこのファイルを開いてから実行してみてください。")
eel.init("MainProject/view")
eel.start("login.html", size=(800, 480), block=False)
@eel.expose
def registtData():
#print(registtDatatoPy())
try:
if registtDatatoPy() == True:
return "tomato"
else:
return "onion"
except(KeyError):
return "onion"
def gettData():
tData = eel.sendtDatatoPy()()
gtID = tData[0]
gtPW = tData[1]
return gtID, gtPW
tID, tPW = "xxxx", "yyyy"
# Handling that follows verification of the tID and tPW entered on main.html
def registtDatatoPy():
global tID, tPW
tID, tPW = gettData()
print("tID: {0} tPW: {1}".format(tID, tPW))
if tIDtPWverify(tID,tPW):
print("Yeeeeeeeeee")
return True
else:
print("Noooooooooo")
return False
# Read the teacher file and verify tID / tPW
def tIDtPWverify(tID,tPW):
tID, tPW = gettData()
tnamecsv = {}
with open("./data/教員・担当科目リスト.csv", "r", encoding="utf_8", errors="", newline="") as f:
reader = csv.DictReader(f)
for row in reader:
tnamecsv[row["ID"]] = row["氏名"]
print(tnamecsv[tID])
tpwcsv = {}
with open("./data/tPW.csv","r")as p:
reader = csv.DictReader(p)
for prow in reader:
tpwcsv[prow["tID"]] = prow["tPW"]
tPWoncsv = tpwcsv[tID]
#print(tPWoncsv)
if tPW == tPWoncsv:
return True
else:
return False
# Show the teacher's name in admin mode
@eel.expose
def picktName():
try:
global tID
tnamecsv = {}
with open("./data/教員・担当科目リスト.csv", "r", encoding="utf_8", errors="", newline="") as f:
reader = csv.DictReader(f)
for row in reader:
tnamecsv[row["ID"]] = row["氏名"]
#print(tnamecsv[tID])
tName = str(tnamecsv[tID])
print("user: " + tName)
eel.printtName(tName)
except(FileNotFoundError):
os.getcwd()
os.chdir("./team6/MainProject/")
picktName()
# reader = "x"
tcName = ["xx", "xx"]
tcDay = [0, 0]
tcPeriod = [0, 0]
@eel.expose
def pickcName():
global tID
global tcName
global tcDay
global tcPeriod
# tccsv = [[0] * 5 for i in range(4)]
# print(tccsv)
# tcName = [[0] * 5 for i in range(4)]
# tccsvx = []
# for i in range(5):
# with open("./data/教員・担当科目リスト.csv", "r", encoding="utf_8", errors="", newline="") as f:
# reader = csv.DictReader(f)
# for row in reader:
# print(row)
# tanto = str('担当科目' + str(i+1))
# print(tanto)
# tccsvx[row["ID"]] = row["担当科目1"]
# tcName[i] = str(tccsvx[tID])
# print("calss1: " + tcName[i])
tc1csv = {}
tc2csv = {}
tcName = ["name", "name"]
with open("./data/教員・担当科目リスト.csv", "r", encoding="utf_8", errors="", newline="") as f:
reader = csv.DictReader(f)
for row in reader:
tc1csv[row["ID"]] = row["担当科目1"]
tc2csv[row["ID"]] = row["担当科目2"]
tcName[0] = str(tc1csv[tID])
tcName[1] = str(tc2csv[tID])
print("calss1: " + tcName[0])
print("calss2: " + tcName[1])
# tcID = [[0] * 5 for i in range(4)]
# tcxID = [[0] * 5 for i in range(4)]
# for j in range(5):
# with open("./data/講義科目ルール.csv", "r", encoding="utf_8", errors="", newline="") as p:
# reader = csv.DictReader(p)
# for row in reader:
# tcxID[j][row["科目名"]] = row["講義ID"]
# tcID[j] = str(tcxID[tc1Name])
# print("classID: " + tcID[j])
tc1xID = {}
tc2xID = {}
with open("./data/講義科目ルール.csv", "r", encoding="utf_8", errors="", newline="") as p:
reader = csv.DictReader(p)
for row in reader:
tc1xID[row["科目名"]] = row["講義ID"]
tc2xID[row["科目名"]] = row["講義ID"]
tc1ID = str(tc1xID[tcName[0]])
try:
tc2ID = str(tc2xID[tcName[1]])
except(KeyError):
tc2ID = "X0_"
print("calss1ID: " + tc1ID)
print("calss2ID: " + tc2ID)
tcDay = [0, 0]
tcPeriod = [0, 0]
cID = [tc1ID, tc2ID]
for n in range(0, len(cID)):
# print(n)
# print(len(cID))
if('M' in cID[n]):
tcDay[n] = '月'
elif('Tu' in cID[n]):
tcDay[n] = '火'
elif('W' in cID[n]):
tcDay[n] = '水'
elif('Th' in cID[n]):
tcDay[n] = '木'
elif('F' in cID[n]):
tcDay[n] = '金'
else:
tcDay[n] = ''
tcName[1] = "undefined"
print('Day config error')
if('12_' in cID[n]):
tcPeriod[n] = '1,2限'
elif('23_' in cID[n]):
tcPeriod[n] = '2,3限'
elif('34_' in cID[n]):
tcPeriod[n] = '3,4限'
elif('45_' in cID[n]):
tcPeriod[n] = '4,5限'
elif('1_' in cID[n]):
tcPeriod[n] = '1限'
elif('2_' in cID[n]):
tcPeriod[n] = '2限'
elif('3_' in cID[n]):
tcPeriod[n] = '3限'
elif('4_' in cID[n]):
tcPeriod[n] = '4限'
elif('5_' in cID[n]):
tcPeriod[n] = '5限'
else:
tcPeriod[n] = ''
print('Class period config error')
try:
print(tcDay[n] + tcPeriod[n])
except(TypeError):
pass
except(IndexError):
pass
n = n+1
tc1Name = tcName[0]
tc2Name = tcName[1]
tclen = len(tcName)
tclen = 5
eel.addcData(tcName, tclen, tcDay, tcPeriod)
# Branching helper for the admin view
@eel.expose
def clidSet(clid):
global tcName
global tcDay
global tcPeriod
print(clid)
print(tcName)
cDay = "0"
cPeriod = "0"
try:
if clid == "101":
cConfig = tcName[0]
cDay = tcDay[0]
cPeriod = tcPeriod[0]
elif clid == "102":
cConfig = tcName[1]
cDay = tcDay[1]
cPeriod = tcPeriod[1]
elif clid == "103":
cConfig = tcName[2]
cDay = tcDay[2]
cPeriod = tcPeriod[2]
elif clid == "104":
cConfig = tcName[3]
cDay = tcDay[3]
cPeriod = tcPeriod[4]
elif clid == "105":
cConfig = tcName[4]
cDay = tcDay[4]
cPeriod = tcPeriod[4]
except(IndexError):
pass
print(cConfig)
tcxID = {}
tcxCT1 = {}
tcxCT2 = {}
tcxLT1 = {}
tcxLT2 = {}
with open("./data/講義科目ルール.csv", "r", encoding="utf_8", errors="", newline="") as p:
reader = csv.DictReader(p)
for row in reader:
tcxID[row["科目名"]] = row["講義ID"]
tcxCT1[row["科目名"]] = row["開始時間"]
tcxCT2[row["科目名"]] = row["終了時間"]
tcxLT1[row["科目名"]] = row["出席限度(分)"]
tcxLT2[row["科目名"]] = row["遅刻限度(分)"]
tccID = str(tcxID[cConfig])
tccCT1 = str(tcxCT1[cConfig])
tccCT2 = str(tcxCT2[cConfig])
tccLT1 = str(tcxLT1[cConfig])
tccLT2 = str(tcxLT2[cConfig])
print("ID: " + tccID)
print("Day: " + cDay)
print("Period:" + cPeriod)
print("Start: " + tccCT1)
print("End: " + tccCT2)
print("Limit1:" + tccLT1)
print("Limit2:" + tccLT2)
tccCT1 = str(tcxCT1[cConfig])
tccCT2 = str(tcxCT2[cConfig])
tccLT1 = str(tcxCT1[cConfig][0:5])
tccLT2 = str(tcxCT1[cConfig][0:5])
tcxLT1m = int(tcxLT1[cConfig])
tcxLT2m = int(tcxLT2[cConfig])
# tcxLT1m = dt.strptime(tcxLT1m, '%H:%M:%S')
# tcxLT2m = dt.strptime(tcxLT2m, '%H:%M:%S')
tccCT1t = dt.strptime(tccCT1, '%H:%M')
tccCT2t = dt.strptime(tccCT2, '%H:%M')
tccLT1t = dt.strptime(tccLT1, '%H:%M')
tccLT2t = dt.strptime(tccLT2, '%H:%M')
tccLT1t = tccLT1t + datetime.timedelta(minutes=tcxLT1m)
tccLT2t = tccLT2t + datetime.timedelta(minutes=tcxLT2m)
tccCT1 = str(tccCT1t.time())
tccCT2 = str(tccCT2t.time())
tccLT1 = str(tccLT1t.time())
tccLT2 = str(tccLT2t.time())
tccCT1 = tccCT1[0:5]
tccCT2 = tccCT2[0:5]
tccLT1 = tccLT1[0:5]
tccLT2 = tccLT2[0:5]
print("授業開始: " + tccCT1)
print("授業終了: " + tccCT2)
print("以降遅刻: " + tccLT1)
print("以降欠席: " + tccLT2)
eel.initialID(cConfig, tccID, cDay, cPeriod, tccCT1, tccCT2, tccLT1, tccLT2)
# eel.initialCT(tccCT1, tccCT2)
# eel.initialLT(tccLT1, tccLT2)
# return tccCT1, tccCT2, tccLT1, tccLT2
datew = datetime.date.today()
datew = datew.strftime("%Y_%m_%d")
print(datew)
# simulated attendees
# main author: ito
def stdSim(cID):
number=range(1,101)
    rnumber=random.sample(number,len(number)) # generate the student numbers in random order
temlist=[]
for i in rnumber:
temNo= "S{:0>3}".format(i) #"S001" "S012"のように3桁表示
temlist.append(temNo) #temlistはS001からS100の100個の要素からなるリスト
#講義IDに一致した履修者csvを開く
    stdIDmx = {} # dict: student number -> IDm
    stdIDm = [] # list of matched IDm values
stdcsvName = "./data/履修者-" + cID + ".csv"
with open(stdcsvName, "r", encoding="utf_8", errors="", newline="") as p:
reader = csv.DictReader(p)
for row in reader:
stdIDmx[row["学籍番号"]] = row["IDm"]
for i in range(len(temlist)):
try:
IDm = str(stdIDmx[temlist[i]])
stdIDm.append(IDm)
except KeyError:
pass
# print(stdcsvName)
# print(len(stdIDm))
return stdIDm
IOcsvName = "xx"
# attendance-list CSV handling, doubling as an attendance simulator
@eel.expose
def openIOcsv(cID, cName):
global datew
global IOcsvName
tcxCT1 = {}
tcxCT2 = {}
tcxLT1 = {}
tcxLT2 = {}
with open("./data/講義科目ルール.csv", "r", encoding="utf_8", errors="", newline="") as p:
reader = csv.DictReader(p)
for row in reader:
tcxCT1[row["科目名"]] = row["開始時間"]
tcxCT2[row["科目名"]] = row["終了時間"]
tcxLT1[row["科目名"]] = row["出席限度(分)"]
tcxLT2[row["科目名"]] = row["遅刻限度(分)"]
tccCT1 = str(tcxCT1[cName]) + ":00"
tccCT2 = str(tcxCT2[cName]) + ":00"
tccLT1 = str(tcxCT1[cName][0:5]) + ":00"
tccLT2 = str(tcxCT1[cName][0:5]) + ":00"
tcxLT1m = int(tcxLT1[cName])
tcxLT2m = int(tcxLT2[cName])
# tcxLT1m = dt.strptime(tcxLT1m, '%H:%M:%S')
# tcxLT2m = dt.strptime(tcxLT2m, '%H:%M:%S')
tccCT1t = dt.strptime(tccCT1, '%H:%M:%S')
tccCT2t = dt.strptime(tccCT2, '%H:%M:%S')
tccLT1t = dt.strptime(tccLT1, '%H:%M:%S')
tccLT2t = dt.strptime(tccLT2, '%H:%M:%S')
tccLT1t = tccLT1t + datetime.timedelta(minutes=tcxLT1m)
tccLT2t = tccLT2t + datetime.timedelta(minutes=tcxLT2m)
tccCT1t = tccCT1t.time()
tccCT2t = tccCT2t.time()
tccLT1t = tccLT1t.time()
tccLT2t = tccLT2t.time()
print("授業開始: " + str(tccCT1t))
print("授業終了: " + str(tccCT2t))
print("以降遅刻: " + str(tccLT1t))
print("以降欠席: " + str(tccLT2t))
LimitTime = [tccCT1t, tccCT2t, tccLT1t, tccLT2t]
stdIDm = stdSim(cID)
# print(stdIDm)
stdIDx = {}
stdNamex = {}
stdID = []
stdName = []
print("Preparations are underway: " + cName)
dirName = "./Mainproject/IOList/" + cName
IOcsvName = "./Mainproject/IOList/" + cName + "/" + cName + datew + "出欠リスト.csv"
stdcsvName = "./data/履修者-" + cID + ".csv"
if(os.path.exists(dirName) == False):
os.mkdir(dirName)
    # load the list of enrolled students
with open(stdcsvName, "r", encoding="utf_8", errors="") as stdcsv:
reader = csv.DictReader(stdcsv)
for row in reader:
stdIDx[row["IDm"]] = row["学籍番号"]
stdNamex[row["IDm"]] = row["名前"]
stdlen = len(stdIDm)
print("履修者数: " + str(stdlen))
for i in range(len(stdIDm)):
try:
try:
stdID.append(str(stdIDx[stdIDm[i]]))
stdName.append(str(stdNamex[stdIDm[i]]))
except(KeyError):
stdID.append("S000")
stdName.append("名無ノ権兵衛")
except(IndexError):
pass
    # create the initial attendance-list CSV
if(os.path.exists(IOcsvName) == False):
with open(IOcsvName, "w", encoding="utf_8", newline="") as IOcsv:
writer = csv.writer(IOcsv)
writer.writerow(["学籍番号", "名前", "IDm", "入室時刻", "出欠"])
for k in range(len(stdIDm)):
writer.writerow([stdID[k], stdName[k], stdIDm[k], "00:00:00", "欠席"])
    # sort by student number
with open(IOcsvName, "r", encoding="utf_8") as IOcsvs:
reader = csv.DictReader(IOcsvs)
IOdict = []
for row in reader:
IOdict.append(row)
sortedIOdict = sorted(IOdict, key=lambda x:x["学籍番号"])
with open(IOcsvName, "w", encoding="utf_8", newline="") as IOcsvw:
writer2 = csv.writer(IOcsvw)
writer2.writerow(["学籍番号", "名前", "IDm", "入室時刻", "出欠"])
for g in range(len(stdIDm)):
dictvalues = sortedIOdict[g].values()
writer2.writerow(dictvalues)
# print(stdID)
# print(stdName)
    # attendance is simulated by passing stdID and stdName to the JS side in a loop,
    # leaving a reasonable interval between touches
    # card-touch intervals
timespanx = numpy.random.normal(
        loc = 7, # mean
        scale = (len(stdIDm)/6), # standard deviation
        size = len(stdIDm) # output array size
)
timespan = timespanx
tmp = 0
for j in range(len(timespanx)):
timespan[j] = int(timespan[j])
tmp = tmp + timespan[j]
# print(timespan)
print(tmp/60)
    # update the attendance list
def touchIDcard(no, stdlenx, LimitTime):
dtNow = datetime.datetime.now()
now = dtNow.time()
print(now)
status = "出席"
if now < LimitTime[2]:
status = "出席"
elif now < LimitTime[3]:
status = "遅刻"
elif now < LimitTime[1]:
status = "欠席"
else:
status = "欠席"
print(status)
eel.showIDinfo(stdID[no], stdName[no])
eel.showNo(no + 1, stdlenx)
eel.showStatus(status)
f = open(IOcsvName, "r", encoding="utf-8")
csv_data = csv.reader(f)
list = [ e for e in csv_data]
f.close()
now = str(now)
now = now[0:8]
        # updated row data
data = [stdID[no], stdName[no], stdIDm[no], now, status]
for i in range(len(list)):
if list[i][0]==data[0]:
list[i] = data
        # rewrite the CSV
with open(IOcsvName, "w", encoding="utf_8", newline="") as f:
writer = csv.writer(f)
writer.writerows(list)
    # trigger the simulated card touches
eel.sleep(3)
for s in range(len(stdIDm)):
if s != (len(stdIDm)-1):
if timespan[s]<=0:
timespan[s] = (timespan[s] * -1) + 1
print(timespan[s], end=" ")
print(stdIDm[s])
touchIDcard(s, stdlen, LimitTime)
eel.sleep(timespan[s])
else:
            # the latecomers
num = random.randint(0,9)
print(num)
if num > 8:
eel.sleep(800)
elif num > 7:
eel.sleep(300)
elif num > 4:
eel.sleep(60)
else:
eel.sleep(3)
print(stdIDm[s])
touchIDcard(s, stdlen, LimitTime)
@eel.expose
def generateIOcsvName(clid):
global tcName
try:
if clid == "101":
cName = tcName[0]
elif clid == "102":
cName = tcName[1]
elif clid == "103":
cName = tcName[2]
elif clid == "104":
cName = tcName[3]
elif clid == "105":
cName = tcName[4]
except(IndexError):
pass
IOcsvName = "./Mainproject/IOList/" + cName + "/" + cName + datew + "出欠リスト.csv"
print(IOcsvName)
eel.getcName(cName)
eel.getIOcsvName(IOcsvName)
@eel.expose
def updateIOcsv(cDataPockets):
newcData = cDataPockets
print(newcData[0])
print(newcData[1])
print(newcData[2])
print(newcData[3])
print(newcData[4])
cName = newcData[0]
newcDay = newcData[1]
newcPeri = newcData[2]
newLT1 = newcData[3]
newLT2 = newcData[4]
f = open("./data/講義科目ルール.csv", "r", encoding="utf-8")
csv_data = csv.reader(f)
list = [ e for e in csv_data]
f.close()
# print(list)
# newcID
for s in range(len(list)):
if list[s][1]==cName:
basecID = list[s][0]
tID = list[s][2]
tName = list[s][3]
exam = list[s][8]
sNo = list[s][9]
newcID = newcDay + newcPeri
if basecID[-1:] == "1":
newcID = newcID + "1"
if basecID[-1:] == "2":
newcID = newcID + "2"
if basecID[-1:] == "3":
newcID = newcID + "3"
if basecID[-1:] == "4":
newcID = newcID + "4"
    # avoid duplicate class IDs
for t in range(len(list)):
if list[t][0]==newcID:
if list[t][1]!=cName:
excID = list[t][0]
if excID[-1:] == "_":
newcID = newcID + "1"
elif excID[-1:] == "1":
newcID = newcID[:-1] + "2"
elif excID[-1:] == "2":
newcID = newcID[:-1] + "1"
if excID[-1:] == "2":
newcID = newcID[:-1] + "3"
elif excID[-1:] == "3":
newcID = newcID[:-1] + "4"
    # newCT1, newCT2 (class start and end times)
if newcPeri == "1_":
newCT1 = "09:00"
newCT2 = "10:30"
if newcPeri == "2_":
newCT1 = "10:40"
newCT2 = "12:10"
if newcPeri == "3_":
newCT1 = "13:00"
newCT2 = "14:30"
if newcPeri == "4_":
newCT1 = "14:40"
newCT2 = "16:10"
if newcPeri == "5_":
newCT1 = "16:20"
newCT2 = "17:50"
if newcPeri == "12_":
newCT1 = "09:00"
newCT2 = "12:10"
if newcPeri == "23_":
newCT1 = "10:40"
newCT2 = "14:30"
if newcPeri == "34_":
newCT1 = "13:00"
newCT2 = "16:10"
if newcPeri == "45_":
newCT1 = "14:40"
newCT2 = "17:50"
    # newLT1 (attendance cut-off)
newCT1t = dt.strptime(newCT1, '%H:%M')
newCT2t = dt.strptime(newCT2, '%H:%M')
newLT1t = dt.strptime(newLT1, '%H:%M')
newLT2t = dt.strptime(newLT2, '%H:%M')
if newLT1t<newCT1t:
eel.showErrorInfo()
return
if newLT2t<newCT1t:
eel.showErrorInfo()
return
if newLT2t<newLT1t:
eel.showErrorInfo()
return
if newCT2t<newLT2t:
eel.showErrorInfo()
return
newLT1t = newLT1t - newCT1t
newLT2t = newLT2t - newCT1t
newLT1 = str(newLT1t)
newLT2 = str(newLT2t)
print(newLT1)
print(newLT2)
newLT1 = newLT1[2:4]
newLT2 = newLT2[2:4]
if newLT1 == " d":
newLT1 = "00"
if newLT2 == " d":
newLT2 = "00"
    # updated row data
data = [newcID, cName, tID, tName, newCT1, newCT2, newLT1, newLT2, exam, sNo]
print(data)
for i in range(len(list)):
if list[i][1]==cName:
list[i] = data
    # rewrite the CSV
with open("./data/講義科目ルール.csv", "w", encoding="utf_8", newline="") as f:
writer = csv.writer(f)
writer.writerows(list)
eel.toAdmin()
# attendance-list display
@eel.expose
def chooseIOList(cName, iNo):
path = "./Mainproject/IOList/" + cName + "/"
try:
IOcsvNames = os.listdir(path)
except(FileNotFoundError):
eel.showNameError()
return
csvNo = len(IOcsvNames)
listS = []
sStatusVal = []
for c in range(csvNo):
IOcsvNamepath = path + IOcsvNames[c]
print(IOcsvNamepath)
f = open(IOcsvNamepath, "r", encoding="utf-8")
csv_data = csv.reader(f)
listS = [ o for o in csv_data]
f.close()
# print(listS)
sStatusVal.append(listS)
    # latest attendance list
IOcsvNamepath = path + IOcsvNames[int(iNo)]
nIOcsvName = IOcsvNames[int(iNo)]
print(IOcsvNamepath)
f = open(IOcsvNamepath, "r", encoding="utf-8")
csv_data = csv.reader(f)
list = [ e for e in csv_data]
f.close()
sID = []
sName = []
sIDm = []
sIntime = []
sStatus = []
sStatusValApnd = 0
sStatusValLate = 0
sStatusValAbsc = 0
sStatusRates = []
sNo = len(list)-1
for i in range(sNo):
sID.append(list[i+1][0])
sName.append(list[i+1][1])
sIDm.append(list[i+1][2])
sIntime.append(list[i+1][3])
sStatus.append(list[i+1][4])
for x in range(csvNo):
if sStatusVal[x][i+1][4] == "出席":
sStatusValApnd += 1
elif sStatusVal[x][i+1][4] == "遅刻":
sStatusValLate += 1
elif sStatusVal[x][i+1][4] == "欠席":
sStatusValAbsc += 1
rate = str(sStatusValApnd) + "/" + str(sStatusValApnd + sStatusValLate + sStatusValAbsc)
# rate = round(rate)
# rate = str(rate) + "%"
sStatusRates.append(rate)
sStatusValApnd = 0
sStatusValLate = 0
sStatusValAbsc = 0
# print(sStatusRates)
# print(list)
eel.createIOTable(sID, sName, sIDm, sIntime, sStatus, sStatusRates, sNo, nIOcsvName, csvNo, IOcsvNames)
@eel.expose
def createOneClassGraph(cName, iNo):
    # per-lecture attendance graph
# main author: kurita
path = "./Mainproject/IOList/" + cName + "/"
IOcsvNames = os.listdir(path)
print(path)
print(IOcsvNames)
    # latest attendance list
IOcsvName = path + IOcsvNames[int(iNo)]
    # read the file name to use as the graph title
file_path = IOcsvName
file_name_path=os.path.basename(file_path)
    # count 出席 (present), 遅刻 (late) and 欠席 (absent)
count0 = {}
with open(IOcsvName,encoding='UTF8') as fo:
atl_reader = csv.reader(fo)
atl_header = next(atl_reader)
# data=fo
print(atl_header)
for row in atl_reader:
data0=row[4]
count0.setdefault(data0,0)
count0[data0] +=1
with open(IOcsvName,encoding='UTF8') as fc:
line_count=sum([1 for line in fc])
li_ct=line_count-1
print(li_ct)
y_list=[]
x_label=[]
    # figure object for saving the graph
fig=plt.figure()
plt.title(file_name_path)
for key0, value0 in count0.items():
att_counter='{}: {:d}'.format(key0,value0)
        # y-axis values
        y_list.append(int(value0))
        # x-axis text labels
        x_label.append('{}'.format(key0))
    # sort the y values in descending order
y_list2=sorted(y_list,reverse=True)
    # handle the case where nobody is 遅刻 (late) or 欠席 (absent) (y-axis values)
    if len(y_list2)==2:
        y_list2.append(0)
        # two statuses present
    elif len(y_list2)==1:
        y_list2.append(0)
        y_list2.append(0)
        # only one status present
    else:
        y_list2
        # all three statuses present
x=[0,1,2]
    # ex_label fixes the display order: 出席 (present), 遅刻 (late), 欠席 (absent)
    ex_label=['出席','遅刻','欠席']
    # handle the case where nobody is 遅刻 or 欠席 (x-axis labels)
if len(x_label)==2:
if '出席' in x_label:
if '遅刻' in x_label:
x_label.append('欠席')
                # 欠席 was missing
else:
x_label.append('遅刻')
                # 遅刻 was missing
else:
x_label.append('出席')
            # 出席 was missing <- probably never happens, kept only to avoid the exception
    # two labels present
elif len(x_label)==1:
if '出席' in x_label:
x_label.append('遅刻')
x_label.append('欠席')
            # 遅刻 and 欠席 were missing
elif '遅刻' in x_label:
x_label.append('出席')
x_label.append('欠席')
            # 出席 and 欠席 were missing
else:
x_label.append('出席')
x_label.append('遅刻')
            # 出席 and 遅刻 were missing <- probably never happens, kept only to avoid the exception
else:
x_label
x_label2=sorted(x_label,key=ex_label.index)
    # build the bar chart
print(y_list2)
print(x_label2)
plt.ylim(0,li_ct)
graph=plt.bar(x,y_list2)
    # annotate each bar with its value
height=y_list2
for rect in graph:
height=rect.get_height()
plt.annotate('{}'.format(height),xy=(rect.get_x() + rect.get_width()/2,height),xytext=(0,3),textcoords="offset points",ha='center',va='bottom')
plt.xticks(x,x_label2)
plt.show()
    # everything above graphs a single attendance list
@eel.expose
def createCumulativeClassGraph(cName):
    # cumulative attendance graph across all lectures
# main author: kurita
path = "./Mainproject/IOList/" + cName + "/"
csv_list3 = os.listdir(path)
os.chdir(path)
#csv_list3=glob.glob("/*.csv")
#csv_list3
#print(IOcsvNames)
print(csv_list3)
count1 = {}
# csv_list3=glob.glob(IOcsvNames)
for n in range(len(csv_list3)):
print(csv_list3[n])
with open(csv_list3[n],encoding='UTF8') as f3:
atl_reader3 = csv.reader(f3)
atl_header3 = next(atl_reader3)
#print(atl_header3)
for row in atl_reader3:
data=row[0]
data2=row[4]
count1.setdefault(data,0)
if '出席' in data2:
count1[data] +=1
#alatd_list=[]
    # count the number of '出席' (present) marks per student
stnumb_list=[]
atd_count_list=[]
for key, value in count1.items():
att_counter='{}: {:d}'.format(key,value)
        # (student number, attendance count) list
        #alatd_list.append(att_counter)
        # student number list
        stnumb_list.append('{}'.format(key))
        # attendance count list
atd_count_list.append(int(value))
#print(stnumb_list)
#print(atd_count_list)
count2 = {}
for m in range(len(csv_list3)):
with open(csv_list3[m],encoding='UTF8') as f4:
atl_reader4 = csv.reader(f4)
atl_header4 = next(atl_reader4)
#print(atl_header3)
for row in atl_reader4:
data3=row[0]
data4=row[4]
count2.setdefault(data3,0)
if '遅刻' in data4:
count2[data3] +=1
#alatd_list=[]
stnumb_list2=[]
atd_count_list2=[]
for key2, value2 in count2.items():
att_counter2='{}: {:d}'.format(key2,value2)
        # (student number, attendance count) list
        #alatd_list.append(att_counter)
        # student number list
        stnumb_list2.append('{}'.format(key2))
        # attendance count list
atd_count_list2.append(int(value2))
#print(stnumb_list)
#print(atd_count_list)
count3 = {}
for l in range(len(csv_list3)):
with open(csv_list3[l],encoding='UTF8') as f5:
atl_reader5 = csv.reader(f5)
atl_header5 = next(atl_reader5)
#print(atl_header3)
for row in atl_reader5:
data5=row[0]
data6=row[4]
count3.setdefault(data5,0)
if '欠席' in data6:
count3[data5] +=1
#alatd_list=[]
stnumb_list3=[]
atd_count_list3=[]
for key3, value3 in count3.items():
att_counter3='{}: {:d}'.format(key3,value3)
        # (student number, attendance count) list
        #alatd_list.append(att_counter)
        # student number list
        stnumb_list3.append('{}'.format(key3))
        # attendance count list
atd_count_list3.append(int(value3))
#print(stnumb_list)
#print(atd_count_list)
    # number of students
list_length=len(stnumb_list)
print(list_length)
    # remove the leading entry ('出席' and its count) from the lists
#stnumb_list.remove('出席')
#atd_count_list.remove(list_length)
#print(alatd_list)
#print(stnumb_list)
#print(atd_count_list)
#print(stnumb_list2)
#print(atd_count_list2)
    # build the grouped bar chart from here
fig=plt.figure()
    # list of consecutive integers starting at 0, one per student
y_set=list(range(list_length))
graph1=plt.bar(y_set,atd_count_list,align="edge",width=-0.5,color="#44cca3",label="出席")
graph2=plt.bar(y_set,atd_count_list2,align="center",width=0.5,color="#c3cc44",label="遅刻")
graph3=plt.bar(y_set,atd_count_list3,align="edge",width=0.5,color="#cc5844",label="欠席")
plt.xticks(y_set,stnumb_list,rotation=90)
plt.legend()
plt.show()
print(os.getcwd())
os.chdir("./team6/MainProject/")
# keep-alive loop; the eel app stops working without this
while True:
eel.sleep(2.0) |
the-stack_0_10059 | import random
rock = """
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
"""
paper = """
_______
---' ____)____
______)
_______)
_______)
---.__________)
"""
scissors = """
_______
---' ____)____
______)
__________)
(____)
---.__(___)
"""
symbols = [rock, paper, scissors]
user_symbol = int(input("Chosse between Rock, Paper or Scissors (0, 1 or 2) ==> "))
computer_symbol = random.randint(0,2)
if user_symbol > 2:
print("Value out of valid range, retry!")
    exit()
print("User selected:\n")
print(symbols[user_symbol] + "\n")
print("Computer Selected:\n")
print(symbols[computer_symbol] + "\n")
if user_symbol == 0: #rock
if computer_symbol == 0: #rock vs rock
print("Nobody Win 😶")
elif computer_symbol == 1: #rock vs paper
print("You Lose 😓 ")
else: #rock vs scissors
print("You Win 😁")
elif user_symbol == 1: #paper
if computer_symbol == 0: #paper vs rock
print("You Win 😁")
elif computer_symbol == 1: #paper vs paper
print("Nobody Win 😶")
else: #paper vs scissors
print("You Lose 😓 ")
elif user_symbol == 2: #scissors
if computer_symbol == 0: #scissors vs rock
print("You Lose 😓 ")
elif computer_symbol == 1: #scissors vs paper
print("You Win 😁")
else: #scissors vs scissors
print("Nobody Win 😶") |
the-stack_0_10062 | #!/usr/bin/env python
# Copyright 2019 Jian Wu
# License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
import io
import os
import subprocess
import numpy as np
import soundfile as sf
import scipy.signal as ss
from kaldi_python_io import Reader as BaseReader
from typing import Optional, IO, Union, Any, NoReturn, Tuple
def read_audio(fname: Union[str, IO[Any]],
beg: int = 0,
end: Optional[int] = None,
norm: bool = True,
sr: int = 16000) -> np.ndarray:
"""
Read audio files using soundfile (support multi-channel & chunk)
Args:
fname: file name or object
beg, end: begin and end index for chunk-level reading
norm: normalized samples between -1 and 1
sr: sample rate of the audio
Return:
        samps: audio samples, in shape C x N (or N for mono); only samps is returned
"""
# samps: N x C or N
# N: number of samples
# C: number of channels
samps, ret_sr = sf.read(fname,
start=beg,
stop=end,
dtype="float32" if norm else "int16")
if sr != ret_sr:
raise RuntimeError(f"Expect sr={sr} of {fname}, get {ret_sr} instead")
if not norm:
samps = samps.astype("float32")
# put channel axis first
# N x C => C x N
if samps.ndim != 1:
samps = np.transpose(samps)
return samps
def write_audio(fname: Union[str, IO[Any]],
samps: np.ndarray,
sr: int = 16000,
norm: bool = True) -> NoReturn:
"""
Write audio files, support single/multi-channel
Args:
fname: IO object or str
samps: np.ndarray, C x S or S
sr: sample rate
norm: keep same as the one in read_audio
"""
samps = samps.astype("float32" if norm else "int16")
# for multi-channel, accept ndarray N x C
if samps.ndim != 1 and samps.shape[0] < samps.shape[1]:
samps = np.transpose(samps)
samps = np.squeeze(samps)
# make dirs
if isinstance(fname, str):
parent = os.path.dirname(fname)
if parent and not os.path.exists(parent):
os.makedirs(parent)
sf.write(fname, samps, sr)
def add_room_response(spk: np.ndarray,
rir: np.ndarray,
early_energy: bool = False,
sr: int = 16000) -> Tuple[np.ndarray, float]:
"""
Convolute source signal with selected rirs
Args
spk: S, close talk signal
rir: N x R, single or multi-channel RIRs
early_energy: return energy of early parts
sr: sample rate of the signal
Return
        revb: N x S, reverberated signals
        energy: mean energy of the early part of channel 0 if early_energy is
            True, otherwise mean energy of the full reverberated channel 0
"""
if spk.ndim != 1:
raise RuntimeError(f"Can not convolve rir with {spk.ndim}D signals")
S = spk.shape[-1]
revb = ss.convolve(spk[None, ...], rir)[..., :S]
revb = np.asarray(revb)
if early_energy:
rir_ch0 = rir[0]
rir_peak = np.argmax(rir_ch0)
rir_beg_idx = max(0, int(rir_peak - 0.001 * sr))
rir_end_idx = min(rir_ch0.size, int(rir_peak + 0.05 * sr))
early_rir = np.zeros_like(rir_ch0)
early_rir[rir_beg_idx:rir_end_idx] = rir_ch0[rir_beg_idx:rir_end_idx]
early_rev = ss.convolve(spk, early_rir)[:S]
return revb, np.mean(early_rev**2)
else:
return revb, np.mean(revb[0]**2)
def run_command(command: str, wait: bool = True):
"""
Runs shell commands
"""
p = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if wait:
[stdout, stderr] = p.communicate()
if p.returncode != 0:
stderr_str = bytes.decode(stderr)
raise Exception("There was an error while running the " +
f"command \"{command}\":\n{stderr_str}\n")
return stdout, stderr
else:
return p
class AudioReader(BaseReader):
"""
Sequential/Random Reader for single/multiple channel audio using soundfile as the backend
The format of wav.scp follows Kaldi's definition:
key1 /path/to/key1.wav
key2 /path/to/key2.wav
...
or
key1 sox /home/data/key1.wav -t wav - remix 1 |
key2 sox /home/data/key2.wav -t wav - remix 1 |
...
or
key1 /path/to/ark1:XXXX
key2 /path/to/ark1:XXXY
are supported
Args:
wav_scp: path of the audio script
sr: sample rate of the audio
norm: normalize audio samples between (-1, 1) if true
channel: read audio at #channel if > 0 (-1 means all)
"""
def __init__(self,
wav_scp: str,
sr: int = 16000,
norm: bool = True,
channel: int = -1) -> None:
super(AudioReader, self).__init__(wav_scp, num_tokens=2)
self.sr = sr
self.ch = channel
self.norm = norm
self.mngr = {}
def _load(self, key: str) -> Optional[np.ndarray]:
fname = self.index_dict[key]
samps = None
# return C x N or N
if ":" in fname:
tokens = fname.split(":")
if len(tokens) != 2:
raise RuntimeError(f"Value format error: {fname}")
fname, offset = tokens[0], int(tokens[1])
# get ark object
if fname not in self.mngr:
self.mngr[fname] = open(fname, "rb")
wav_ark = self.mngr[fname]
# wav_ark = open(fname, "rb")
# seek and read
wav_ark.seek(offset)
try:
samps = read_audio(wav_ark, norm=self.norm, sr=self.sr)
except RuntimeError:
print(f"Read audio {key} {fname}:{offset} failed...",
flush=True)
else:
if fname[-1] == "|":
shell, _ = run_command(fname[:-1], wait=True)
fname = io.BytesIO(shell)
try:
samps = read_audio(fname, norm=self.norm, sr=self.sr)
except RuntimeError:
print(f"Load audio {key} {fname} failed...", flush=True)
if samps is None:
raise RuntimeError("Audio IO failed ...")
if self.ch >= 0 and samps.ndim == 2:
samps = samps[self.ch]
return samps
def nsamps(self, key: str) -> int:
"""
Number of samples
"""
data = self._load(key)
return data.shape[-1]
def power(self, key: str) -> float:
"""
Power of utterance
"""
data = self._load(key)
s = data if data.ndim == 1 else data[0]
return np.linalg.norm(s, 2)**2 / data.size
def duration(self, key: str) -> float:
"""
Utterance duration
"""
N = self.nsamps(key)
return N / self.sr
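

# --- Hedged usage sketch (added for illustration; the file names below are placeholders,
# --- not part of the original module) ---
if __name__ == "__main__":
    # write one second of silence, then read back only the first half second
    demo_sr = 16000
    write_audio("example.wav", np.zeros(demo_sr, dtype="float32"), sr=demo_sr)
    chunk = read_audio("example.wav", beg=0, end=demo_sr // 2, sr=demo_sr)
    print("chunk shape:", chunk.shape)  # expected: (8000,)

    # run_command simply shells out and returns (stdout, stderr) as bytes
    out, _ = run_command("echo hello")
    print(out)

    # AudioReader is driven by a Kaldi-style wav.scp (e.g. a line "utt1 example.wav");
    # construction is shown here, iteration is left out because it depends on
    # kaldi_python_io's Reader interface.
    # reader = AudioReader("wav.scp", sr=16000)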
|
the-stack_0_10064 | # -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
project = 'DocStack'
copyright = '2019, d05660'
author = 'd05660'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = '1.0'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
# html_theme = 'alabaster'
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
html_theme = 'default'
else:
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
html_show_sourcelink = False
html_favicon = 'favicon.ico'
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DocStackdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DocStack.tex', 'DocStack Documentation',
'd05660', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'docstack', 'DocStack Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DocStack', 'DocStack Documentation',
author, 'DocStack', 'One line description of project.',
'Miscellaneous'),
]
def setup(app):
app.add_stylesheet('css/custom.css?v20190329')
|
the-stack_0_10065 | # -*- coding: utf-8 -*-
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('authentication', '0008_auto_20151014_2027'),
]
operations = [
migrations.AlterField(
model_name='position',
name='committee',
field=models.CharField(default=b'hs', max_length=10, verbose_name='komite', choices=[(b'hs', 'Hovedstyret'), (b'appkom', 'Applikasjonskomiteen'), (b'arrkom', 'Arrangementskomiteen'), (b'bankom', 'Bank- og \xf8konomikomiteen'), (b'bedkom', 'Bedriftskomiteen'), (b'dotkom', 'Drifts- og utviklingskomiteen'), (b'ekskom', 'Ekskursjonskomiteen'), (b'fagkom', 'Fag- og kurskomiteen'), (b'jubkom', 'Jubileumskomiteen'), (b'pangkom', 'Pensjonistkomiteen'), (b'prokom', 'Profil-og aviskomiteen'), (b'redaksjonen', 'Redaksjonen'), (b'trikom', 'Trivselskomiteen'), (b'velkom', 'Velkomstkomiteen')]),
preserve_default=True,
),
migrations.AlterField(
model_name='position',
name='position',
field=models.CharField(default=b'medlem', max_length=10, verbose_name='stilling', choices=[(b'medlem', 'Medlem'), (b'leder', 'Leder'), (b'nestleder', 'Nestleder'), (b'redaktor', 'Redakt\xf8r'), (b'okoans', '\xd8konomiansvarlig')]),
preserve_default=True,
),
]
|
the-stack_0_10067 | from django.shortcuts import render
# pdf
from django.http import FileResponse
import io
from reportlab.pdfgen import canvas
from reportlab.lib.units import inch
from reportlab.lib.pagesizes import letter
import os
from django.conf import settings
# Create your views here.
def get_pdf_name(request):
pdf_dir = os.listdir(os.path.join(settings.MEDIA_ROOT, 'sample_pdf'))
pdf_dir_path = list(map( lambda x : '/{}/{}/{}'.format('media', 'sample_pdf', x), pdf_dir))
res = []
for ind, item in enumerate(pdf_dir):
res.append("<a href='{}' target='_blank' >{}</a>".format(pdf_dir_path[ind], item))
content = {'list' : res}
return render(request, 'index.html', content)
def get_pdf(request):
# ccreaate Bytestream Buffer
buf = io.BytesIO()
# create a canvas
c = canvas.Canvas(buf, pagesize=letter, bottomup=0)
# Create a text Object
textob = c.beginText()
textob.setTextOrigin(inch, inch)
textob.setFont("Helvetica", 14)
# add Some lines of text
lines = [
"This is line one",
"This is line two",
"This is line three",
]
for line in lines:
textob.textLine(line)
    # Finish up
c.drawText(textob)
c.showPage()
c.save()
buf.seek(0)
response = FileResponse(buf, as_attachment=True, filename='file.pdf')
response.headers['Content-Type'] = 'application/pdf'
return response
    content = {}
return render(request, 'index.html', content) |
the-stack_0_10070 | #!/usr/bin/python
#
# CLI compiler for bcmd's new model description language
#
import sys
import argparse
import bcmd_yacc
import os
import decimal
import string
import pprint
import logger
import ast
import codegen
import info
# default compiler configuration
# (this is effectively a template whose details
# may be adapted by command line args)
CONFIG = {'modelpath': ['.', 'models'],
'outdir': '.',
'outfile': None,
'treefile': None,
'name': None,
'unused': True,
'graph': None,
'graph-exclude-unused': False,
'graph-exclude-init': False,
'graph-exclude-self': True,
'graph-exclude-clusters': False,
'graph-exclude-params': False,
'independent': 't',
'input-makes-intermed': True}
# these are effectively constants
VERSION = 0.6
MODELDEF_EXT = '.modeldef'
CODE_EXT = '.c'
MODEL_EXT = '.model'
TREE_EXT = '.tree'
COMPILE_EXT = '.bcmpl'
GRAPHVIZ_EXT = '.gv'
DUMMY_SOURCE = '##\n'
# parse a chosen model definition file and return the AST
def parse_file(filename):
try:
f = open(filename)
data = f.read()
f.close()
except IOError as e:
logger.error("I/O error({0}): {1}".format(e.errno, e.strerror))
return None
logger.message("Processing file: " + filename)
bcmd_yacc.currentFile = filename
errsBefore = len(bcmd_yacc.compilationInfo['errors'])
lp = bcmd_yacc.get_lexer_parser()
result = lp[1].parse(data, lexer=lp[0])
fileErrs = len(bcmd_yacc.compilationInfo['errors']) - errsBefore
bcmd_yacc.currentFile = None
if fileErrs == 1:
logger.error('Compilation failed with 1 syntax error')
elif fileErrs > 1:
logger.error('Compilation failed with %d syntax errors' % fileErrs)
return fileErrs, result
def print_errors():
logger.error('*** Summary of model compilation errors ***')
errs = bcmd_yacc.compilationInfo
for ii in range(len(errs['errors'])):
logger.error(errs['messages'][ii]
+ ' (' + errs['files'][ii]
+ ', line ' + str(errs['lines'][ii]) + ')')
# find a file on the search path
def search_file(filename, search_path):
for path in search_path:
candidate = os.path.join(path, filename)
if os.path.isfile(candidate):
return os.path.abspath(candidate)
return None
# process arguments
def process_args():
config = CONFIG
ap = argparse.ArgumentParser(
description="Model compiler for the BCMD modelling system.")
ap.add_argument('--version', action='version',
version='bcmd version %.1fa' % VERSION)
ap.add_argument(
'-i', help='append to default model search path', metavar='PATH')
ap.add_argument(
'-I', help='replace default model search path', metavar='PATH')
ap.add_argument(
'-n', '--name', help='specify model name (default: <file1>)', metavar='NAME')
ap.add_argument(
'-o', help='specify output file name (default: <modelname>.model)', metavar='FILE')
ap.add_argument(
'-d', help='specify output directory (default: .)', metavar='DIR')
ap.add_argument(
'-u', '--unused', help='omit apparently unused intermediates', action='store_false')
ap.add_argument(
'-g', '--debug', help='include debug outputs in generated model code', action='store_true')
ap.add_argument('-t', '--tree', help='write parse tree to file (default: <modelname>.tree)',
nargs='?', default=None, const='', metavar='FILE')
ap.add_argument('-p', '--processed', help='write compilation data to file (default: <modelname>.bcmpl)',
nargs='?', default=None, const='', metavar='FILE')
ap.add_argument('-G', '--graph', help='write dependency structure in GraphViz format (default: <modelname>.gv)',
nargs='?', default=None, const='', metavar='FILE')
ap.add_argument('-U', '--graphxunused',
help='exclude apparently unused elements from graph output', action='store_true')
ap.add_argument('-N', '--graphxinit',
help='exclude initialisation dependencies from graph output', action='store_true')
ap.add_argument('-C', '--graphxclust',
help='exclude clustering from graph output', action='store_true')
ap.add_argument('-S', '--graphself',
help='include direct circular dependencies in graph output', action='store_false')
ap.add_argument('-v', '--verbose',
help='set level of detail logged to stderr (0-7, default: 3)', metavar='LEVEL', type=int)
ap.add_argument(
'-Y', '--yacc', help='run a dummy parse to rebuild parse tables', action='store_true')
# ... add further options here as needed ...
ap.add_argument('file', nargs='+',
help='one or more model description files to be compiled')
args = ap.parse_args()
if args.yacc:
lp = bcmd_yacc.get_lexer_parser()
result = lp[1].parse(DUMMY_SOURCE, lexer=lp[0])
return False
if not (args.I is None):
config['modelpath'] = args.I.split(os.pathsep)
elif not (args.i is None):
config['modelpath'] = config['modelpath'] + \
args.i.split(os.pathsep)
if not (args.name is None):
config['name'] = args.name
else:
srcname, srcext = os.path.splitext(args.file[0])
config['name'] = srcname
if not (args.o is None):
config['outfile'] = args.o
else:
config['outfile'] = config['name'] + MODEL_EXT
if args.d is not None:
if not os.path.isdir(args.d):
os.makedirs(args.d)
config['outdir'] = args.d
config['treefile'] = args.tree
config['compfile'] = args.processed
config['sources'] = args.file
config['unused'] = args.unused
config['debug'] = args.debug
config['graph'] = args.graph
config['graph-exclude-unused'] = args.graphxunused
config['graph-exclude-init'] = args.graphxinit
config['graph-exclude-self'] = args.graphself
config['graph-exclude-clusters'] = args.graphxclust
if args.verbose is not None:
logger.verbosity = args.verbose
return config
# load and parse source files named on the command line, plus imports
# note failures and return a structure including those details and
# the resulting merged item list
def load_sources(config):
sources = config['sources']
srcIndex = 0
parsedSources = []
failedSources = []
merged = []
while srcIndex < len(sources):
logger.message("Searching for source file: " + sources[srcIndex])
src = search_file(sources[srcIndex], config['modelpath'])
if (src is None) and (not sources[srcIndex].endswith(MODELDEF_EXT)):
logger.message(
"Not found, trying with added extension: " + sources[srcIndex] + MODELDEF_EXT)
src = search_file(sources[srcIndex] +
MODELDEF_EXT, config['modelpath'])
if src is None:
logger.warn("File not found: " + sources[srcIndex])
failedSources.append(sources[srcIndex])
else:
nErrs, ast = parse_file(src)
if nErrs > 0 or ast is None:
failedSources.append(src)
else:
ast = list(ast)
# add imports that are not already in the source list to it
for imp in list(sum([x[1:] for x in ast if x[0] == 'import'], ())):
if imp not in sources and imp + MODELDEF_EXT not in sources:
sources.append(imp)
logger.detail(ast, prettify=True)
parsedSources.append((sources[srcIndex], src))
merged = merged + ast
srcIndex = srcIndex + 1
logger.message("Total number of attempted source files: %d" % srcIndex)
logger.message("%d parsed, %d failed" %
(len(parsedSources), len(failedSources)))
for failed in failedSources:
logger.message(" -> %s" % failed)
return {'sources': sources, 'parsed': parsedSources, 'failed': failedSources, 'merged': merged}
# write the loaded merged item list to a file, if so specified
def write_tree(config, work):
if not config['treefile'] is None:
if config['treefile'] == '':
config['treefile'] = config['name'] + TREE_EXT
treePath = os.path.join(config['outdir'], config['treefile'])
logger.message("Attempting to write parse tree to " + treePath)
try:
treeStream = open(treePath, 'w')
pprint.pprint(work['merged'], stream=treeStream)
treeStream.close()
except IOError as e:
logger.error("Error writing file ({0}): {1}".format(
e.errno, e.strerror))
# write the processed model structure to a file, if so specified
def write_comp(config, processed):
if not config['compfile'] is None:
if config['compfile'] == '':
config['compfile'] = config['name'] + COMPILE_EXT
compPath = os.path.join(config['outdir'], config['compfile'])
logger.message(
"Attempting to write compilation structure to " + compPath)
try:
compStream = open(compPath, 'w')
pprint.pprint(processed, stream=compStream)
compStream.close()
except IOError as e:
logger.error("Error writing file ({0}): {1}".format(
e.errno, e.strerror))
# write the model dependencies to a graph, if so specified
def write_graph(config, model):
if not config['graph'] is None:
if config['graph'] == '':
config['graph'] = config['name'] + GRAPHVIZ_EXT
graphPath = os.path.join(config['outdir'], config['graph'])
logger.message("Attempting to write dependency graph to " + graphPath)
try:
stream = open(graphPath, 'w')
print(info.generateGraphViz(model, config), file=stream)
stream.close()
except IOError as e:
logger.error("Error writing file ({0}): {1}".format(
e.errno, e.strerror))
# ----------------------------------------------------------------------------
# main entry point of this compiler script
if __name__ == '__main__':
config = process_args()
if not config:
sys.exit(2)
work = load_sources(config)
if len(work['failed']) > 0:
print_errors()
sys.exit(1)
write_tree(config, work)
processed = ast.process(work['merged'], work[
'parsed'], config['independent'])
info.logModelInfo(processed, config)
write_comp(config, processed)
write_graph(config, processed)
source = codegen.generateSource(processed, config)
codepath = os.path.join(config['outdir'], config['name'] + CODE_EXT)
logger.message("Attempting to write C code to " + codepath)
try:
cfile = open(codepath, 'w')
cfile.write(source)
cfile.close()
except IOError as e:
logger.error("Error writing file ({0}): {1}".format(
e.errno, e.strerror))
sys.exit(1)
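
# --- Example invocation (illustrative only; the script and file names here are made up) ---
#   python bcmd_compile.py -I ./models -n brain brain.modeldef -t -G
# would parse ./models/brain.modeldef (plus any imported modeldef files), write the
# generated C source brain.c to the current directory, and also dump the parse tree
# (brain.tree) and a GraphViz dependency graph (brain.gv).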
|
the-stack_0_10071 | import os
import sys
sys.path.insert(0, ".")
sys.path.insert(1, "..")
from praw import __version__
copyright = "2020, Bryce Boe"
exclude_patterns = ["_build"]
extensions = ["sphinx.ext.autodoc", "sphinx.ext.intersphinx"]
html_static_path = ["_static"]
html_theme = "sphinx_rtd_theme"
html_theme_options = {"collapse_navigation": True}
htmlhelp_basename = "PRAW"
intersphinx_mapping = {"python": ("https://docs.python.org/3.8", None)}
master_doc = "index"
nitpicky = True
project = "PRAW"
pygments_style = "sphinx"
release = __version__
source_suffix = ".rst"
suppress_warnings = ["image.nonlocal_uri"]
version = ".".join(__version__.split(".", 2)[:2])
# Use RTD theme locally
if not os.environ.get("READTHEDOCS"):
import sphinx_rtd_theme
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
def skip(app, what, name, obj, skip, options):
if name in {
"__call__",
"__contains__",
"__getitem__",
"__init__",
"__iter__",
"__len__",
}:
return False
return skip
def setup(app):
app.connect("autodoc-skip-member", skip)
app.add_stylesheet("theme_override.css")
|
the-stack_0_10072 | import filecmp
import logging
import os
import textwrap
import uuid
from pathlib import Path
from unittest import mock
import pytest
from dvc.cli import main
from dvc.dependency.base import DependencyIsStageFileError
from dvc.dvcfile import DVC_FILE_SUFFIX
from dvc.exceptions import (
ArgumentDuplicationError,
CircularDependencyError,
CyclicGraphError,
OutputDuplicationError,
OverlappingOutputPathsError,
StagePathAsOutputError,
)
from dvc.fs import system
from dvc.objects.hash import file_md5
from dvc.output import Output, OutputIsStageFileError
from dvc.repo import Repo as DvcRepo
from dvc.stage import Stage
from dvc.stage.exceptions import (
StageFileAlreadyExistsError,
StageFileBadNameError,
StagePathNotDirectoryError,
StagePathNotFoundError,
StagePathOutsideError,
)
from dvc.utils.serialize import load_yaml
from tests.basic_env import TestDvc, TestDvcGit
class TestRun(TestDvc):
def test(self):
cmd = "python {} {} {}".format(self.CODE, self.FOO, "out")
deps = [self.FOO, self.CODE]
outs = [os.path.join(self.dvc.root_dir, "out")]
outs_no_cache = []
fname = "out.dvc"
self.dvc.add(self.FOO)
stage = self.dvc.run(
cmd=cmd,
deps=deps,
outs=outs,
outs_no_cache=outs_no_cache,
fname=fname,
single_stage=True,
)
self.assertTrue(filecmp.cmp(self.FOO, "out", shallow=False))
self.assertTrue(os.path.isfile(stage.path))
self.assertEqual(stage.cmd, cmd)
self.assertEqual(len(stage.deps), len(deps))
self.assertEqual(len(stage.outs), len(outs + outs_no_cache))
self.assertEqual(stage.outs[0].fspath, outs[0])
self.assertEqual(
stage.outs[0].hash_info.value, file_md5(self.FOO, self.dvc.fs)
)
self.assertTrue(stage.path, fname)
with self.assertRaises(OutputDuplicationError):
self.dvc.run(
cmd=cmd,
deps=deps,
outs=outs,
outs_no_cache=outs_no_cache,
fname="duplicate" + fname,
single_stage=True,
)
class TestRunEmpty(TestDvc):
def test(self):
self.dvc.run(
cmd="echo hello world",
deps=[],
outs=[],
outs_no_cache=[],
fname="empty.dvc",
single_stage=True,
)
class TestRunMissingDep(TestDvc):
def test(self):
from dvc.dependency.base import DependencyDoesNotExistError
with self.assertRaises(DependencyDoesNotExistError):
self.dvc.run(
cmd="command",
deps=["non-existing-dep"],
outs=[],
outs_no_cache=[],
fname="empty.dvc",
single_stage=True,
)
class TestRunNoExec(TestDvcGit):
def test(self):
self.dvc.run(
cmd="python {} {} {}".format(self.CODE, self.FOO, "out"),
deps=[self.CODE, self.FOO],
outs=["out"],
no_exec=True,
single_stage=True,
)
self.assertFalse(os.path.exists("out"))
with open(".gitignore", encoding="utf-8") as fobj:
self.assertEqual(fobj.read(), "/out\n")
class TestRunCircularDependency(TestDvc):
def test(self):
with self.assertRaises(CircularDependencyError):
self.dvc.run(
cmd="command",
deps=[self.FOO],
outs=[self.FOO],
fname="circular-dependency.dvc",
single_stage=True,
)
def test_outs_no_cache(self):
with self.assertRaises(CircularDependencyError):
self.dvc.run(
cmd="command",
deps=[self.FOO],
outs_no_cache=[self.FOO],
fname="circular-dependency.dvc",
single_stage=True,
)
def test_non_normalized_paths(self):
with self.assertRaises(CircularDependencyError):
self.dvc.run(
cmd="command",
deps=["./foo"],
outs=["foo"],
fname="circular-dependency.dvc",
single_stage=True,
)
def test_graph(self):
self.dvc.run(
deps=[self.FOO],
outs=["bar.txt"],
cmd="echo bar > bar.txt",
single_stage=True,
)
self.dvc.run(
deps=["bar.txt"],
outs=["baz.txt"],
cmd="echo baz > baz.txt",
single_stage=True,
)
with self.assertRaises(CyclicGraphError):
self.dvc.run(
deps=["baz.txt"],
outs=[self.FOO],
cmd="echo baz > foo",
single_stage=True,
)
class TestRunDuplicatedArguments(TestDvc):
def test(self):
with self.assertRaises(ArgumentDuplicationError):
self.dvc.run(
cmd="command",
deps=[],
outs=[self.FOO, self.FOO],
fname="circular-dependency.dvc",
single_stage=True,
)
def test_outs_no_cache(self):
with self.assertRaises(ArgumentDuplicationError):
self.dvc.run(
cmd="command",
outs=[self.FOO],
outs_no_cache=[self.FOO],
fname="circular-dependency.dvc",
single_stage=True,
)
def test_non_normalized_paths(self):
with self.assertRaises(ArgumentDuplicationError):
self.dvc.run(
cmd="command",
deps=[],
outs=["foo", "./foo"],
fname="circular-dependency.dvc",
single_stage=True,
)
class TestRunStageInsideOutput(TestDvc):
def test_cwd(self):
self.dvc.run(
cmd=f"mkdir {self.DATA_DIR}",
deps=[],
outs=[self.DATA_DIR],
single_stage=True,
)
with self.assertRaises(StagePathAsOutputError):
self.dvc.run(
cmd="command",
fname=os.path.join(self.DATA_DIR, "inside-cwd.dvc"),
single_stage=True,
)
def test_file_name(self):
self.dvc.run(
cmd=f"mkdir {self.DATA_DIR}",
deps=[],
outs=[self.DATA_DIR],
single_stage=True,
)
with self.assertRaises(StagePathAsOutputError):
self.dvc.run(
cmd="command",
outs=[self.FOO],
fname=os.path.join(self.DATA_DIR, "inside-cwd.dvc"),
single_stage=True,
)
class TestRunBadCwd(TestDvc):
def test(self):
with self.assertRaises(StagePathOutsideError):
self.dvc.run(cmd="command", wdir=self.mkdtemp(), single_stage=True)
def test_same_prefix(self):
with self.assertRaises(StagePathOutsideError):
path = f"{self._root_dir}-{uuid.uuid4()}"
os.mkdir(path)
self.dvc.run(cmd="command", wdir=path, single_stage=True)
class TestRunBadWdir(TestDvc):
def test(self):
with self.assertRaises(StagePathOutsideError):
self.dvc.run(cmd="command", wdir=self.mkdtemp(), single_stage=True)
def test_same_prefix(self):
with self.assertRaises(StagePathOutsideError):
path = f"{self._root_dir}-{uuid.uuid4()}"
os.mkdir(path)
self.dvc.run(cmd="command", wdir=path, single_stage=True)
def test_not_found(self):
with self.assertRaises(StagePathNotFoundError):
path = os.path.join(self._root_dir, str(uuid.uuid4()))
self.dvc.run(cmd="command", wdir=path, single_stage=True)
def test_not_dir(self):
with self.assertRaises(StagePathNotDirectoryError):
path = os.path.join(self._root_dir, str(uuid.uuid4()))
os.mkdir(path)
path = os.path.join(path, str(uuid.uuid4()))
open(path, "a", encoding="utf-8").close()
self.dvc.run(cmd="command", wdir=path, single_stage=True)
class TestRunBadName(TestDvc):
def test(self):
with self.assertRaises(StagePathOutsideError):
self.dvc.run(
cmd="command",
fname=os.path.join(self.mkdtemp(), self.FOO + DVC_FILE_SUFFIX),
single_stage=True,
)
def test_same_prefix(self):
with self.assertRaises(StagePathOutsideError):
path = f"{self._root_dir}-{uuid.uuid4()}"
os.mkdir(path)
self.dvc.run(
cmd="command",
fname=os.path.join(path, self.FOO + DVC_FILE_SUFFIX),
single_stage=True,
)
def test_not_found(self):
with self.assertRaises(StagePathNotFoundError):
path = os.path.join(self._root_dir, str(uuid.uuid4()))
self.dvc.run(
cmd="command",
fname=os.path.join(path, self.FOO + DVC_FILE_SUFFIX),
single_stage=True,
)
class TestRunRemoveOuts(TestDvc):
def test(self):
with open(self.CODE, "w+", encoding="utf-8") as fobj:
fobj.write("import sys\n")
fobj.write("import os\n")
fobj.write("if os.path.exists(sys.argv[1]):\n")
fobj.write(" sys.exit(1)\n")
fobj.write("open(sys.argv[1], 'w+').close()\n")
self.dvc.run(
deps=[self.CODE],
outs=[self.FOO],
cmd=f"python {self.CODE} {self.FOO}",
single_stage=True,
)
class TestRunUnprotectOutsCopy(TestDvc):
def test(self):
with open(self.CODE, "w+", encoding="utf-8") as fobj:
fobj.write("import sys\n")
fobj.write("with open(sys.argv[1], 'a+') as fobj:\n")
fobj.write(" fobj.write('foo')\n")
ret = main(["config", "cache.type", "copy"])
self.assertEqual(ret, 0)
ret = main(
[
"run",
"-d",
self.CODE,
"-o",
self.FOO,
"--single-stage",
"python",
self.CODE,
self.FOO,
]
)
self.assertEqual(ret, 0)
self.assertTrue(os.access(self.FOO, os.W_OK))
with open(self.FOO, encoding="utf-8") as fd:
self.assertEqual(fd.read(), "foo")
ret = main(
[
"run",
"--force",
"--no-run-cache",
"--single-stage",
"-d",
self.CODE,
"-o",
self.FOO,
"python",
self.CODE,
self.FOO,
]
)
self.assertEqual(ret, 0)
self.assertTrue(os.access(self.FOO, os.W_OK))
with open(self.FOO, encoding="utf-8") as fd:
self.assertEqual(fd.read(), "foo")
class TestRunUnprotectOutsSymlink(TestDvc):
def test(self):
with open(self.CODE, "w+", encoding="utf-8") as fobj:
fobj.write("import sys\n")
fobj.write("import os\n")
fobj.write("with open(sys.argv[1], 'a+') as fobj:\n")
fobj.write(" fobj.write('foo')\n")
ret = main(["config", "cache.type", "symlink"])
self.assertEqual(ret, 0)
self.assertEqual(ret, 0)
ret = main(
[
"run",
"-d",
self.CODE,
"-o",
self.FOO,
"--single-stage",
"python",
self.CODE,
self.FOO,
]
)
self.assertEqual(ret, 0)
if os.name == "nt":
# NOTE: Windows symlink perms don't propagate to the target
self.assertTrue(os.access(self.FOO, os.W_OK))
else:
self.assertFalse(os.access(self.FOO, os.W_OK))
self.assertTrue(system.is_symlink(self.FOO))
with open(self.FOO, encoding="utf-8") as fd:
self.assertEqual(fd.read(), "foo")
ret = main(
[
"run",
"--force",
"--no-run-cache",
"--single-stage",
"-d",
self.CODE,
"-o",
self.FOO,
"python",
self.CODE,
self.FOO,
]
)
self.assertEqual(ret, 0)
if os.name == "nt":
# NOTE: Windows symlink perms don't propagate to the target
self.assertTrue(os.access(self.FOO, os.W_OK))
else:
self.assertFalse(os.access(self.FOO, os.W_OK))
self.assertTrue(system.is_symlink(self.FOO))
with open(self.FOO, encoding="utf-8") as fd:
self.assertEqual(fd.read(), "foo")
class TestRunUnprotectOutsHardlink(TestDvc):
def test(self):
with open(self.CODE, "w+", encoding="utf-8") as fobj:
fobj.write("import sys\n")
fobj.write("import os\n")
fobj.write("with open(sys.argv[1], 'a+') as fobj:\n")
fobj.write(" fobj.write('foo')\n")
ret = main(["config", "cache.type", "hardlink"])
self.assertEqual(ret, 0)
self.assertEqual(ret, 0)
ret = main(
[
"run",
"-d",
self.CODE,
"-o",
self.FOO,
"--single-stage",
"python",
self.CODE,
self.FOO,
]
)
self.assertEqual(ret, 0)
self.assertFalse(os.access(self.FOO, os.W_OK))
self.assertTrue(system.is_hardlink(self.FOO))
with open(self.FOO, encoding="utf-8") as fd:
self.assertEqual(fd.read(), "foo")
ret = main(
[
"run",
"--force",
"--no-run-cache",
"--single-stage",
"-d",
self.CODE,
"-o",
self.FOO,
"python",
self.CODE,
self.FOO,
]
)
self.assertEqual(ret, 0)
self.assertFalse(os.access(self.FOO, os.W_OK))
self.assertTrue(system.is_hardlink(self.FOO))
with open(self.FOO, encoding="utf-8") as fd:
self.assertEqual(fd.read(), "foo")
class TestCmdRunOverwrite(TestDvc):
def test(self):
# NOTE: using sleep() is a workaround for filesystems
# with low mtime resolution. We have to use mtime since
# comparing mtime's is the only way to check that the stage
# file didn't change(size and inode in the first test down
# below don't change).
import time
ret = main(
[
"run",
"-d",
self.FOO,
"-d",
self.CODE,
"-o",
"out",
"--file",
"out.dvc",
"--single-stage",
"python",
self.CODE,
self.FOO,
"out",
]
)
self.assertEqual(ret, 0)
stage_mtime = os.path.getmtime("out.dvc")
time.sleep(1)
ret = main(
[
"run",
"-d",
self.FOO,
"-d",
self.CODE,
"--force",
"--no-run-cache",
"--single-stage",
"-o",
"out",
"--file",
"out.dvc",
"python",
self.CODE,
self.FOO,
"out",
]
)
self.assertEqual(ret, 0)
# NOTE: check that dvcfile was overwritten
self.assertNotEqual(stage_mtime, os.path.getmtime("out.dvc"))
stage_mtime = os.path.getmtime("out.dvc")
time.sleep(1)
ret = main(
[
"run",
"--force",
"--single-stage",
"--file",
"out.dvc",
"-d",
self.BAR,
f"cat {self.BAR}",
]
)
self.assertEqual(ret, 0)
# NOTE: check that dvcfile was overwritten
self.assertNotEqual(stage_mtime, os.path.getmtime("out.dvc"))
class TestCmdRunCliMetrics(TestDvc):
def test_cached(self):
ret = main(
[
"run",
"-m",
"metrics.txt",
"--single-stage",
"echo test > metrics.txt",
]
)
self.assertEqual(ret, 0)
with open("metrics.txt", encoding="utf-8") as fd:
self.assertEqual(fd.read().rstrip(), "test")
def test_not_cached(self):
ret = main(
[
"run",
"-M",
"metrics.txt",
"--single-stage",
"echo test > metrics.txt",
]
)
self.assertEqual(ret, 0)
with open("metrics.txt", encoding="utf-8") as fd:
self.assertEqual(fd.read().rstrip(), "test")
class TestCmdRunWorkingDirectory(TestDvc):
def test_default_wdir_is_not_written(self):
stage = self.dvc.run(
cmd=f"echo test > {self.FOO}",
outs=[self.FOO],
wdir=".",
single_stage=True,
)
d = load_yaml(stage.relpath)
self.assertNotIn(Stage.PARAM_WDIR, d.keys())
stage = self.dvc.run(
cmd=f"echo test > {self.BAR}", outs=[self.BAR], single_stage=True
)
d = load_yaml(stage.relpath)
self.assertNotIn(Stage.PARAM_WDIR, d.keys())
def test_fname_changes_path_and_wdir(self):
dname = "dir"
os.mkdir(os.path.join(self._root_dir, dname))
foo = os.path.join(dname, self.FOO)
fname = os.path.join(dname, "stage" + DVC_FILE_SUFFIX)
stage = self.dvc.run(
cmd=f"echo test > {foo}",
outs=[foo],
fname=fname,
single_stage=True,
)
self.assertEqual(stage.wdir, os.path.realpath(self._root_dir))
self.assertEqual(
stage.path, os.path.join(os.path.realpath(self._root_dir), fname)
)
# Check that it is dumped properly (relative to fname)
d = load_yaml(stage.relpath)
self.assertEqual(d[Stage.PARAM_WDIR], "..")
def test_rerun_deterministic(tmp_dir, run_copy, mocker):
from dvc.stage.run import subprocess
tmp_dir.gen("foo", "foo content")
spy = mocker.spy(subprocess, "Popen")
run_copy("foo", "out", single_stage=True)
assert spy.called
spy.reset_mock()
run_copy("foo", "out", single_stage=True)
assert not spy.called
def test_rerun_deterministic_ignore_cache(tmp_dir, run_copy, mocker):
from dvc.stage.run import subprocess
tmp_dir.gen("foo", "foo content")
spy = mocker.spy(subprocess, "Popen")
run_copy("foo", "out", single_stage=True)
assert spy.called
spy.reset_mock()
run_copy("foo", "out", run_cache=False, single_stage=True)
assert spy.called
def test_rerun_callback(dvc):
def run_callback(force=False):
return dvc.run(
cmd="echo content > out", force=force, single_stage=True
)
assert run_callback() is not None
with pytest.raises(StageFileAlreadyExistsError):
assert run_callback() is not None
assert run_callback(force=True) is not None
def test_rerun_changed_dep(tmp_dir, run_copy):
tmp_dir.gen("foo", "foo content")
assert run_copy("foo", "out", single_stage=True) is not None
tmp_dir.gen("foo", "changed content")
with pytest.raises(StageFileAlreadyExistsError):
run_copy("foo", "out", force=False, single_stage=True)
assert run_copy("foo", "out", force=True, single_stage=True)
def test_rerun_changed_stage(tmp_dir, run_copy):
tmp_dir.gen("foo", "foo content")
assert run_copy("foo", "out", single_stage=True) is not None
tmp_dir.gen("bar", "bar content")
with pytest.raises(StageFileAlreadyExistsError):
run_copy("bar", "out", force=False, single_stage=True)
def test_rerun_changed_out(tmp_dir, run_copy):
tmp_dir.gen("foo", "foo content")
assert run_copy("foo", "out", single_stage=True) is not None
Path("out").write_text("modification", encoding="utf-8")
with pytest.raises(StageFileAlreadyExistsError):
run_copy("foo", "out", force=False, single_stage=True)
class TestRunCommit(TestDvc):
def test(self):
fname = "test"
ret = main(
[
"run",
"-o",
fname,
"--no-commit",
"--single-stage",
"echo",
"test",
">",
fname,
]
)
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile(fname))
self.assertFalse(os.path.exists(self.dvc.odb.local.cache_dir))
ret = main(["commit", fname + ".dvc"])
self.assertEqual(ret, 0)
self.assertTrue(os.path.isfile(fname))
self.assertEqual(len(os.listdir(self.dvc.odb.local.cache_dir)), 1)
class TestRunPersist(TestDvc):
@property
def outs_command(self):
raise NotImplementedError
def _test(self):
file = "file.txt"
file_content = "content"
stage_file = file + DVC_FILE_SUFFIX
self.run_command(file, file_content)
self.stage_should_contain_persist_flag(stage_file)
self.should_append_upon_repro(file, stage_file)
self.should_remove_persistent_outs(file, stage_file)
def run_command(self, file, file_content):
ret = main(
[
"run",
"--single-stage",
"--always-changed",
self.outs_command,
file,
f"echo {file_content} >> {file}",
]
)
self.assertEqual(0, ret)
def stage_should_contain_persist_flag(self, stage_file):
stage_file_content = load_yaml(stage_file)
self.assertEqual(
True, stage_file_content["outs"][0][Output.PARAM_PERSIST]
)
def should_append_upon_repro(self, file, stage_file):
ret = main(["repro", stage_file])
self.assertEqual(0, ret)
with open(file, encoding="utf-8") as fobj:
lines = fobj.readlines()
self.assertEqual(2, len(lines))
def should_remove_persistent_outs(self, file, stage_file):
ret = main(["remove", stage_file, "--outs"])
self.assertEqual(0, ret)
self.assertFalse(os.path.exists(file))
class TestRunPersistOuts(TestRunPersist):
@property
def outs_command(self):
return "--outs-persist"
def test(self):
self._test()
class TestRunPersistOutsNoCache(TestRunPersist):
@property
def outs_command(self):
return "--outs-persist-no-cache"
def test(self):
self._test()
class TestShouldRaiseOnOverlappingOutputPaths(TestDvc):
def test(self):
ret = main(["add", self.DATA_DIR])
self.assertEqual(0, ret)
with self.assertRaises(OverlappingOutputPathsError) as err:
self.dvc.run(
outs=[self.DATA],
cmd=f"echo data >> {self.DATA}",
single_stage=True,
)
error_output = str(err.exception)
data_dir_stage = self.DATA_DIR + DVC_FILE_SUFFIX
data_stage = os.path.basename(self.DATA) + DVC_FILE_SUFFIX
self.assertIn("The output paths:\n", error_output)
self.assertIn(
f"\n'{self.DATA_DIR}'('{data_dir_stage}')\n", error_output
)
self.assertIn(f"\n'{self.DATA}'('{data_stage}')\n", error_output)
self.assertIn(
"overlap and are thus in the same tracked directory.\n"
"To keep reproducibility, outputs should be in separate "
"tracked directories or tracked individually.",
error_output,
)
class TestRerunWithSameOutputs(TestDvc):
def _read_content_only(self, path):
with open(path, encoding="utf-8") as fobj:
return [line.rstrip() for line in fobj]
@property
def _outs_command(self):
raise NotImplementedError
def _run_twice_with_same_outputs(self):
ret = main(
[
"run",
"--single-stage",
"--outs",
self.FOO,
f"echo {self.FOO_CONTENTS} > {self.FOO}",
]
)
self.assertEqual(0, ret)
output_file_content = self._read_content_only(self.FOO)
self.assertEqual([self.FOO_CONTENTS], output_file_content)
ret = main(
[
"run",
self._outs_command,
self.FOO,
"--force",
"--single-stage",
f"echo {self.BAR_CONTENTS} >> {self.FOO}",
]
)
self.assertEqual(0, ret)
class TestNewRunShouldRemoveOutsOnNoPersist(TestRerunWithSameOutputs):
def test(self):
self._run_twice_with_same_outputs()
output_file_content = self._read_content_only(self.FOO)
self.assertEqual([self.BAR_CONTENTS], output_file_content)
@property
def _outs_command(self):
return "--outs"
class TestNewRunShouldNotRemoveOutsOnPersist(TestRerunWithSameOutputs):
def test(self):
self._run_twice_with_same_outputs()
output_file_content = self._read_content_only(self.FOO)
self.assertEqual(
[self.FOO_CONTENTS, self.BAR_CONTENTS], output_file_content
)
@property
def _outs_command(self):
return "--outs-persist"
class TestShouldNotCheckoutUponCorruptedLocalHardlinkCache(TestDvc):
def setUp(self):
super().setUp()
ret = main(["config", "cache.type", "hardlink"])
self.assertEqual(ret, 0)
self.dvc.close()
self.dvc = DvcRepo(".")
def test(self):
from tests.utils import clean_staging
cmd = f"python {self.CODE} {self.FOO} {self.BAR}"
stage = self.dvc.run(
deps=[self.FOO], outs=[self.BAR], cmd=cmd, single_stage=True
)
clean_staging()
os.chmod(self.BAR, 0o644)
with open(self.BAR, "w", encoding="utf-8") as fd:
fd.write("corrupting the output cache")
patch_checkout = mock.patch.object(
stage.outs[0], "checkout", wraps=stage.outs[0].checkout
)
from dvc.stage.run import cmd_run
patch_run = mock.patch("dvc.stage.run.cmd_run", wraps=cmd_run)
with self.dvc.lock:
with patch_checkout as mock_checkout:
with patch_run as mock_run:
stage.run()
mock_run.assert_called_once()
mock_checkout.assert_not_called()
def test_bad_stage_fname(tmp_dir, dvc, run_copy):
tmp_dir.dvc_gen("foo", "foo content")
with pytest.raises(StageFileBadNameError):
# fname should end with .dvc
run_copy("foo", "foo_copy", fname="out_stage", single_stage=True)
# Check that command hasn't been run
assert not (tmp_dir / "foo_copy").exists()
def test_should_raise_on_stage_dependency(run_copy):
with pytest.raises(DependencyIsStageFileError):
run_copy("name.dvc", "stage_copy", single_stage=True)
def test_should_raise_on_stage_output(tmp_dir, dvc, run_copy):
tmp_dir.dvc_gen("foo", "foo content")
with pytest.raises(OutputIsStageFileError):
run_copy("foo", "name.dvc", single_stage=True)
@pytest.mark.parametrize("metrics_type", ["metrics", "metrics_no_cache"])
def test_metrics_dir(tmp_dir, dvc, caplog, run_copy_metrics, metrics_type):
copyargs = {metrics_type: ["dir_metric"]}
tmp_dir.gen({"dir": {"file": "content"}})
with caplog.at_level(logging.DEBUG, "dvc"):
run_copy_metrics("dir", "dir_metric", **copyargs)
assert (
"directory 'dir_metric' cannot be used as metrics." in caplog.messages
)
def test_run_force_preserves_comments_and_meta(tmp_dir, dvc, run_copy):
tmp_dir.gen({"foo": "foo", "foo1": "foo1"})
text = textwrap.dedent(
"""\
desc: top desc
cmd: python copy.py foo bar
deps:
- path: copy.py
- path: foo
outs:
# comment preserved
- path: bar
desc: out desc
meta:
name: copy-foo-bar
"""
)
(tmp_dir / "bar.dvc").write_text(text)
dvc.reproduce("bar.dvc")
# CRLF on windows makes the generated file bigger in size
code_size = 143 if os.name == "nt" else 142
assert (tmp_dir / "bar.dvc").read_text() == textwrap.dedent(
f"""\
desc: top desc
cmd: python copy.py foo bar
deps:
- path: copy.py
md5: 90c27dd80b698fe766f0c3ee0b6b9729
size: {code_size}
- path: foo
md5: acbd18db4cc2f85cedef654fccc4a4d8
size: 3
outs:
# comment preserved
- path: bar
desc: out desc
md5: acbd18db4cc2f85cedef654fccc4a4d8
size: 3
meta:
name: copy-foo-bar
md5: be659ce4a33cebb85d4e8e1335d394ad
"""
)
run_copy("foo1", "bar1", single_stage=True, force=True, fname="bar.dvc")
assert (tmp_dir / "bar.dvc").read_text() == textwrap.dedent(
f"""\
desc: top desc
cmd: python copy.py foo1 bar1
deps:
- path: foo1
md5: 299a0be4a5a79e6a59fdd251b19d78bb
size: 4
- path: copy.py
md5: 90c27dd80b698fe766f0c3ee0b6b9729
size: {code_size}
outs:
# comment preserved
- path: bar1
md5: 299a0be4a5a79e6a59fdd251b19d78bb
size: 4
meta:
name: copy-foo-bar
md5: 9e725b11cb393e6a7468369fa50328b7
"""
)
|
the-stack_0_10074 | from rdkit import Chem
from rdkit.Chem import AllChem, Draw
from rdkit.Chem import rdMolDescriptors
import numpy as np
import matplotlib.pyplot as plt
from PIL import Image, ImageFilter
# please check the RDKit manual on drawing chemical fragments:
#https://www.rdkit.org/docs/GettingStartedInPython.html#drawing-molecules
class FPVisualizer:
"""
Utility class to visualize and process chemical fragments
"""
def __init__(self, dataset, draw_all=False):
"""
Parameters
--------------------
dataset: tuple of (list of string , list of bit)
SMILES_list: list of smiles recorded in the database
fingerprint_list: list of the corresponding FPs recorded in the database
draw_all: bool
if true, draw all fragments in a chemical
"""
self.SMILES_list, self.fingerprint_list = zip(*dataset)
self.draw_all = draw_all
def get_mol_id_with_specific_bit_id(self, bit_ID):
"""
extract chemicals whose fingerprint[bit_ID]==1
Parameters
--------------------
bit_ID: int
            id of fingerprint
Returns
-------------------
        hit_ID: list of int
            IDs of chemicals whose fingerprint's bit_ID == 1
"""
hit = [True if fp[bit_ID] == 1 else False for fp in self.fingerprint_list]
temp = list(range(len(hit)))
hit_ID = [i for i, j in zip(temp, hit) if j == 1]
return hit_ID
def auto_draw_fragments(self, ID_list,draw=True):
"""
draw chemical fragments with specific bit_ID
Parameters
---------------
ID_list: list of int
list of bit_ID
Returns
-----------------
self.draw_fragments(tup): image object
chemical structures
smiles_list: list of string
corresponding smiles
"""
tup, smiles_list = self.calc_draw_tuples(ID_list)
#TODO: kekulization errors with some compounds
if draw:
img=self.draw_fragments(tup)
else:
img=None
return img, smiles_list
def calc_draw_tuples(self, ID_list):
"""
internal function of auto_draw_fragments
"""
draw_tuple = []
smiles_list = []
for bit_ID in ID_list:
#get smiles indexes whose bit_ID ==1
hit_ID = self.get_mol_id_with_specific_bit_id(bit_ID)
#create mol object whose molecular weight is smallest
match_SMILES_list = np.array(self.SMILES_list)[hit_ID]
sm = sort_SMILES_list_by_MW(match_SMILES_list)[0]
if sm == -1:
continue
smiles_list.append(sm)
mol = Chem.MolFromSmiles(sm)
bitI_rdkit = {}
fp_rdkit = Chem.RDKFingerprint(mol, bitInfo=bitI_rdkit)
draw_tuple.append((mol, bit_ID, bitI_rdkit))
return draw_tuple, smiles_list
def draw_fragments(self, draw_tuple):
image_list = []
for tup in draw_tuple:
mol, bit_ID, fp = tup
if self.draw_all:
# one molecule can have multiple fragments
for i in range(len(fp[bit_ID])):
img = Draw.DrawRDKitBit(mol, bit_ID, fp, whichExample=i)
image_list.append(img)
else:
img = Draw.DrawRDKitBit(mol, bit_ID, fp, whichExample=0)
image_list.append(img)
imgs = Image.fromarray(np.concatenate(image_list, axis=0))
return imgs
def calc_duplicate_array(self, bit_ID_list, threshold=0.5, plot=True):
"""
this is an original function to drop similar fingerprints
Parameters
-----------------
bit_ID_list: list of int
            list of bit_IDs of fingerprints. If different bit_IDs have similar contributions, they will be merged.
threshold: float
threshold to drop similar bit_IDs
plot: bool
if true, plot similarity heatmap
"""
ID_types = len(bit_ID_list)
subset_array = np.ones((ID_types, ID_types))
# from the database, extract a compound whose bit_ID ==1
for n1, i in enumerate(bit_ID_list):
hit_ids1 = self.get_mol_id_with_specific_bit_id(i)
for n2, j in enumerate(bit_ID_list):
hit_ids2 = self.get_mol_id_with_specific_bit_id(j)
                # calculate the difference of FP_i and FP_j
subset_array[n1][n2] = len(
list((set(hit_ids1)-set(hit_ids2))))/len(hit_ids1)
if plot:
plt.imshow(subset_array, interpolation='nearest', cmap='jet')
# delete similar bit_ids
dup_score = np.mean(subset_array, axis=0)
modif_bit_ID_list = [i for i, j in zip(
bit_ID_list, dup_score) if j > threshold]
return modif_bit_ID_list, subset_array
def calc_MW_from_SMILES_list(SMILES):
mol = Chem.MolFromSmiles(SMILES)
return rdMolDescriptors._CalcMolWt(mol)
def sort_SMILES_list_by_MW(SMILES_list):
"""
sort smiles by molecular weight
"""
if len(SMILES_list) == 0:
return [-1]
mw_list = [calc_MW_from_SMILES_list(i) for i in SMILES_list]
dataset = (list(zip(mw_list, SMILES_list)))
dataset.sort(key=lambda x: x[0])
mw_list, SMILES_list = list(zip(*dataset))
return SMILES_list
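

# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). The SMILES strings and bit IDs below are arbitrary assumptions,
# chosen only to show the expected (SMILES, fingerprint) dataset format.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    example_smiles = ["CCO", "c1ccccc1O", "CC(=O)O"]
    example_fps = [Chem.RDKFingerprint(Chem.MolFromSmiles(s)) for s in example_smiles]
    visualizer = FPVisualizer(list(zip(example_smiles, example_fps)))
    # draw=False skips image rendering; only the matched SMILES are returned
    _, matched = visualizer.auto_draw_fragments([10, 42], draw=False)
    print("SMILES matching the example bit IDs:", matched)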
|
the-stack_0_10075 | # MIT License
#
# Copyright (c) 2020 Arkadiusz Netczuk <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import unittest
import datetime
from worklog.gui.dataobject import KernLogParser
from testworklog.data import get_data_path
class KernLogParserTest(unittest.TestCase):
def setUp(self):
## Called before testfunction is executed
pass
def tearDown(self):
## Called after testfunction was executed
pass
def test_parseKernLog_regular(self):
kernlogPath = get_data_path( "kern.log_regular" )
logList = KernLogParser.parseKernLog( kernlogPath )
self.assertEqual( len( logList ), 9 )
item = logList[0]
self.assertEqual( item[0], datetime.datetime( year=2020, month=10, day=26, hour=0, minute=9 ) )
self.assertEqual( item[1], datetime.datetime( year=2020, month=10, day=26, hour=1, minute=22 ) )
def test_parseKernLog_fail(self):
kernlogPath = get_data_path( "kern.log_fail" )
logList = KernLogParser.parseKernLog( kernlogPath )
self.assertEqual( len( logList ), 1 )
item = logList[0]
self.assertEqual( item[0], datetime.datetime( year=2020, month=10, day=26, hour=15, minute=49 ) )
self.assertEqual( item[1], datetime.datetime( year=2020, month=10, day=26, hour=15, minute=49 ) )
def test_parseKernLog_suspend(self):
kernlogPath = get_data_path( "kern.log_suspend" )
logList = KernLogParser.parseKernLog( kernlogPath )
self.assertEqual( len( logList ), 4 )
item = logList[0]
self.assertEqual( item[0], datetime.datetime( year=2020, month=10, day=31, hour=10, minute=46 ) )
self.assertEqual( item[1], datetime.datetime( year=2020, month=10, day=31, hour=10, minute=53 ) )
def test_parseKernLog_newyear(self):
kernlogPath = get_data_path( "kern.log_newyear" )
logList = KernLogParser.parseKernLog( kernlogPath )
self.assertEqual( len( logList ), 2 )
item1 = logList[0]
self.assertEqual( item1[0], datetime.datetime( year=2020, month=12, day=31, hour=18, minute=28 ) )
self.assertEqual( item1[1], datetime.datetime( year=2020, month=12, day=31, hour=18, minute=32 ) )
item2 = logList[1]
self.assertEqual( item2[0], datetime.datetime( year=2021, month=1, day=1, hour=20, minute=31 ) )
self.assertEqual( item2[1], datetime.datetime( year=2021, month=1, day=1, hour=20, minute=32 ) )
def test_parseKernLog_joinline(self):
## sometimes can happen that two lines of log are joined together without newline separator
kernlogPath = get_data_path( "kern.log_joinline" )
logList = KernLogParser.parseKernLog( kernlogPath )
self.assertEqual( len( logList ), 2 )
item1 = logList[0]
self.assertEqual( item1[0], datetime.datetime( year=2021, month=5, day=7, hour=23, minute=24 ) )
self.assertEqual( item1[1], datetime.datetime( year=2021, month=5, day=7, hour=23, minute=24 ) )
item2 = logList[1]
self.assertEqual( item2[0], datetime.datetime( year=2021, month=5, day=8, hour=21, minute=35 ) )
self.assertEqual( item2[1], datetime.datetime( year=2021, month=5, day=8, hour=21, minute=35 ) )
|
the-stack_0_10076 | # Copyright 2014 Mellanox Technologies, Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from oslo_log import log as logging
from neutron._i18n import _, _LE
from neutron.agent.linux import utils
from neutron.common import exceptions as n_exc
LOG = logging.getLogger(__name__)
class IpLinkSupportError(n_exc.NeutronException):
pass
class UnsupportedIpLinkCommand(IpLinkSupportError):
message = _("ip link command is not supported: %(reason)s")
class InvalidIpLinkCapability(IpLinkSupportError):
message = _("ip link capability %(capability)s is not supported")
class IpLinkConstants(object):
IP_LINK_CAPABILITY_STATE = "state"
IP_LINK_CAPABILITY_VLAN = "vlan"
IP_LINK_CAPABILITY_RATE = "rate"
IP_LINK_CAPABILITY_SPOOFCHK = "spoofchk"
IP_LINK_SUB_CAPABILITY_QOS = "qos"
class IpLinkSupport(object):
VF_BLOCK_REGEX = r"\[ vf NUM(?P<vf_block>.*) \] \]"
CAPABILITY_REGEX = r"\[ %s (.*)"
SUB_CAPABILITY_REGEX = r"\[ %(cap)s (.*) \[ %(subcap)s (.*)"
@classmethod
def get_vf_mgmt_section(cls):
"""Parses ip link help output, and gets vf block"""
output = cls._get_ip_link_output()
vf_block_pattern = re.search(cls.VF_BLOCK_REGEX,
output,
re.DOTALL | re.MULTILINE)
if vf_block_pattern:
return vf_block_pattern.group("vf_block")
@classmethod
def vf_mgmt_capability_supported(cls, vf_section, capability,
subcapability=None):
"""Validate vf capability support
Checks if given vf capability (and sub capability
if given) supported
:param vf_section: vf Num block content
:param capability: for example: vlan, rate, spoofchk, state
:param subcapability: for example: qos
"""
if not vf_section:
return False
if subcapability:
regex = cls.SUB_CAPABILITY_REGEX % {"cap": capability,
"subcap": subcapability}
else:
regex = cls.CAPABILITY_REGEX % capability
pattern_match = re.search(regex, vf_section,
re.DOTALL | re.MULTILINE)
return pattern_match is not None
@classmethod
def _get_ip_link_output(cls):
"""Gets the output of the ip link help command
Runs ip link help command and stores its output
        Note: 'ip link help' returns an error and writes its output to
              stderr, so we get the output from there. However, if this
              behaviour ever changes and the command writes to stdout, we
              will get the output from there too.
"""
try:
ip_cmd = ['ip', 'link', 'help']
_stdout, _stderr = utils.execute(
ip_cmd,
check_exit_code=False,
return_stderr=True,
log_fail_as_error=False)
except Exception as e:
LOG.exception(_LE("Failed executing ip command"))
raise UnsupportedIpLinkCommand(reason=e)
return _stdout or _stderr
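

# Hedged usage sketch (added for illustration, not part of the original
# module). It assumes the `ip` utility is available on the host; the checked
# capability (vlan qos) is an arbitrary example.
if __name__ == '__main__':
    vf_section = IpLinkSupport.get_vf_mgmt_section()
    vlan_qos_supported = IpLinkSupport.vf_mgmt_capability_supported(
        vf_section,
        IpLinkConstants.IP_LINK_CAPABILITY_VLAN,
        IpLinkConstants.IP_LINK_SUB_CAPABILITY_QOS)
    print('ip link vf vlan qos supported: %s' % vlan_qos_supported)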
|
the-stack_0_10077 | import pathlib
import warnings
import functools
from typing import Dict
from contextlib import contextmanager
from urllib.parse import urlparse
from sunpy.util.exceptions import SunpyUserWarning
from sunpy.util.util import hash_file
__all__ = ['DataManager']
class DataManager:
"""
This class provides a remote data manager for managing remote files.
Parameters
----------
cache: `sunpy.data.data_manager.cache.Cache`
Cache object to be used by `~sunpy.data.data_manager.manager.DataManager`.
"""
def __init__(self, cache):
self._cache = cache
self._file_cache = {}
self._skip_hash_check = False
self._skip_file: Dict[str, str] = {}
def require(self, name, urls, sha_hash):
"""
Decorator for informing the data manager about the requirement of
a file by a function.
Parameters
----------
name: `str`
The name to reference the file with.
urls: `list` or `str`
A list of urls to download the file from.
sha_hash: `str`
SHA-1 hash of file.
"""
if isinstance(urls, str):
urls = [urls]
def decorator(func):
@functools.wraps(func)
def wrapper(*args, **kwargs):
replace = self._skip_file.get(name, None)
if replace:
uri_parse = urlparse(replace['uri'])
if uri_parse.scheme in ("", "file"):
                        # If a relative file uri is specified (e.g.
                        # `file://sunpy/test`), this maintains compatibility
                        # with the original behaviour, where it would be
                        # interpreted as `./sunpy/test`. If no scheme is
                        # given, netloc will be '' by default.
file_path = uri_parse.netloc + uri_parse.path
file_hash = hash_file(file_path)
else:
file_path, file_hash, _ = self._cache._download_and_hash([replace['uri']])
if replace['hash'] and file_hash != replace['hash']:
# if hash provided to replace function doesn't match the hash of the file
# raise error
raise ValueError(
"Hash provided to override_file does not match hash of the file.")
elif self._skip_hash_check:
file_path = self._cache.download(urls, redownload=True)
else:
details = self._cache.get_by_hash(sha_hash)
if not details:
# In case we are matching by hash and file does not exist
# That might mean the wrong hash is supplied to decorator
# We match by urls to make sure that is not the case
if self._cache_has_file(urls):
raise ValueError(" Hash provided does not match the hash in database.")
file_path = self._cache.download(urls)
if hash_file(file_path) != sha_hash:
# the hash of the file downloaded does not match provided hash
# this means the file has changed on the server.
# the function should be updated to use the new
# hash. Raise an error to notify.
raise RuntimeError(
"Remote file on the server has changed. Update hash of the function.")
else:
# This is to handle the case when the local file
# appears to be tampered/corrupted
if hash_file(details['file_path']) != details['file_hash']:
warnings.warn("Hashes do not match, the file will be redownloaded (could be be tampered/corrupted)",
SunpyUserWarning)
file_path = self._cache.download(urls, redownload=True)
# Recheck the hash again, if this fails, we will exit.
if hash_file(file_path) != details['file_hash']:
raise RuntimeError("Redownloaded file also has the incorrect hash."
"The remote file on the server might have changed.")
else:
file_path = details['file_path']
self._file_cache[name] = file_path
return func(*args, **kwargs)
return wrapper
return decorator
@contextmanager
def override_file(self, name, uri, sha_hash=None):
"""
Replaces the file by the name with the file provided by the url/path.
Parameters
----------
name: `str`
Name of the file provided in the `require` decorator.
uri: `str`
URI of the file which replaces original file. Scheme should be one
of ``http``, ``https``, ``ftp`` or ``file``. If no scheme is given
the uri will be interpreted as a local path. i.e.
``file:///tmp/test`` and ``/tmp/test`` are the same.
sha_hash: `str`, optional
SHA256 hash of the file to compared to after downloading.
"""
try:
self._skip_file[name] = {
'uri': uri,
'hash': sha_hash,
}
yield
finally:
_ = self._skip_file.pop(name, None)
@contextmanager
def skip_hash_check(self):
"""
Disables hash checking temporarily
Examples
--------
>>> with remote_data_manager.skip_hash_check(): # doctest: +SKIP
... myfunction() # doctest: +SKIP
"""
try:
self._skip_hash_check = True
yield
finally:
self._skip_hash_check = False
def get(self, name):
"""
Get the file by name.
Parameters
----------
name: `str`
Name of the file given to the data manager, same as the one provided
in `~sunpy.data.data_manager.manager.DataManager.require`.
Returns
-------
`pathlib.Path`
Path of the file.
Raises
------
`KeyError`
If ``name`` is not in the cache.
"""
return pathlib.Path(self._file_cache[name])
def _cache_has_file(self, urls):
for url in urls:
if self._cache._get_by_url(url):
return True
return False
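

# Hedged usage sketch (illustrative only, not part of the original module).
# The URL and hash below are placeholders, and `cache` stands for a
# `sunpy.data.data_manager.cache.Cache` instance constructed elsewhere:
#
#     manager = DataManager(cache)
#
#     @manager.require('test_file',
#                      ['http://example.com/test_file.txt'],
#                      '<sha-hash-of-test_file>')
#     def read_test_file():
#         return manager.get('test_file').read_text()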
|
the-stack_0_10078 | """Emoji
Available Commands:
.call"""
from telethon import events
import asyncio
@borg.on(events.NewMessage(pattern=r"\.(.*)", outgoing=True))
async def _(event):
if event.fwd_from:
return
animation_interval = 3
animation_ttl = range(0, 18)
input_str = event.pattern_match.group(1)
if input_str == "call":
await event.edit(input_str)
animation_chars = [
"`Connecting To Telegram Headquarters...`",
"`Call Connected.`",
"`Telegram: Hello This is Telegram HQ. Who is this?`",
"`Me: Yo this is` @mantiz_rip ,`Please Connect me to my lil bro,Pavel Durov`",
"`User Authorised.`",
"`Calling Pavel Durov` `At +916969696969`",
"`Private Call Connected...`",
"`Me: Hello Sir, Please Ban This Telegram Account.`",
"`Pavel: May I Know Who Is This?`",
"`Me: Yo Brah, I Am` @mantiz_rip ",
"`Pavel: OMG!!! Long time no see, Wassup Brother...\nI'll Make Sure That Guy Account Will Get Blocked Within 24Hrs.`",
"`Me: Thanks, See You Later Brah.`",
"`Pavel: Please Don't Thank Brah, Telegram Is Our's. Just Gimme A Call When You Become Free.`",
"`Me: Is There Any Issue/Emergency???`",
"`Pavel: Yes Sur, There Is A Bug In Telegram v69.6.9.\nI Am Not Able To Fix It. If Possible, Please Help Fix The Bug.`",
"`Me: Send Me The App On My Telegram Account, I Will Fix The Bug & Send You.`",
"`Pavel: Sure Sur \nTC Bye Bye :)`",
"`Private Call Disconnected.`"
]
for i in animation_ttl:
await asyncio.sleep(animation_interval)
await event.edit(animation_chars[i % 18])
|
the-stack_0_10079 | import itertools
import logging
from pint import pi_theorem
from pint.testsuite import QuantityTestCase
class TestPiTheorem(QuantityTestCase):
def test_simple(self, caplog):
# simple movement
with caplog.at_level(logging.DEBUG):
assert pi_theorem({"V": "m/s", "T": "s", "L": "m"}) == [
{"V": 1, "T": 1, "L": -1}
]
# pendulum
assert pi_theorem({"T": "s", "M": "grams", "L": "m", "g": "m/s**2"}) == [
{"g": 1, "T": 2, "L": -1}
]
assert len(caplog.records) == 7
def test_inputs(self):
V = "km/hour"
T = "ms"
L = "cm"
f1 = lambda x: x
f2 = lambda x: self.Q_(1, x)
f3 = lambda x: self.Q_(1, x).units
f4 = lambda x: self.Q_(1, x).dimensionality
fs = f1, f2, f3, f4
for fv, ft, fl in itertools.product(fs, fs, fs):
qv = fv(V)
qt = ft(T)
            ql = fl(L)
assert self.ureg.pi_theorem({"V": qv, "T": qt, "L": ql}) == [
{"V": 1.0, "T": 1.0, "L": -1.0}
]
|
the-stack_0_10081 | import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
class NoseTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import nose
errcode = nose.main(self.test_args)
sys.exit(errcode)
setup(name='battleforcastile',
version='0.0.2',
description='Play a fantasy cards game on your terminal',
maintainer='José Vidal',
maintainer_email='[email protected]',
author='José Vidal',
author_email='[email protected]',
url='https://github.com/battleforcastile/battleforcastile',
license='MIT',
long_description=open('README.md').read(),
platforms='any',
keywords=[
'fantasy',
'game',
],
packages=find_packages(),
install_requires=[
'click==7.0'
],
entry_points={
"console_scripts": [
"battleforcastile = battleforcastile.main:cli",
],
},
classifiers=[
'Programming Language :: Python',
'Operating System :: OS Independent'
],
tests_require=['nose'],
cmdclass={'test': NoseTest}
) |
the-stack_0_10083 | # coding: utf-8
#
# Copyright 2022 :Barry-Thomas-Paul: Moss
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http: // www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Exception Class
# this is an auto-generated file produced by Cheetah
# Namespace: com.sun.star.ucb
# Libre Office Version: 7.3
from ooo.oenv.env_const import UNO_NONE
import typing
from ..task.classified_interaction_request import ClassifiedInteractionRequest as ClassifiedInteractionRequest_9f72121b
from ..uno.x_interface import XInterface as XInterface_8f010a43
from ..task.interaction_classification import InteractionClassification as InteractionClassification_6c4d10e7
class InteractiveAppException(ClassifiedInteractionRequest_9f72121b):
"""
Exception Class
An application error.
**since**
OOo 1.1.2
See Also:
`API InteractiveAppException <https://api.libreoffice.org/docs/idl/ref/exceptioncom_1_1sun_1_1star_1_1ucb_1_1InteractiveAppException.html>`_
"""
__ooo_ns__: str = 'com.sun.star.ucb'
__ooo_full_ns__: str = 'com.sun.star.ucb.InteractiveAppException'
__ooo_type_name__: str = 'exception'
__pyunointerface__: str = 'com.sun.star.ucb.InteractiveAppException'
__pyunostruct__: str = 'com.sun.star.ucb.InteractiveAppException'
typeName: str = 'com.sun.star.ucb.InteractiveAppException'
"""Literal Constant ``com.sun.star.ucb.InteractiveAppException``"""
def __init__(self, Message: typing.Optional[str] = '', Context: typing.Optional[XInterface_8f010a43] = None, Classification: typing.Optional[InteractionClassification_6c4d10e7] = InteractionClassification_6c4d10e7.ERROR, Code: typing.Optional[int] = 0) -> None:
"""
Constructor
Arguments:
Message (str, optional): Message value.
Context (XInterface, optional): Context value.
Classification (InteractionClassification, optional): Classification value.
Code (int, optional): Code value.
"""
kargs = {
"Message": Message,
"Context": Context,
"Classification": Classification,
"Code": Code,
}
self._init(**kargs)
def _init(self, **kwargs) -> None:
self._code = kwargs["Code"]
inst_keys = ('Code',)
kargs = kwargs.copy()
for key in inst_keys:
del kargs[key]
super()._init(**kargs)
@property
def Code(self) -> int:
"""
The type of application error.
"""
return self._code
@Code.setter
def Code(self, value: int) -> None:
self._code = value
__all__ = ['InteractiveAppException']
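
# Hedged example (illustrative only; not part of the generated file). The
# message and code values are arbitrary assumptions:
#
#     ex = InteractiveAppException(Message='Helper application failed', Code=1)
#     assert ex.Code == 1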
|
the-stack_0_10084 | #!/usr/bin/env python
#################################################################
##
## Script: pyttt.py
## Author: Premshree Pillai
## Description: Tic-Tac-Toe game in Python
## Web: http://www.qiksearch.com/
## http://premshree.resource-locator.com/
## Created: 19/03/04 (dd/mm/yy)
##
## (C) 2004 Premshree Pillai
##
#################################################################
import cgi
print("Content-type: text/html\n\n")
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
bsize = 3
playerToken = "X"
myToken = "0"
gameOver = 0
winArr = []
rowArr = []
colArr = []
digArr = []
x = 0
while x < bsize * bsize :
rowArr.append(0)
colArr.append(0)
digArr.append(0)
x = x + 1
out1 = """<html>
<head>
<title>Tic Tac Toe in Python</title>
<style type="text/css">
.main{border:#9999CC solid 2px; width:350px}
.btn{font-family:comic sans ms,verdana,arial,helvetica; font-size:20pt; font-weight:bold; background:#9999CC; width:50px; height:50px; border:#666699 solid 1px; cursor:hand; color:#EFEFFF}
.btn_over{font-family:comic sans ms,verdana,arial,helvetica; font-size:20pt; font-weight:bold; background:#EFEFFF; width:50px; height:50px; border:#666699 solid 1px; cursor:hand; color:#9999CC}
.btn_down{font-family:comic sans ms,verdana,arial,helvetica; font-size:20pt; font-weight:bold; background:#666699; width:50px; height:50px; border:#666699 solid 1px; cursor:hand; color:#EFEFFF}
.footer{font-family:verdana,arial,helvetica; font-size:8pt; color:#FFFFFF}
.link{font-family:verdana,arial,helvetica; font-size:8pt; color:#FFFFFF}
.link:hover{font-family:verdana,arial,helvetica; font-size:8pt; color:#EFEFFF}
</style>
<script language="JavaScript">
var doneFlag=false;
function toggleVal(who) {
var check;
eval('check=document.ttt.'+who+'_btn.value;');
if(check==" ") {
if(!doneFlag) {
eval('document.ttt.'+who+'_btn.value="X";');
eval('document.ttt.'+who+'_btn.disabled="true";');
eval('document.ttt.'+who+'.value="X";');
document.ttt.submit();
doneFlag=true;
document.getElementById('process').innerHTML="Processing.........";
}
}
else {
alert('Invalid Move!');
}
}
</script>
</head>
<body>
<table width="100%" height="100%"><tr><td align="center">
<table width="346" align="center" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td></td></tr></table>
<table width="348" align="center" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td></td></tr></table>
<table align="center" cellspacing="0" cellpadding="0" class="main"><tr><td align="center">
<table width="100%" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td align="center"><a href="pyttt.py"><img src="../ttt_py.gif" border="0" alt="Tic Tac Toe (in Python)"></a></td></tr></table>
<table width="100%" bgcolor="#EFEFFF" cellspacing="0" cellpadding="0"><tr><td align="center"><a href="http://www.qiksearch.com"><img src="../qiksearch_ttt_py.gif" border="0" alt="www.qiksearch.com"></a></td></tr></table>"""
print(out1)
def genBox(size):
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
count = 0
retVal = '<form name="ttt" method="post" action="pyttt.py">'
i = 0
while i < size :
j = 0
while j < size :
count = count + 1
retVal = retVal + '<input type="button" name="s' + str(count) + '_btn" value=" " class="btn" onClick="toggleVal(\'s' + str(count) + '\')" onMouseover="this.className=\'btn_over\'" onMouseout="this.className=\'btn\'" onMousedown="this.className=\'btn_down\'"><input type="hidden" name="s' + str(count) + '" value=" ">'
j = j + 1
retVal = retVal + '<br>'
i = i + 1
retVal = retVal + '</form>'
print(retVal)
def genBox2(size,arr):
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
count = 0
retVal = '<form name="ttt" method="post" action="pyttt.py">'
i = 0
while i < size :
j = 0
while j < size :
count = count + 1
retVal = retVal + '<input type="button" name="s' + str(count) + '_btn" value="' + str(arr[count-1]) + '" class="btn" onClick="toggleVal(\'s' + str(count) + '\')" onMouseover="this.className=\'btn_over\'" onMouseout="this.className=\'btn\'" onMousedown="this.className=\'btn_down\'"><input type="hidden" name="s' + str(count) + '" value="' + str(arr[count-1]) + '">'
j = j + 1
retVal = retVal + '<br>'
i = i + 1
retVal = retVal + '</form>'
print(retVal)
def isEmpty(who):
if who == " ":
return 1
else:
        return 0
def move(bsize,arr):
global playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
count = 0
maxCount = 0
pos = 0
retVal = 0
# Build Row Array
i = 0
while i < bsize :
maxCount = 0
fullCounter = 0
j = 0
while j < bsize :
count = count + 1
who = arr[count-1]
if who == playerToken :
maxCount = maxCount + 1
fullCounter = fullCounter + 1
if who == myToken :
fullCounter = fullCounter + 1
j = j + 1
rowArr[i] = maxCount
if fullCounter == bsize :
rowArr[i] = -1
i = i + 1
# Building Column Array
i = 0
while i < bsize :
count = i + 1
maxCount = 0
fullCounter = 0
j = 0
while j < bsize :
who = arr[count-1]
if who == playerToken :
maxCount = maxCount + 1
fullCounter = fullCounter + 1
if who == myToken :
fullCounter = fullCounter + 1
count = count + bsize
j = j + 1
colArr[i] = maxCount
if fullCounter == bsize :
colArr[i] = -1
i = i + 1
# Building Diagonal Array
i = 0
while i < 2 :
if i == 0 :
count = i + 1
else:
count = bsize
maxCount = 0
fullCounter = 0
j = 0
while j < bsize :
who = arr[count-1]
if who == playerToken :
maxCount = maxCount + 1
fullCounter = fullCounter + 1
if who == myToken :
fullCounter = fullCounter + 1
if i == 0 :
count = count + bsize + 1
else:
count = count + bsize - 1
j = j + 1
digArr[i] = maxCount
if fullCounter == bsize :
digArr[i] = -1
i = i + 1
# Finding Max Values
maxRow = myMax(0,bsize,"row",rowArr)
maxCol = myMax(0,bsize,"col",colArr)
maxDig = myMax(0,bsize,"dig",digArr)
maxArrs = []
maxArrs.append(myMax(1,bsize,"row",rowArr))
maxArrs.append(myMax(1,bsize,"col",colArr))
maxArrs.append(myMax(1,bsize,"dig",digArr))
if myMax(0,bsize,"x",maxArrs) == 0 :
pos = bsize * (maxRow + 1) - bsize
if myMax(0,bsize,"x",maxArrs) == 1 :
pos = maxCol
if myMax(0,bsize,"x",maxArrs) == 2 :
if maxDig == 0 :
pos = maxDig
else:
pos = bsize - 1
retFlag = 0
y = 0
while y < bsize :
if not(retFlag):
if arr[pos] == " " :
retVal = pos
retFlag = 1
if myMax(0,bsize,"x",maxArrs) == 0 :
pos = pos + 1
if myMax(0,bsize,"x",maxArrs) == 1 :
pos = pos + bsize
if myMax(0,bsize,"x",maxArrs) == 2 :
if maxDig == 0 :
pos = pos + bsize + 1
else:
pos = pos + bsize - 1
y = y + 1
return retVal
def myMax(what,bsize,type,arr):
global playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
max = -1
maxIndex = -1
if type != "dig" :
i = 0
while i < bsize :
if arr[i] > max :
max = arr[i]
maxIndex = i
i = i + 1
if type == "dig" :
i = 0
while i < 2 :
if arr[i] > max :
max = arr[i]
maxIndex = i
i = i + 1
if what == 0 :
return maxIndex
else:
return max
def playerWin():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
who = playerToken
if (s1 == who == s2 == s3) or (s4 == who == s5 == s6) or (s7 == who == s8 == s9) or (s1 == who == s4 == s7) or (s2 == who == s5 == s8) or (s3 == who == s6 == s9) or (s1 == who == s5 == s9) or (s3 == who == s5 == s7) :
return 1
else:
return 0
def iWin():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
who = myToken
if (s1 == who == s2 == s3) or (s4 == who == s5 == s6) or (s7 == who == s8 == s9) or (s1 == who == s4 == s7) or (s2 == who == s5 == s8) or (s3 == who == s6 == s9) or (s1 == who == s5 == s9) or (s3 == who == s5 == s7) :
return 1
else:
return 0
def whereWinComp():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
who = myToken
if (s1 == who == s2 == s3) :
winArr = ['s1','s2','s3']
if (s4 == who == s5 == s6) :
winArr = ['s4','s5','s6']
if (s7 == who == s8 == s9) :
winArr = ['s7','s8','s9']
if (s1 == who == s4 == s7) :
winArr = ['s1','s4','s7']
if (s2 == who == s5 == s8) :
winArr = ['s2','s5','s8']
if (s3 == who == s6 == s9) :
winArr = ['s3','s6','s9']
if (s1 == who == s5 == s9) :
winArr = ['s1','s5','s9']
if (s3 == who == s5 == s7) :
winArr = ['s3','s5','s7']
def whereWinPlayer():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
who = playerToken
if (s1 == who == s2 == s3) :
winArr = ['s1','s2','s3']
if (s4 == who == s5 == s6) :
winArr = ['s4','s5','s6']
if (s7 == who == s8 == s9) :
winArr = ['s7','s8','s9']
if (s1 == who == s4 == s7) :
winArr = ['s1','s4','s7']
if (s2 == who == s5 == s8) :
winArr = ['s2','s5','s8']
if (s3 == who == s6 == s9) :
winArr = ['s3','s6','s9']
if (s1 == who == s5 == s9) :
winArr = ['s1','s5','s9']
if (s3 == who == s5 == s7) :
winArr = ['s3','s5','s7']
def draw():
global bsize,playerToken,myToken,gameOver,winArr,rowArr,colArr,digArr,vals,s1,s2,s3,s4,s5,s6,s7,s8,s9
drawCounter = 0
dCounter = 0
while dCounter < len(vals) :
if vals[dCounter] != " " :
drawCounter = drawCounter + 1
dCounter = dCounter + 1
if drawCounter == bsize * bsize :
return 1
else:
return 0
form = cgi.FieldStorage()
if form :
s1 = form['s1'].value
s2 = form['s2'].value
s3 = form['s3'].value
s4 = form['s4'].value
s5 = form['s5'].value
s6 = form['s6'].value
s7 = form['s7'].value
s8 = form['s8'].value
s9 = form['s9'].value
vals = [s1,s2,s3,s4,s5,s6,s7,s8,s9]
if draw() or playerWin() :
gameOver = 1
# Computer's Move!
movIndex = move(bsize,vals)
if not(gameOver) :
vals[movIndex] = myToken
# Update S's
if not(gameOver) :
if movIndex == 0 :
s1 = myToken
if movIndex == 1 :
s2 = myToken
if movIndex == 2 :
s3 = myToken
if movIndex == 3 :
s4 = myToken
if movIndex == 4 :
s5 = myToken
if movIndex == 5 :
s6 = myToken
if movIndex == 6 :
s7 = myToken
if movIndex == 7 :
s8 = myToken
if movIndex == 8 :
s9 = myToken
genBox2(bsize,vals)
if playerWin() :
print('<font face="verdana,arial,helvetica" color="#009900" size="4"><b>Wow! You Won!</b></font><br><br>')
print('<input type="button" onClick="location.href=\'pyttt.py\'" value="Play Again!" style="background:#CCCCCC; font-weight:bold; cursor:hand"><br><br>')
whereWinPlayer()
print('<script language="JavaScript">')
winCount = 0
while winCount < len(winArr) :
print('document.ttt.' + winArr[winCount] + '_btn.style.color=\'#009900\';')
winCount = winCount + 1
w = 0
while w < (bsize * bsize) :
if vals[w] == " " :
print('document.ttt.s' + str(w + 1) + '_btn.disabled=true;')
w = w + 1
print('</script>')
gameOver = 1
if iWin() and not(gameOver) :
print('<font face="verdana,arial,helvetica" color="#FF0000" size="4"><b>Oops! You Lost!</b></font><br><br>')
print('<input type="button" onClick="location.href=\'pyttt.py\'" value="Play Again!" style="background:#CCCCCC; font-weight:bold; cursor:hand"><br><br>')
whereWinComp()
print('<script language="JavaScript">')
winCount = 0
while winCount < len(winArr) :
print('document.ttt.' + winArr[winCount] + '_btn.style.color=\'#FF0000\';');
winCount = winCount + 1
w = 0
while w < bsize * bsize :
if vals[w] == " " :
print('document.ttt.s' + str(w + 1) + '_btn.disabled=true;')
w = w + 1
print('</script>')
gameOver = 1
if draw() and not(playerWin()) and not(iWin()) :
print('<font face="verdana,arial,helvetica" color="#000000" size="4"><b>It\'s a Draw!</b></font><br><br>')
print('<input type="button" onClick="location.href=\'pyttt.py\'" value="Play Again!" style="background:#CCCCCC; font-weight:bold; cursor:hand"><br><br>')
print('<script language="JavaScript">')
w = 0
while w < bsize * bsize :
if vals[w] == " " :
print('document.ttt.s' + str(w + 1) + '_btn.disabled=true;')
w = w + 1
print('</script>')
else:
genBox(bsize)
out2 = """<div style="font-family:verdana,arial,helvetica; font-weight:bold; font-size:10pt; color:#CC0000; background:#EFEFFF; width:100%; padding:3px" id="process"></div>
<table width="100%" bgcolor="#9999CC"><tr><td><span class="footer">© 2004 <a href="http://www.qiksearch.com" class="link">Premshree Pillai</a> | <a href="http://www.guestbookdepot.com/cgi-bin/guestbook.cgi?book_id=374186" class="link">Sign my Guestbook</a>.</span></td></tr></table>
</td></tr></table>
<table width="348" align="center" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td></td></tr></table>
<table width="346" align="center" bgcolor="#9999CC" cellspacing="0" cellpadding="0"><tr><td></td></tr></table>
</td></tr></table>
</body>
</html>"""
print(out2)
|
the-stack_0_10085 | import os
import time
import string
import argparse
import re
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
import torch.nn.functional as F
import numpy as np
from nltk.metrics.distance import edit_distance
from utils import CTCLabelConverter, AttnLabelConverter, Averager
from dataset import hierarchical_dataset, AlignCollate
from model import Model
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def benchmark_all_eval(model, criterion, converter, opt, calculate_infer_time=False):
""" evaluation with 10 benchmark evaluation datasets """
# The evaluation datasets, dataset order is same with Table 1 in our paper.
eval_data_list = ['IIIT5k_3000', 'SVT', 'IC03_860', 'IC03_867', 'IC13_857',
'IC13_1015', 'IC15_1811', 'SVTP', 'CUTE80', 'IC15_2077']
if calculate_infer_time:
evaluation_batch_size = 1 # batch_size should be 1 to calculate the GPU inference time per image.
else:
evaluation_batch_size = opt.batch_size
list_accuracy = []
total_forward_time = 0
total_evaluation_data_number = 0
total_correct_number = 0
log = open(f'./result/{opt.exp_name}/log_all_evaluation.txt', 'a')
dashed_line = '-' * 80
print(dashed_line)
log.write(dashed_line + '\n')
for eval_data in eval_data_list:
eval_data_path = os.path.join(opt.eval_data, eval_data)
AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
eval_data, eval_data_log = hierarchical_dataset(root=eval_data_path, opt=opt)
evaluation_loader = torch.utils.data.DataLoader(
eval_data, batch_size=evaluation_batch_size,
shuffle=False,
num_workers=int(opt.workers),
collate_fn=AlignCollate_evaluation, pin_memory=True)
_, accuracy_by_best_model, norm_ED_by_best_model, _, _, _, infer_time, length_of_data = validation(
model, criterion, evaluation_loader, converter, opt)
list_accuracy.append(f'{accuracy_by_best_model:0.3f}')
total_forward_time += infer_time
total_evaluation_data_number += len(eval_data)
total_correct_number += accuracy_by_best_model * length_of_data
log.write(eval_data_log)
print(f'Acc {accuracy_by_best_model:0.3f}\t normalized_ED {norm_ED_by_best_model:0.3f}')
log.write(f'Acc {accuracy_by_best_model:0.3f}\t normalized_ED {norm_ED_by_best_model:0.3f}\n')
print(dashed_line)
log.write(dashed_line + '\n')
averaged_forward_time = total_forward_time / total_evaluation_data_number * 1000
total_accuracy = total_correct_number / total_evaluation_data_number
params_num = sum([np.prod(p.size()) for p in model.parameters()])
evaluation_log = 'accuracy: '
for name, accuracy in zip(eval_data_list, list_accuracy):
evaluation_log += f'{name}: {accuracy}\t'
evaluation_log += f'total_accuracy: {total_accuracy:0.3f}\t'
evaluation_log += f'averaged_infer_time: {averaged_forward_time:0.3f}\t# parameters: {params_num/1e6:0.3f}'
print(evaluation_log)
log.write(evaluation_log + '\n')
log.close()
return None
def validation(model, criterion, evaluation_loader, converter, opt):
""" validation or evaluation """
n_correct = 0
norm_ED = 0
length_of_data = 0
infer_time = 0
valid_loss_avg = Averager()
for i, (image_tensors, labels) in enumerate(evaluation_loader):
batch_size = image_tensors.size(0)
length_of_data = length_of_data + batch_size
image = image_tensors.to(device)
# For max length prediction
length_for_pred = torch.IntTensor([opt.batch_max_length] * batch_size).to(device)
text_for_pred = torch.LongTensor(batch_size, opt.batch_max_length + 1).fill_(0).to(device)
text_for_loss, length_for_loss = converter.encode(labels, batch_max_length=opt.batch_max_length)
start_time = time.time()
preds, _= model(image, text_for_pred, is_train=False)
preds = preds[-1]
forward_time = time.time() - start_time
preds = preds[:, :text_for_loss.shape[1] - 1, :]
target = text_for_loss[:, 1:] # without [GO] Symbol
cost = criterion(preds.contiguous().view(-1, preds.shape[-1]), target.contiguous().view(-1))
# select max probability (greedy decoding) then decode index to character
_, preds_index = preds.max(2)
preds_str = converter.decode(preds_index, length_for_pred)
labels = converter.decode(text_for_loss[:, 1:], length_for_loss)
infer_time += forward_time
valid_loss_avg.add(cost)
# calculate accuracy & confidence score
preds_prob = F.softmax(preds, dim=2)
preds_max_prob, _ = preds_prob.max(dim=2)
confidence_score_list = []
for gt, pred, pred_max_prob in zip(labels, preds_str, preds_max_prob):
gt = gt[:gt.find('[s]')]
pred_EOS = pred.find('[s]')
pred = pred[:pred_EOS] # prune after "end of sentence" token ([s])
pred_max_prob = pred_max_prob[:pred_EOS]
# To evaluate 'case sensitive model' with alphanumeric and case insensitive setting.
if opt.sensitive and opt.data_filtering_off:
pred = pred.lower()
gt = gt.lower()
alphanumeric_case_insensitve = '0123456789abcdefghijklmnopqrstuvwxyz'
out_of_alphanumeric_case_insensitve = f'[^{alphanumeric_case_insensitve}]'
pred = re.sub(out_of_alphanumeric_case_insensitve, '', pred)
gt = re.sub(out_of_alphanumeric_case_insensitve, '', gt)
if pred == gt:
n_correct += 1
'''
(old version) ICDAR2017 DOST Normalized Edit Distance https://rrc.cvc.uab.es/?ch=7&com=tasks
"For each word we calculate the normalized edit distance to the length of the ground truth transcription."
if len(gt) == 0:
norm_ED += 1
else:
norm_ED += edit_distance(pred, gt) / len(gt)
'''
# ICDAR2019 Normalized Edit Distance
if len(gt) == 0 or len(pred) == 0:
norm_ED += 0
elif len(gt) > len(pred):
norm_ED += 1 - edit_distance(pred, gt) / len(gt)
else:
norm_ED += 1 - edit_distance(pred, gt) / len(pred)
# calculate confidence score (= multiply of pred_max_prob)
try:
confidence_score = pred_max_prob.cumprod(dim=0)[-1]
            except Exception:
                confidence_score = 0  # for empty pred case, when pruned after "end of sentence" token ([s])
confidence_score_list.append(confidence_score)
# print(pred, gt, pred==gt, confidence_score)
accuracy = n_correct / float(length_of_data) * 100
norm_ED = norm_ED / float(length_of_data) # ICDAR2019 Normalized Edit Distance
return valid_loss_avg.val(), accuracy, norm_ED, preds_str, confidence_score_list, labels, infer_time, length_of_data
def test(opt):
""" model configuration """
converter = AttnLabelConverter(opt.character)
opt.num_class = len(converter.character)
if opt.rgb:
opt.input_channel = 3
model = Model(opt)
print('model input parameters', opt.imgH, opt.imgW, opt.num_fiducial, opt.input_channel, opt.output_channel,
opt.hidden_size, opt.num_class, opt.batch_max_length)
model = torch.nn.DataParallel(model).to(device)
# load model
print('loading pretrained model from %s' % opt.saved_model)
model.load_state_dict(torch.load(opt.saved_model, map_location=device))
opt.exp_name = '_'.join(opt.saved_model.split('/')[1:])
# print(model)
""" keep evaluation model and result logs """
os.makedirs(f'./result/{opt.exp_name}', exist_ok=True)
os.system(f'cp {opt.saved_model} ./result/{opt.exp_name}/')
""" setup loss """
criterion = torch.nn.CrossEntropyLoss(ignore_index=0).to(device) # ignore [GO] token = ignore index 0
""" evaluation """
model.eval()
with torch.no_grad():
if opt.benchmark_all_eval: # evaluation with 10 benchmark evaluation datasets
benchmark_all_eval(model, criterion, converter, opt)
else:
log = open(f'./result/{opt.exp_name}/log_evaluation.txt', 'a')
AlignCollate_evaluation = AlignCollate(imgH=opt.imgH, imgW=opt.imgW, keep_ratio_with_pad=opt.PAD)
eval_data, eval_data_log = hierarchical_dataset(root=opt.eval_data, opt=opt)
evaluation_loader = torch.utils.data.DataLoader(
eval_data, batch_size=opt.batch_size,
shuffle=False,
num_workers=int(opt.workers),
collate_fn=AlignCollate_evaluation, pin_memory=True)
_, accuracy_by_best_model, _, _, _, _, _, _ = validation(
model, criterion, evaluation_loader, converter, opt)
log.write(eval_data_log)
print(f'{accuracy_by_best_model:0.3f}')
log.write(f'{accuracy_by_best_model:0.3f}\n')
log.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--eval_data', default='../data_lmdb_release/evaluation/', help='path to evaluation dataset')
parser.add_argument('--benchmark_all_eval', default=True, action='store_true', help='evaluate 10 benchmark evaluation datasets')
parser.add_argument('--workers', type=int, help='number of data loading workers', default=4)
parser.add_argument('--batch_size', type=int, default=192, help='input batch size')
parser.add_argument('--saved_model', required=True, help="path to saved_model to evaluation")
""" Data processing """
parser.add_argument('--batch_max_length', type=int, default=35, help='maximum-label-length')
parser.add_argument('--imgH', type=int, default=32, help='the height of the input image')
parser.add_argument('--imgW', type=int, default=100, help='the width of the input image')
parser.add_argument('--rgb', action='store_true', help='use rgb input')
parser.add_argument('--character', type=str, default='0123456789abcdefghijklmnopqrstuvwxyz', help='character label')
parser.add_argument('--sensitive', action='store_true', default=True, help='for sensitive character mode')
parser.add_argument('--PAD', action='store_true', help='whether to keep ratio then pad for image resize')
parser.add_argument('--data_filtering_off', action='store_true', help='for data_filtering_off mode')
""" Model Architecture """
parser.add_argument('--num_fiducial', type=int, default=20, help='number of fiducial points of TPS-STN')
parser.add_argument('--input_channel', type=int, default=1, help='the number of input channel of Feature extractor')
parser.add_argument('--output_channel', type=int, default=512,
help='the number of output channel of Feature extractor')
parser.add_argument('--hidden_size', type=int, default=512, help='the size of the LSTM hidden state')
opt = parser.parse_args()
""" vocab / character number configuration """
if opt.sensitive:
opt.character = string.printable[:-6] # same with ASTER setting (use 94 char).
cudnn.benchmark = True
cudnn.deterministic = True
opt.num_gpu = torch.cuda.device_count()
test(opt)
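
# Hedged CLI example (illustrative only; dataset and checkpoint paths are
# assumptions, not part of the repository layout):
#
#   CUDA_VISIBLE_DEVICES=0 python test.py \
#       --eval_data ../data_lmdb_release/evaluation \
#       --saved_model saved_models/TPS-ResNet-BiLSTM-Attn/best_accuracy.pth \
#       --benchmark_all_eval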
|
the-stack_0_10086 | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import pickle
import time
import warnings
import mmcv
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import init_random_seed, set_random_seed, train_detector
from mmdet.apis.active_learning import (
custom_logic_pretraining,
custom_logic_posttraining,
active_learning_inference,
MAX_IMAGE_HEIGHT,
MAX_IMAGE_WIDTH,
)
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import collect_env, get_root_logger
def parse_args():
parser = argparse.ArgumentParser(
description='Train a detector for active learning and use it for '
'active learning inference, with the default dataset '
'being `DatumaroV1Dataset`'
)
parser.add_argument(
'train_dataset_dir',
help="""Dataset directory for training. It should have the following
structure
train_dataset_dir/
├── annotations
│ ├── train.json
│ └── val.json
└── images
└── default
├── xxx.jpg
├── ...
└── yyy.jpg
where `train.json` and `val.json` should have already been
processed with
`mmdetection/tools/dataset_converters/datumaro_to_coco.py`.
""",
)
parser.add_argument(
'inference_dataset_dir',
help="Dataset directory for AL inference. To be used with "
"`inference_patterns`",
)
parser.add_argument('work_dir', help='the dir to save logs and models')
parser.add_argument(
'--config',
help='train config file path',
default='configs/_active_learning_/faster_rcnn_r50_fpn_1x_datumaro.py',
)
parser.add_argument(
'--inference_patterns',
type=str,
nargs="+",
default=["*.jpg", "*.png", "*.jpeg"],
help="Search patterns for data. For example, in a image-based task, "
"one should specify ['*.jpg', '*.png', '*.jpeg']",
)
parser.add_argument(
'--max-image-width',
help='Maximum image width',
default=MAX_IMAGE_WIDTH,
)
parser.add_argument(
'--max-image-height',
help='Maximum image height',
default=MAX_IMAGE_HEIGHT,
)
parser.add_argument(
'--no-autoscale-lr',
action="store_true",
help='Whether NOT to auto-scale the learning rate based on the batch '
'size and number of GPUs. By default lr autoscaling is enabled.',
)
parser.add_argument(
'--backbone-path',
        help='Optional path to a pretrained backbone checkpoint to load.',
required=False,
type=str,
)
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
"""
======================================================================
Basic settings. Largely remain the same as the original `train.py` script.
======================================================================
"""
args = parse_args()
cfg = Config.fromfile(args.config)
orig_batch_size = cfg.data.samples_per_gpu
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.work_dir = args.work_dir
cfg.auto_resume = False
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids
else:
cfg.gpu_ids = range(1) if args.gpus is None else range(args.gpus)
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
if len(cfg.gpu_ids) > 1:
warnings.warn(
f'We treat {cfg.gpu_ids} as gpu-ids, and reset to '
f'{cfg.gpu_ids[0:1]} as gpu-ids to avoid potential error in '
'non-distribute training time.')
cfg.gpu_ids = cfg.gpu_ids[0:1]
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
cfg.distributed = distributed
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# set random seeds
seed = init_random_seed(args.seed)
logger.info(f'Set random seed to {seed}, '
f'deterministic: False')
set_random_seed(seed, deterministic=False)
cfg.seed = seed
meta['seed'] = seed
meta['exp_name'] = osp.basename(args.config)
"""
======================================================================
Custom pre-training logic.
======================================================================
"""
# Set custom attributes
custom_logic_pretraining(cfg, args, logger, orig_batch_size)
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
"""
======================================================================
Define model, datasets, etc. then start training.
======================================================================
"""
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
datasets = [build_dataset(cfg.data.train)]
logger.info(f"Number of training samples: {len(datasets[0])}")
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
runner = train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=True,
timestamp=timestamp,
meta=meta)
"""
======================================================================
Custom post-training logic.
======================================================================
"""
custom_logic_posttraining(runner, cfg, logger)
"""
======================================================================
Active learning inference.
======================================================================
"""
results = active_learning_inference(
cfg=cfg,
model=model,
data_dir=args.inference_dataset_dir,
patterns=args.inference_patterns,
logger=logger,
)
# Consolidate results
mAPs = runner.meta["all_metrics"]["bbox_mAP"]
best_performance = max(mAPs) if len(mAPs) > 0 else -1
cat2label = datasets[0].cat2label
# Sanity checks to make sure that original classes are still preserved
assert all(k == v for k, v in cat2label.items()) # sanity check
assert len(cat2label) == len(datasets[0].CLASSES)
categories = [
{"id": i, "name": cat} for i, cat in enumerate(datasets[0].CLASSES)]
results = {
"images": results,
"model_performance": best_performance,
"classes": categories,
}
# Save
with open(osp.join(cfg.work_dir, "al_inference.pkl"), "wb") as fout:
pickle.dump(results, fout)
if __name__ == '__main__':
main()
|
the-stack_0_10088 | from invmonInfra.enum import InventoryLastStatusEnum
from invmonService import FirefoxDriverService, HtmlParser, BasicLoggerService
from invmonInfra.base import JobsInventoryBase
from invmonInfra.domain import JobsInventoryInterface, DriverInterface, LoggerInterface
from invmonInfra.models import InventorySqlModel
class JobShopDisneyService(JobsInventoryBase, JobsInventoryInterface):
_logger: LoggerInterface
_driver: DriverInterface
_parser: HtmlParser
_urlPattern: str = '%shopdisney.com%'
def __init__(self, logger: LoggerInterface, driver = DriverInterface) -> None:
self._logger = logger
self._driver = driver
self._parser = HtmlParser()
self._parser.setLogger(self._logger)
def __checkInventoryStatus__(self, item: InventorySqlModel) -> None:
# Set the URI
self._logger.info(f"Checking '{item.url}'")
self.setUri(item.url)
self._driver.driverGoTo(self.getUri())
self.parser = HtmlParser(sourceCode=self._driver.driverGetContent())
        # validate that the HTML format didn't change
# Check if 'out of stock' is present
outOfStock = self.__checkIfOutOfStock__(tag='div', key='class', value='product-oos-info-title')
#inStock = self.checkIfInStock()
if outOfStock == True:
            # if the lastStatus didn't change, move on
            self._logger.debug("Item is out of stock")
            if item.lastStatus == InventoryLastStatusEnum.OUTOFSTOCK.value:
                self._logger.debug("Inventory Status didn't change, checking the next item.")
return None
item.lastStatus = InventoryLastStatusEnum.OUTOFSTOCK.value
if outOfStock == False:
            # if the lastStatus didn't change, move on
            self._logger.debug("Item is in stock!")
            if item.lastStatus == InventoryLastStatusEnum.INSTOCK.value:
                self._logger.debug("Inventory Status didn't change, checking the next item.")
return None
item.lastStatus = InventoryLastStatusEnum.INSTOCK.value
self.__updateInventoryRecord__(item)
self.__addAlerts__(item)
def checkIfInStock(self) -> bool:
invStatus: str = self.parser.findSingle(name='div', attrKey='class', attrValue='col-12 prices-add-to-cart-actions'
)
|
the-stack_0_10091 | #!/usr/bin/python3
# all arguments to this script are considered as json files
# and attempted to be formatted alphabetically
import json
import os
from sys import argv
files = argv[1:]
for file in files[:]:
if os.path.isdir(file):
files.remove(file)
for f in os.listdir(file):
files.append(os.path.join(file, f))
for file in files:
if not file.endswith('.json'):
continue
print("formatting file {}".format(file))
with open(file) as f:
j = json.load(f)
if isinstance(j, list):
for item in j:
item["Exposes"] = sorted(item["Exposes"], key=lambda k: k["Type"])
else:
j["Exposes"] = sorted(j["Exposes"], key=lambda k: k["Type"])
with open(file, 'w') as f:
f.write(json.dumps(j, indent=4, sort_keys=True, separators=(',', ': ')))
|
the-stack_0_10092 | from aces import Aces
class sub(Aces):
def submit(self):
opt=dict(
units="metal",
species="graphene_knot",
method="nvt",
nodes=1,
procs=4,
queue="q1.4",
runTime=500000
,runner="strain"
)
for T in range(100,300,20):
app=dict(vStrain=True,reverseStrain=True,equTime=200000,T=T,strainStep=1000,minStrain=-0.15,maxStrain=0.05,timestep=.3e-3,latx=70,laty=2)
self.commit(opt,app);
if __name__=='__main__':
sub().run()
|
the-stack_0_10093 | #!/usr/bin/python
# Copyright (c) 2014 Wladmir J. van der Laan
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
Script to generate list of seed nodes for chainparams.cpp.
This script expects two text files in the directory that is passed as an
argument:
nodes_main.txt
nodes_test.txt
These files must consist of lines in the format
<ip>
<ip>:<port>
[<ipv6>]
[<ipv6>]:<port>
<onion>.onion
0xDDBBCCAA (IPv4 little-endian old pnSeeds format)
The output will be two data structures with the peers in binary format:
static SeedSpec6 pnSeed6_main[]={
...
}
static SeedSpec6 pnSeed6_test[]={
...
}
These should be pasted into `src/chainparamsseeds.h`.
'''
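# Illustrative example (derived from the parsing rules documented above): the
# line "1.2.3.4:8273" in nodes_main.txt is emitted as the IPv4-in-IPv6 entry
#   {{0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,0xff,0xff,0x01,0x02,0x03,0x04}, 8273}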
from __future__ import print_function, division
from base64 import b32decode
from binascii import a2b_hex
import sys, os
import re
# ipv4 in ipv6 prefix
pchIPv4 = bytearray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff])
# tor-specific ipv6 prefix
pchOnionCat = bytearray([0xFD,0x87,0xD8,0x7E,0xEB,0x43])
def name_to_ipv6(addr):
if len(addr)>6 and addr.endswith('.onion'):
vchAddr = b32decode(addr[0:-6], True)
if len(vchAddr) != 16-len(pchOnionCat):
            raise ValueError('Invalid onion %s' % addr)
return pchOnionCat + vchAddr
elif '.' in addr: # IPv4
return pchIPv4 + bytearray((int(x) for x in addr.split('.')))
elif ':' in addr: # IPv6
sub = [[], []] # prefix, suffix
x = 0
addr = addr.split(':')
for i,comp in enumerate(addr):
if comp == '':
if i == 0 or i == (len(addr)-1): # skip empty component at beginning or end
continue
x += 1 # :: skips to suffix
assert(x < 2)
else: # two bytes per component
val = int(comp, 16)
sub[x].append(val >> 8)
sub[x].append(val & 0xff)
nullbytes = 16 - len(sub[0]) - len(sub[1])
assert((x == 0 and nullbytes == 0) or (x == 1 and nullbytes > 0))
return bytearray(sub[0] + ([0] * nullbytes) + sub[1])
elif addr.startswith('0x'): # IPv4-in-little-endian
return pchIPv4 + bytearray(reversed(a2b_hex(addr[2:])))
else:
raise ValueError('Could not parse address %s' % addr)
def parse_spec(s, defaultport):
match = re.match('\[([0-9a-fA-F:]+)\](?::([0-9]+))?$', s)
if match: # ipv6
host = match.group(1)
port = match.group(2)
else:
(host,_,port) = s.partition(':')
if not port:
port = defaultport
else:
port = int(port)
host = name_to_ipv6(host)
return (host,port)
def process_nodes(g, f, structname, defaultport):
g.write('static SeedSpec6 %s[] = {\n' % structname)
first = True
for line in f:
comment = line.find('#')
if comment != -1:
line = line[0:comment]
line = line.strip()
if not line:
continue
if not first:
g.write(',\n')
first = False
(host,port) = parse_spec(line, defaultport)
hoststr = ','.join(('0x%02x' % b) for b in host)
g.write(' {{%s}, %i}' % (hoststr, port))
g.write('\n};\n')
def main():
if len(sys.argv)<2:
print(('Usage: %s <path_to_nodes_txt>' % sys.argv[0]), file=sys.stderr)
exit(1)
g = sys.stdout
indir = sys.argv[1]
g.write('#ifndef H_CHAINPARAMSSEEDS\n')
g.write('#define H_CHAINPARAMSSEEDS\n')
g.write('// List of fixed seed nodes for the bitcoin network\n')
g.write('// AUTOGENERATED by contrib/devtools/generate-seeds.py\n\n')
g.write('// Each line contains a 16-byte IPv6 address and a port.\n')
g.write('// IPv4 as well as onion addresses are wrapped inside a IPv6 address accordingly.\n')
with open(os.path.join(indir,'nodes_main.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_main', 8273)
g.write('\n')
with open(os.path.join(indir,'nodes_test.txt'),'r') as f:
process_nodes(g, f, 'pnSeed6_test', 8272)
g.write('#endif\n')
if __name__ == '__main__':
main()
|
the-stack_0_10096 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class Operations:
"""Operations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerregistry.v2019_05_01_preview.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def list(
self,
**kwargs
) -> AsyncIterable["models.OperationListResult"]:
"""Lists all of the available Azure Container Registry REST API operations.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either OperationListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.containerregistry.v2019_05_01_preview.models.OperationListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.OperationListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2017-10-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('OperationListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/providers/Microsoft.ContainerRegistry/operations'} # type: ignore
|
the-stack_0_10098 | import cv2
import numpy as np
from .utils import load_json, load_value_file
def get_video_names_and_annotations(data, subset):
"""Selects clips of a given subset from the parsed json annotation"""
video_names = []
annotations = []
for key, value in data['database'].items():
this_subset = value['subset']
if this_subset == subset:
video_name = key
label = value['annotations'].get('label', '')
if label:
video_name = label + '/' + video_name
video_names.append(video_name)
annotations.append(value)
return video_names, annotations
def get_video_props(video_path, video_format, annotation):
"""Tries to read video properties (total number of frames and FPS) from annotation
file or read it from file otherwise"""
n_frames = annotation.get('n_frames')
fps = annotation.get('fps')
if n_frames and fps:
return n_frames, fps
if video_format == 'frames':
if not video_path.exists():
return 0, 0
n_frames = int(load_value_file(video_path / 'n_frames'))
fps = 30
else:
cap = cv2.VideoCapture(video_path.as_posix())
n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
fps = cap.get(cv2.CAP_PROP_FPS)
return n_frames, fps
def load_json_annotation(root_path, annotation_path, subset, flow_path=None, video_format='frames'):
"""Load annotation in ActivityNet-like format"""
data = load_json(annotation_path)
video_names, annotations = get_video_names_and_annotations(data, subset)
idx_to_class = dict(enumerate(data['labels']))
class_to_idx = {v: k for k, v in idx_to_class.items()}
videos = []
for i, (video_name, annotation) in enumerate(zip(video_names, annotations)):
if i % 1000 == 0:
print('dataset loading [{}/{}]'.format(i, len(video_names)))
if video_format == 'video' and not video_name.lower().endswith('.mp4'):
video_name += '.mp4'
video_path = root_path / video_name
n_frames, fps = get_video_props(video_path, video_format, annotation)
if n_frames == 0:
continue
flow_full_path = flow_path
if flow_path is not None:
flow_full_path = (flow_path / video_name).as_posix()
try:
video_id = video_name.split('/')[1]
except IndexError:
video_id = video_name
def add_sample(begin_frame, end_frame, label):
sample = {
'video': video_path.as_posix(),
'flow': flow_full_path,
'segment': [begin_frame, end_frame],
'n_frames': n_frames,
'fps': fps,
'video_id': video_id,
'label': class_to_idx[label]
}
videos.append(sample)
video_annotation = annotation['annotations']
events_annotation = video_annotation.get('events', None)
if events_annotation is not None:
for event in events_annotation:
begin_time = float(event['start'])
end_time = float(event['stop'])
label = event['event']
assert label in class_to_idx
# From time to frame number.
timestamps = video_annotation['timestamps']
begin_frame, end_frame = np.searchsorted(timestamps, [begin_time, end_time])
# Frame indices are one-based.
begin_frame += 1
if begin_frame < end_frame:
add_sample(begin_frame, end_frame, label)
else:
begin_frame = 1
end_frame = n_frames
add_sample(begin_frame, end_frame, annotation['annotations']['label'])
return videos, idx_to_class
|
the-stack_0_10099 |
import smtplib
import typing
import flask
import flask_mail
from ... import mail
from . import core
from ...models import BackgroundTask, BackgroundTaskStatus
def post_send_mail_task(
subject: str,
recipients: typing.List[str],
text: str,
html: str,
auto_delete: bool = True
) -> typing.Tuple[BackgroundTaskStatus, typing.Optional[BackgroundTask]]:
return core.post_background_task(
type='send_mail',
data={
'subject': subject,
'recipients': recipients,
'text': text,
'html': html
},
auto_delete=auto_delete
)
def handle_send_mail_task(
data: typing.Dict[str, typing.Any]
) -> bool:
try:
mail.send(flask_mail.Message(
subject=data['subject'],
sender=flask.current_app.config['MAIL_SENDER'],
recipients=data['recipients'],
body=data['text'],
html=data['html']
))
return True
except smtplib.SMTPRecipientsRefused:
return False
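# Illustrative usage (recipient address is a placeholder):
#     status, task = post_send_mail_task(
#         subject='Export finished',
#         recipients=['user@example.com'],
#         text='Your export is ready.',
#         html='<p>Your export is ready.</p>',
#     )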
|
the-stack_0_10101 | from cirq_qaoa.cirq_max_cut_solver import define_grid_qubits, solve_maxcut
def main():
size = 2
steps = 2
qubits = define_grid_qubits(size=size)
qubit_pairs = [(qubits[0], qubits[1]), (qubits[0],
qubits[2]), (qubits[1], qubits[2])]
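    # The three pairs above form a triangle graph; its maximum cut separates
    # one vertex from the other two, cutting 2 of the 3 edges, which is the
    # value the QAOA optimisation should approach.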
solve_maxcut(qubit_pairs=qubit_pairs, steps=steps)
if __name__ == '__main__':
main()
|
the-stack_0_10104 | # -*- coding: utf-8 -*-
# *****************************************************************************
# NICOS, the Networked Instrument Control System of the MLZ
# Copyright (c) 2009-2022 by the NICOS contributors (see AUTHORS)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Module authors:
# Mark Koennecke <[email protected]>
# Jakob Lass <[email protected]>
#
# *****************************************************************************
"""
This is part of the TAS library which implements Mark Lumsden's UB matrix
algorithm for triple axis. See J. Appl. Cryst. (2005). 38, 405-411
https://doi.org/10.1107/S0021889805004875 for reference.
The original implementation was in ANSII-C by Mark Koennecke at PSI.
This implementation has been ported from C to python by Jakob Lass, then
also at PSI
"""
from copy import deepcopy
import numpy as np
from nicos_sinq.sxtal.singlexlib import matFromTwoVectors
from nicos_sinq.sxtal.trigd import Acosd, Atand2, Cosd, Rtand, Sind, \
angleBetween
def tasAngleBetween(v1, v2):
return np.rad2deg(angleBetween(v1, v2))
def fmod(x, y):
s = np.sign(x)
res = s*np.mod(np.abs(x), y)
return res
class tasQEPosition():
def __init__(self, ki, kf, qh, qk, ql, qm):
self.ki = ki
self.kf = kf
self.qh = qh
self.qk = qk
self.ql = ql
self.qm = qm
class tasAngles():
def __init__(self, monochromator_two_theta, a3,
sample_two_theta, sgl, sgu,
analyzer_two_theta):
self.monochromator_two_theta = monochromator_two_theta
self.a3 = a3
self.sample_two_theta = sample_two_theta
self.sgu = sgu
self.sgl = sgl
self.analyzer_two_theta = analyzer_two_theta
class tasReflection():
def __init__(self, qe=None, angles=None, ki=None, kf=None,
qh=None, qk=None, ql=None, qm=None,
monochromator_two_theta=None, a3=None,
sample_two_theta=None,
sgl=None, sgu=None, analyzer_two_theta=None):
if isinstance(qe, tasReflection):
self.qe = deepcopy(qe.qe)
self.angles = deepcopy(qe.angles)
else:
if qe is None:
self.qe = tasQEPosition(ki, kf, qh, qk, ql, qm)
else:
self.qe = qe
if angles is None:
self.angles = tasAngles(monochromator_two_theta,
a3, sample_two_theta,
sgl, sgu,
analyzer_two_theta)
else:
self.angles = angles
def __getattr__(self, key):
# if key in ['qe','angles']: # Is automatically tested
# return self.__dict__[key]
if key in self.qe.__dict__.keys():
return getattr(self.qe, key)
elif key in self.angles.__dict__.keys():
return getattr(self.angles, key)
else:
raise AttributeError(
"'tasReflection' object hs no attribute '{}'".format(key))
ECONST = 2.072 # 2.072122396
def energyToK(energy):
"""Convert energy in meV to K in q/A"""
return np.sqrt(energy / ECONST)
def KToEnergy(K):
"""Convert K in 1/A to E in meV"""
return ECONST*np.power(K, 2.0)
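# With ECONST = 2.072 meV*A^2: e.g. an incident energy of 5 meV corresponds to
# energyToK(5) ~= 1.55 1/A, and KToEnergy(1.55) ~= 4.98 meV.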
def tasReflectionToHC(r, B):
"""Calculate HC from HKL and B matrix"""
return tasHKLToHC(r.qh, r.qk, r.ql, B)
def tasHKLToHC(qh, qk, ql, B):
"""Calculate HC from reflection r and B matrix"""
h = np.array([qh, qk, ql])
hc = np.dot(B, h)
return hc
def calcTheta(ki, kf, two_theta):
"""
|ki| - |kf|cos(two_theta)
tan(theta) = --------------------------
|kf|sin(two_theta)
"""
return Rtand(np.abs(ki) - np.abs(kf) * Cosd(two_theta),
np.abs(kf) * Sind(two_theta))
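# For elastic scattering (|ki| == |kf|) the expression above reduces to
# theta = two_theta / 2, i.e. the sample rotation sits halfway into the
# scattering angle.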
def tasAngleBetweenReflections(B, r1, r2):
"""Calculate angle between two reflections"""
return tasAngleBetweenReflectionsHKL(B,
r1.qh, r1.qk, r1.ql,
r2.qh, r2.qk, r2.ql)
def tasAngleBetweenReflectionsHKL(B,
h1, k1, l1,
h2, k2, l2):
"""Calculate angle between two reflections"""
v1 = np.array([h1, k1, l1])
v2 = np.array([h2, k2, l2])
chi1 = np.einsum('ij,j...->i...', B, v1)
chi2 = np.einsum('ij,j...->i...', B, v2)
angle = tasAngleBetween(chi1, chi2)
return angle
def uFromAngles(om, sgu, sgl):
u = np.array([Cosd(om)*Cosd(sgl),
-Sind(om)*Cosd(sgu)+Cosd(om)*Sind(sgl)*Sind(sgu),
Sind(om)*Sind(sgu)+Cosd(om)*Sind(sgl)*Cosd(sgu)])
return u
def calcTasUVectorFromAngles(rr):
ss = np.sign(rr.sample_two_theta)
r = tasReflection(rr)
r.sample_two_theta = np.abs(r.sample_two_theta)
theta = calcTheta(r.ki, r.kf, r.sample_two_theta)
om = r.angles.a3 - ss*theta
m = uFromAngles(om, r.angles.sgu, ss*r.angles.sgl)
return m
def tasReflectionToQC(r, UB):
return tasReflectionToQCHKL(r.qh, r.qk, r.ql, UB)
def tasReflectionToQCHKL(h, k, ll, UB):
Q = np.array([h, k, ll])
return np.einsum('ij,j...->i...', UB, Q)
def makeAuxReflection(B, r1, ss, hkl):
r2 = tasReflection(r1)
r2.qe.qh, r2.qe.qk, r2.qe.ql = hkl
theta = calcTheta(r1.qe.ki, r1.qe.kf,
ss*r1.angles.sample_two_theta)
om = r1.angles.a3 - ss*theta
om += tasAngleBetweenReflectionsHKL(B, r1.qh, r1.qk,
r1.ql, *hkl)
QC = tasReflectionToHC(r2.qe, B)
q = np.linalg.norm(QC)
cos2t = np.divide(r1.ki * r1.ki + r1.kf * r1.kf - q * q,
(2. * np.abs(r1.ki) * np.abs(r1.kf)))
if np.abs(cos2t) > 1.:
raise RuntimeError('Scattering angle not closed!')
r2.angles.sample_two_theta = ss * Acosd(cos2t)
theta = calcTheta(r1.qe.ki, r1.qe.kf, ss*r2.angles.sample_two_theta)
r2.angles.a3 = om + ss*theta
r2.angles.a3 = fmod(r2.angles.a3 + ss*180., 360.) - ss*180.
return r2
def calcTwoTheta(B, ref, ss):
QC = tasReflectionToHC(ref, B)
q = np.linalg.norm(QC)
cos2t = np.divide(ref.ki * ref.ki + ref.kf * ref.kf - q * q,
(2. * np.abs(ref.ki) * np.abs(ref.kf)))
if np.abs(cos2t) > 1.:
raise RuntimeError(
'Calculated abs(cos2t) value {} bigger than 1!'
' Scattering angle not closed'.format(np.abs(cos2t)))
value = ss * Acosd(cos2t)
return value
def calcPlaneNormal(r1, r2):
u1 = calcTasUVectorFromAngles(r1)
u2 = calcTasUVectorFromAngles(r2)
planeNormal = np.cross(u1, u2)
planeNormal *= 1.0/np.linalg.norm(planeNormal)
    # In the original TasCode a check on the sign of planeNormal[2] is
    # commented out (if negative, the z component would be negated); here we
    # simply take the absolute value of the z component.
planeNormal[2] = np.abs(planeNormal[2])
return planeNormal
def calcTasUBFromTwoReflections(cell, r1, r2):
B = cell.calculateBMatrix()
h1 = tasReflectionToHC(r1.qe, B)
h2 = tasReflectionToHC(r2.qe, B)
HT = matFromTwoVectors(h1, h2)
# calculate U vectors and UT matrix
u1 = calcTasUVectorFromAngles(r1)
u2 = calcTasUVectorFromAngles(r2)
UT = matFromTwoVectors(u1, u2)
# UT = U * HT
U = np.dot(UT, HT.T)
UB = np.dot(U, B)
return UB
def buildRMatrix(UB, planeNormal, qe):
U1V = tasReflectionToQC(qe, UB)
U1V *= 1.0/np.linalg.norm(U1V)
U2V = np.cross(planeNormal, U1V)
if np.linalg.norm(U2V) < .0001:
raise RuntimeError('Found vector is too short')
TV = buildTVMatrix(U1V, U2V)
TVINV = np.linalg.inv(TV)
return TVINV
def buildTVMatrix(U1V, U2V):
U2V *= 1.0/np.linalg.norm(U2V)
T3V = np.cross(U1V, U2V)
T3V *= 1.0/np.linalg.norm(T3V)
T = np.zeros((3, 3))
for i in range(3):
T[i][0] = U1V[i]
T[i][1] = U2V[i]
T[i][2] = T3V[i]
return T
def calcTasQAngles(UB, planeNormal, ss, a3offset, qe):
R = buildRMatrix(UB, planeNormal, qe)
angles = tasAngles(0, 0, 0, 0, 0, 0)
cossgl = np.sqrt(R[0][0]*R[0][0]+R[1][0]*R[1][0])
angles.sgl = ss*Atand2(-R[2][0], cossgl)
if np.abs(angles.sgl - 90.) < .5:
raise RuntimeError('Combination of UB and Q is not valid')
    # Now, this is slightly different than in the publication by M. Lumsden.
# The reason is that the atan2 helps to determine the sign of om
# whereas the sin, cos formula given by M. Lumsden yield ambiguous signs
# especially for om.
# sgu = atan(R[2][1],R[2][2]) where:
# R[2][1] = cos(sgl)sin(sgu)
# R[2][2] = cos(sgu)cos(sgl)
# om = atan(R[1][0],R[0][0]) where:
# R[1][0] = sin(om)cos(sgl)
# R[0][0] = cos(om)cos(sgl)
# The definitions of the R components are taken from M. Lumsden
# R-matrix definition.
om = Atand2(R[1][0]/cossgl, R[0][0]/cossgl)
angles.sgu = Atand2(R[2][1]/cossgl, R[2][2]/cossgl)
QC = tasReflectionToQC(qe, UB)
# q = 2.*np.pi*np.linalg.norm(QC)
q = np.linalg.norm(QC)
cos2t = (qe.ki * qe.ki + qe.kf * qe.kf -
q * q) / (2. * np.abs(qe.ki) * np.abs(qe.kf))
if np.abs(cos2t) > 1.:
raise RuntimeError('Scattering angle cannot '
'be closed, cos2t = ', cos2t)
theta = calcTheta(qe.ki, qe.kf, Acosd(cos2t))
angles.sample_two_theta = ss * Acosd(cos2t)
angles.a3 = om + ss*theta + a3offset
#
# put a3 into -180, 180 properly. We can always turn by 180
# because the scattering geometry is symmetric in this respect.
# It is like looking at the scattering plane from the other side
angles.a3 = fmod(angles.a3 + ss*180., 360.) - ss*180.
return angles
def calcScatteringPlaneNormal(qe1, qe2):
v1 = [qe1.qh, qe1.qk, qe1.ql]
v2 = [qe2.qh, qe2.qk, qe2.ql]
planeNormal = np.cross(v1, v2)
planeNormal *= 1.0/np.linalg.norm(planeNormal)
return planeNormal
def calcTasQH(ub, angles, ki, kf):
ubinv = np.linalg.inv(ub)
om = angles.a3
sample_two_theta = angles.sample_two_theta
sgu = angles.sgu
sgl = angles.sgl
ss = np.sign(sample_two_theta)
theta = calcTheta(ki, kf, abs(sample_two_theta))
om = om - ss*theta
qv = uFromAngles(om, sgu, ss*sgl)
# normalize the QV vector to be the length of the Q vector
# Thereby take into account the physicists magic fudge
# 2PI factor
q = np.sqrt(ki**2 + kf**2 -
2. * ki * kf * Cosd(sample_two_theta))
# The line below depends on the 2PI conventions.
q /= np.pi*2.
qv *= q
return ubinv.dot(qv)
|
the-stack_0_10106 | from telethon.tl.functions.account import UpdateProfileRequest
from telethon.tl.functions.photos import DeletePhotosRequest, UploadProfilePhotoRequest
from telethon.tl.functions.users import GetFullUserRequest
from telethon.tl.types import InputPhoto
from userbot import CMD_HELP, LOGS, STORAGE, bot
from userbot.events import register
if not hasattr(STORAGE, "userObj"):
STORAGE.userObj = False
@register(outgoing=True, pattern=r"^\.impostor ?(.*)")
async def impostor(event):
inputArgs = event.pattern_match.group(1)
if "restore" in inputArgs:
await event.edit("**Voltando à minha verdadeira identidade...**")
if not STORAGE.userObj:
return await event.edit(
"**Você precisa clonar um perfil antes de reverter!**"
)
await updateProfile(STORAGE.userObj, restore=True)
return await event.edit("**Revertido com sucesso!**")
if inputArgs:
try:
user = await event.client.get_entity(inputArgs)
except:
return await event.edit("**Nome de usuário/ID inválido.**")
userObj = await event.client(GetFullUserRequest(user))
elif event.reply_to_msg_id:
replyMessage = await event.get_reply_message()
if replyMessage.sender_id is None:
return await event.edit("**Não é possível se passar por administradores anônimos, sed.**")
userObj = await event.client(GetFullUserRequest(replyMessage.sender_id))
else:
return await event.edit(
"**Use** `.help impersonate` **para aprender como usá-lo.**"
)
if not STORAGE.userObj:
STORAGE.userObj = await event.client(GetFullUserRequest(event.sender_id))
LOGS.info(STORAGE.userObj)
await event.edit("**Roubando a identidade dessa pessoa aleatória...**")
await updateProfile(userObj)
await event.edit("**Eu sou você e você é eu, somos um só.**")
async def updateProfile(userObj, restore=False):
firstName = (
"Deleted Account"
if userObj.user.first_name is None
else userObj.user.first_name
)
lastName = "" if userObj.user.last_name is None else userObj.user.last_name
userAbout = userObj.about if userObj.about is not None else ""
userAbout = "" if len(userAbout) > 70 else userAbout
if restore:
userPfps = await bot.get_profile_photos("me")
userPfp = userPfps[0]
await bot(
DeletePhotosRequest(
id=[
InputPhoto(
id=userPfp.id,
access_hash=userPfp.access_hash,
file_reference=userPfp.file_reference,
)
]
)
)
else:
try:
userPfp = userObj.profile_photo
pfpImage = await bot.download_media(userPfp)
await bot(UploadProfilePhotoRequest(await bot.upload_file(pfpImage)))
except BaseException:
pass
await bot(
UpdateProfileRequest(about=userAbout, first_name=firstName, last_name=lastName)
)
CMD_HELP.update(
{
"impostor": ">`.impostor` (como uma resposta a uma mensagem de um usuário)\
\n**Uso:** Rouba a identidade do usuário.\
\n\n>`.impostor <username/ID>`\
\n**Uso:** Rouba do nome de usuário/ID fornecido.\
\n\n>`.impostor restore`\
\n**Uso:** Reverta para sua verdadeira identidade.\
\n\n**Sempre restaure antes de executá-lo novamente.**\
"
}
)
|
the-stack_0_10111 |
import tensorflow as tf
import os
import time
from tqdm import tqdm
from src.utils import get_cli_params, process_cli_params, \
order_param_settings
from src.lva import build_graph, measure_smoothness, VERBOSE
from src.train import evaluate_metric_list, update_decays, evaluate_metric
import numpy as np
def main(p):
p = process_cli_params(p)
global VERBOSE
VERBOSE = p.verbose
# -----------------------------
# Set GPU device to use
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(p.which_gpu)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
# Set seeds
np.random.seed(p.seed)
tf.set_random_seed(p.seed)
# Load data
print("=== Loading Data ===")
if p.dataset == 'svhn':
from src.svhn import read_data_sets
dataset = read_data_sets(
"../../data/svhn/",
n_labeled=p.num_labeled,
validation_size=p.validation,
one_hot=True,
disjoint=False,
downsample=True,
download_and_extract=True
)
else:
from src.mnist import read_data_sets
dataset = read_data_sets("MNIST_data",
n_labeled=p.num_labeled,
validation_size=p.validation,
one_hot=True,
disjoint=False)
num_examples = dataset.train.num_examples
p.num_examples = num_examples
if p.validation > 0:
dataset.test = dataset.validation
p.iter_per_epoch = (num_examples // p.ul_batch_size)
p.num_iter = p.iter_per_epoch * p.end_epoch
# -----------------------------
# Build graph
g, m, trainable_parameters = build_graph(p)
# Collect losses
train_losses = [m['loss'], m['cost'], m['uc'], m['vc']]
test_losses = [m['cost']]
aer = tf.constant(100.0) - m['acc']
if p.measure_smoothness:
s = measure_smoothness(g, p)
# print(s.get_shape())
train_losses.append(tf.reduce_mean(s))
if p.tb is not False:
        train_merged = tf.summary.merge([
            tf.summary.scalar(x.op.name, x) for x in train_losses
        ] + [tf.summary.scalar('aer', aer)])
        test_merged = tf.summary.merge([
            tf.summary.scalar(x.op.name, x) for x in test_losses
        ] + [tf.summary.scalar('aer', aer)])
# Set up tensorboard logging
        if not os.path.exists(p.tb_dir):
os.makedirs(p.tb_dir)
# -----------------------------
print("=== Starting Session ===")
sess = tf.Session(config=config)
i_iter = 0
# -----------------------------
id_seed_dir = p.id + "/" + "seed-{}".format(p.seed) + "/"
# Write logs to appropriate directory
log_dir = p.logdir + id_seed_dir
if not os.path.exists(log_dir):
os.makedirs(log_dir)
desc_file = log_dir + "description"
with open(desc_file, 'a') as f:
print(*order_param_settings(p), sep='\n', file=f, flush=True)
print("Trainable parameters:", trainable_parameters, file=f,
flush=True)
log_file = log_dir + "train_log"
# Resume from checkpoint
ckpt_dir = p.ckptdir + id_seed_dir
ckpt = tf.train.get_checkpoint_state(
ckpt_dir) # get latest checkpoint (if any)
if ckpt and ckpt.model_checkpoint_path:
# if checkpoint exists,
# restore the parameters
# and set epoch_n and i_iter
g['saver'].restore(sess, ckpt.model_checkpoint_path)
ep = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[1])
i_iter = (ep + 1) * p.iter_per_epoch
print("Restored Epoch ", ep)
else:
# no checkpoint exists.
# create checkpoints directory if it does not exist.
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
init = tf.global_variables_initializer()
sess.run(init)
i_iter = 0
if p.tb is not False:
train_writer = tf.summary.FileWriter(p.tb_dir + '/train', sess.graph)
test_writer = tf.summary.FileWriter(p.tb_dir + '/test', sess.graph)
# -----------------------------
print("=== Training ===")
# -----------------------------
def eval_metrics(dataset, sess, ops):
return evaluate_metric_list(dataset, sess, ops, graph=g, params=p)
def eval_metric(dataset, sess, op):
return evaluate_metric(dataset, sess, op, graph=g, params=p)
# Evaluate initial training accuracy and losses
# init_loss = evaluate_metric(
# mnist.train.labeled_ds, sess, cost)
with open(desc_file, 'a') as f:
print('================================', file=f, flush=True)
print("Initial Train AER: ",
eval_metric(dataset.train.labeled_ds, sess, aer),
"%", file=f, flush=True)
# -----------------------------
# Evaluate initial testing accuracy and cross-entropy loss
print("Initial Test AER: ",
eval_metric(dataset.test, sess, aer),
"%", file=f, flush=True)
# print("Initial Test Losses: ",
# *eval_metrics(
# mnist.test, sess, test_losses), file=f,
# flush=True)
train_dict = {g['beta1']: p.beta1, g['lr']: p.initial_learning_rate}
start = time.time()
for i in range(i_iter, p.iter_per_epoch * p.end_epoch):
images, labels = dataset.train.next_batch(p.batch_size, p.ul_batch_size)
train_dict.update({
g['images']: images,
g['labels']: labels,
g['train_flag']: True})
_ = sess.run(
[g['train_step']],
feed_dict=train_dict)
if (i > 1) and ((i + 1) % p.iter_per_epoch == 0):
# Epoch complete?
ep = i // p.iter_per_epoch
# Update learning rate and momentum
if ((ep + 1) >= p.decay_start_epoch) and (
ep % (p.lr_decay_frequency) == 0):
# epoch_n + 1 because learning rate is set for next epoch
ratio = 1.0 * (p.end_epoch - (ep + 1))
decay_epochs = p.end_epoch - p.decay_start_epoch
ratio = max(0., ratio / decay_epochs) if decay_epochs != 0 else 1.0
train_dict[g['lr']] = (p.initial_learning_rate * ratio)
train_dict[g['beta1']] = p.beta1_during_decay
# For the last ten epochs, test every epoch
if (ep + 1) > (p.end_epoch - 10):
p.test_frequency_in_epochs = 1
# ---------------------------------------------
# Evaluate every test_frequency_in_epochs
if int((ep + 1) % p.test_frequency_in_epochs) == 0:
now = time.time() - start
if not p.do_not_save:
g['saver'].save(sess, ckpt_dir + 'model.ckpt', ep)
# ---------------------------------------------
# Compute error on testing set (10k examples)
test_aer_and_costs = \
eval_metrics(dataset.test, sess, [aer] + test_losses)
train_aer = eval_metrics(dataset.train.labeled_ds, sess, [aer])
train_costs = sess.run(train_losses,
feed_dict={g['images']: images,
g['labels']: labels,
g['train_flag']: False})
# Create log of:
# time, epoch number, test accuracy, test cross entropy,
# train accuracy, train loss, train cross entropy,
# train reconstruction loss, smoothness
log_i = [int(now), ep] + test_aer_and_costs + train_aer + \
train_costs
with open(log_file, 'a') as train_log:
print(*log_i, sep=',', flush=True, file=train_log)
with open(desc_file, 'a') as f:
final_aer = eval_metric(dataset.test, sess, aer)
print("Final AER: ", final_aer,
"%", file=f, flush=True)
sess.close()
if __name__ == '__main__':
p = get_cli_params()
main(p)
|
the-stack_0_10113 | # Time: O(|V| + |E|)
# Space: O(|V| + |E|)
import collections
class Solution(object):
def possibleBipartition(self, N, dislikes):
"""
:type N: int
:type dislikes: List[List[int]]
:rtype: bool
"""
adj = [[] for _ in xrange(N)]
for u, v in dislikes:
adj[u-1].append(v-1)
adj[v-1].append(u-1)
        color = [0]*N
        # BFS 2-coloring; start a new BFS from every uncolored node so that
        # disconnected components of the dislike graph are also checked.
        for start in xrange(N):
            if color[start] != 0:
                continue
            color[start] = 1
            q = collections.deque([start])
            while q:
                cur = q.popleft()
                for nei in adj[cur]:
                    if color[nei] == color[cur]:
                        return False
                    elif color[nei] == -color[cur]:
                        continue
                    color[nei] = -color[cur]
                    q.append(nei)
        return True
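# Example: possibleBipartition(4, [[1, 2], [1, 3], [2, 4]]) returns True
# (groups {1, 4} and {2, 3}), while possibleBipartition(3, [[1, 2], [1, 3], [2, 3]])
# returns False because the three mutual dislikes form an odd cycle.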
|
the-stack_0_10115 | def process(uri: str):
f = open(uri, 'r')
stack = [int(x) for x in f.readline().replace('\n', '').strip().split('\t')]
prev_sets = []
steps = 0
while str(stack) not in prev_sets:
prev_sets.append(str(stack))
maxb = max(stack)
index = stack.index(maxb)
stack[index] = 0
for i in range(maxb):
index = (index + 1) if index != len(stack) - 1 else 0
stack[index] += 1
steps += 1
return steps - prev_sets.index(str(stack))
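# Example (Advent of Code 2017, day 6): for an input file containing "0\t2\t7\t0"
# the configuration first repeats after 5 redistribution cycles and the loop it
# enters has length 4, which is what process() returns.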
print(process('input_t'))
print(process('input'))
|
the-stack_0_10117 | #!/usr/bin/python
import sys
from typing import Any, Dict
from requests.api import get
import semver
import requests
repo: str = 'groovy-guru'
owner: str = 'DontShaveTheYak'
def do_action(action, version):
function = getattr(version, action)
new_version = function()
print(f'{version} {action} to {new_version}')
return new_version
def get_response(url) -> Dict[str, Any]:
response = requests.get(url)
return response.json()
def get_action(pull_request: str) -> str:
valid_labels = ['major','minor','patch']
response = get_response(f"https://api.github.com/repos/{owner}/{repo}/pulls/{pull_request}")
label = [label['name'] for label in response['labels'] if label['name'] in valid_labels][0]
return label
def set_output(name: str, value: str):
print(f"::set-output name={name}::{value}")
def get_latest_release() -> str:
response = get_response(f"https://api.github.com/repos/{owner}/{repo}/releases")
for release in response:
if not release['draft'] and not release['prerelease']:
return release
raise Exception('Unable to find production relase.')
latest_tag = sys.argv[1]
pull_request = sys.argv[2]
branch = sys.argv[3]
action_methods = {
'patch': 'bump_patch',
'minor': 'bump_minor',
'major': 'bump_major'
}
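# Illustrative example: with the latest production release tagged 1.4.2 and a
# pull request labeled "minor", the candidate tag becomes 1.5.0-SNAPSHOT
# (unless an equal or higher SNAPSHOT tag already exists, in which case that
# tag is reused).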
if branch != "master":
action_name = get_action(pull_request)
action = action_methods[action_name]
next_version: str = ''
print(f'Latest tag is {latest_tag}')
latest_release = get_latest_release()
release_tag = latest_release['tag_name']
print(f'Latest release is {release_tag}')
if branch == 'master':
print("This release is a final release!")
base_tag = latest_tag.split("-")[0]
bump_rule = "None"
set_output('next_tag', base_tag)
sys.exit(0)
if '-SNAPSHOT' in latest_tag:
print('Checking if we can reuse latest tag.')
latest_tag = latest_tag.split('-')[0]
next_tag = semver.VersionInfo.parse(release_tag)
next_tag = do_action(action, next_tag)
latest_tag = semver.VersionInfo.parse(latest_tag)
compare = semver.compare(str(latest_tag),str(next_tag))
next_tag = f'{next_tag}-SNAPSHOT'
latest_tag = f'{latest_tag}-SNAPSHOT'
if compare == -1:
print(f'Creating {next_tag} because its version is higher than latest tag: {latest_tag}')
next_version = next_tag
elif compare == 1:
print(f'Reusing latest tag ({latest_tag}) because next tag ({next_tag}) is lower.')
next_version = latest_tag
else:
print(f'Reusing latest tag ({latest_tag}) because its version is equal to next tag ({next_tag})')
next_version = latest_tag
else:
# create new snapshot tag and exit
version = semver.VersionInfo.parse(latest_tag)
new_tag = do_action(action, version)
print(f'Creating new SNAPSHOT tag {new_tag}-SNAPSHOT')
next_version = f'{new_tag}-SNAPSHOT'
set_output('next_tag', next_version)
|
the-stack_0_10118 | # -----------------------------------------------------------------------------------------
# Code taken from https://github.com/iwantooxxoox/Keras-OpenFace (with minor modifications)
# -----------------------------------------------------------------------------------------
import tensorflow as tf
import numpy as np
import os
from numpy import genfromtxt
from keras.layers import Conv2D, ZeroPadding2D, Activation
from keras.layers.normalization import BatchNormalization
_FLOATX = 'float32'
def variable(value, dtype=_FLOATX, name=None):
v = tf.Variable(np.asarray(value, dtype=dtype), name=name)
_get_session().run(v.initializer)
return v
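# NOTE: _get_session() is neither defined nor imported in this snippet; it
# presumably refers to the session helper of the old Keras TF backend
# (keras.backend.get_session), so calling variable() as-is would raise a
# NameError until such a helper is provided.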
def shape(x):
return x.get_shape()
def square(x):
return tf.square(x)
def zeros(shape, dtype=_FLOATX, name=None):
return variable(np.zeros(shape), dtype, name)
def concatenate(tensors, axis=-1):
if axis < 0:
axis = axis % len(tensors[0].get_shape())
return tf.concat(axis, tensors)
def LRN2D(x):
return tf.nn.lrn(x, alpha=1e-4, beta=0.75)
def conv2d_bn(
x,
layer=None,
cv1_out=None,
cv1_filter=(1, 1),
cv1_strides=(1, 1),
cv2_out=None,
cv2_filter=(3, 3),
cv2_strides=(1, 1),
padding=None,
):
num = '' if cv2_out == None else '1'
tensor = Conv2D(cv1_out, cv1_filter, strides=cv1_strides, name=layer+'_conv'+num)(x)
tensor = BatchNormalization(axis=3, epsilon=0.00001, name=layer+'_bn'+num)(tensor)
tensor = Activation('relu')(tensor)
if padding == None:
return tensor
tensor = ZeroPadding2D(padding=padding)(tensor)
if cv2_out == None:
return tensor
tensor = Conv2D(cv2_out, cv2_filter, strides=cv2_strides, name=layer+'_conv'+'2')(tensor)
tensor = BatchNormalization(axis=3, epsilon=0.00001, name=layer+'_bn'+'2')(tensor)
tensor = Activation('relu')(tensor)
return tensor
weights = [
'conv1', 'bn1', 'conv2', 'bn2', 'conv3', 'bn3',
'inception_3a_1x1_conv', 'inception_3a_1x1_bn',
'inception_3a_pool_conv', 'inception_3a_pool_bn',
'inception_3a_5x5_conv1', 'inception_3a_5x5_conv2', 'inception_3a_5x5_bn1', 'inception_3a_5x5_bn2',
'inception_3a_3x3_conv1', 'inception_3a_3x3_conv2', 'inception_3a_3x3_bn1', 'inception_3a_3x3_bn2',
'inception_3b_3x3_conv1', 'inception_3b_3x3_conv2', 'inception_3b_3x3_bn1', 'inception_3b_3x3_bn2',
'inception_3b_5x5_conv1', 'inception_3b_5x5_conv2', 'inception_3b_5x5_bn1', 'inception_3b_5x5_bn2',
'inception_3b_pool_conv', 'inception_3b_pool_bn',
'inception_3b_1x1_conv', 'inception_3b_1x1_bn',
'inception_3c_3x3_conv1', 'inception_3c_3x3_conv2', 'inception_3c_3x3_bn1', 'inception_3c_3x3_bn2',
'inception_3c_5x5_conv1', 'inception_3c_5x5_conv2', 'inception_3c_5x5_bn1', 'inception_3c_5x5_bn2',
'inception_4a_3x3_conv1', 'inception_4a_3x3_conv2', 'inception_4a_3x3_bn1', 'inception_4a_3x3_bn2',
'inception_4a_5x5_conv1', 'inception_4a_5x5_conv2', 'inception_4a_5x5_bn1', 'inception_4a_5x5_bn2',
'inception_4a_pool_conv', 'inception_4a_pool_bn',
'inception_4a_1x1_conv', 'inception_4a_1x1_bn',
'inception_4e_3x3_conv1', 'inception_4e_3x3_conv2', 'inception_4e_3x3_bn1', 'inception_4e_3x3_bn2',
'inception_4e_5x5_conv1', 'inception_4e_5x5_conv2', 'inception_4e_5x5_bn1', 'inception_4e_5x5_bn2',
'inception_5a_3x3_conv1', 'inception_5a_3x3_conv2', 'inception_5a_3x3_bn1', 'inception_5a_3x3_bn2',
'inception_5a_pool_conv', 'inception_5a_pool_bn',
'inception_5a_1x1_conv', 'inception_5a_1x1_bn',
'inception_5b_3x3_conv1', 'inception_5b_3x3_conv2', 'inception_5b_3x3_bn1', 'inception_5b_3x3_bn2',
'inception_5b_pool_conv', 'inception_5b_pool_bn',
'inception_5b_1x1_conv', 'inception_5b_1x1_bn',
'dense_layer'
]
conv_shape = {
'conv1': [64, 3, 7, 7],
'conv2': [64, 64, 1, 1],
'conv3': [192, 64, 3, 3],
'inception_3a_1x1_conv': [64, 192, 1, 1],
'inception_3a_pool_conv': [32, 192, 1, 1],
'inception_3a_5x5_conv1': [16, 192, 1, 1],
'inception_3a_5x5_conv2': [32, 16, 5, 5],
'inception_3a_3x3_conv1': [96, 192, 1, 1],
'inception_3a_3x3_conv2': [128, 96, 3, 3],
'inception_3b_3x3_conv1': [96, 256, 1, 1],
'inception_3b_3x3_conv2': [128, 96, 3, 3],
'inception_3b_5x5_conv1': [32, 256, 1, 1],
'inception_3b_5x5_conv2': [64, 32, 5, 5],
'inception_3b_pool_conv': [64, 256, 1, 1],
'inception_3b_1x1_conv': [64, 256, 1, 1],
'inception_3c_3x3_conv1': [128, 320, 1, 1],
'inception_3c_3x3_conv2': [256, 128, 3, 3],
'inception_3c_5x5_conv1': [32, 320, 1, 1],
'inception_3c_5x5_conv2': [64, 32, 5, 5],
'inception_4a_3x3_conv1': [96, 640, 1, 1],
'inception_4a_3x3_conv2': [192, 96, 3, 3],
'inception_4a_5x5_conv1': [32, 640, 1, 1,],
'inception_4a_5x5_conv2': [64, 32, 5, 5],
'inception_4a_pool_conv': [128, 640, 1, 1],
'inception_4a_1x1_conv': [256, 640, 1, 1],
'inception_4e_3x3_conv1': [160, 640, 1, 1],
'inception_4e_3x3_conv2': [256, 160, 3, 3],
'inception_4e_5x5_conv1': [64, 640, 1, 1],
'inception_4e_5x5_conv2': [128, 64, 5, 5],
'inception_5a_3x3_conv1': [96, 1024, 1, 1],
'inception_5a_3x3_conv2': [384, 96, 3, 3],
'inception_5a_pool_conv': [96, 1024, 1, 1],
'inception_5a_1x1_conv': [256, 1024, 1, 1],
'inception_5b_3x3_conv1': [96, 736, 1, 1],
'inception_5b_3x3_conv2': [384, 96, 3, 3],
'inception_5b_pool_conv': [96, 736, 1, 1],
'inception_5b_1x1_conv': [256, 736, 1, 1],
}
def load_weights():
weightsDir = './weights'
fileNames = filter(lambda f: not f.startswith('.'), os.listdir(weightsDir))
paths = {}
weights_dict = {}
for n in fileNames:
paths[n.replace('.csv', '')] = weightsDir + '/' + n
for name in weights:
if 'conv' in name:
conv_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)
conv_w = np.reshape(conv_w, conv_shape[name])
conv_w = np.transpose(conv_w, (2, 3, 1, 0))
conv_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)
weights_dict[name] = [conv_w, conv_b]
elif 'bn' in name:
bn_w = genfromtxt(paths[name + '_w'], delimiter=',', dtype=None)
bn_b = genfromtxt(paths[name + '_b'], delimiter=',', dtype=None)
bn_m = genfromtxt(paths[name + '_m'], delimiter=',', dtype=None)
bn_v = genfromtxt(paths[name + '_v'], delimiter=',', dtype=None)
weights_dict[name] = [bn_w, bn_b, bn_m, bn_v]
elif 'dense' in name:
dense_w = genfromtxt(weightsDir+'/dense_w.csv', delimiter=',', dtype=None)
dense_w = np.reshape(dense_w, (128, 736))
dense_w = np.transpose(dense_w, (1, 0))
dense_b = genfromtxt(weightsDir+'/dense_b.csv', delimiter=',', dtype=None)
weights_dict[name] = [dense_w, dense_b]
return weights_dict
|
the-stack_0_10120 | import jax.numpy as jnp
import numpy as np
import netket as nk
import flax.linen as nn
class test(nn.Module):
@nn.compact
def __call__(self, x):
nothing = self.param("nothing", lambda *args: jnp.ones(1))
if len(x.shape) != 1:
return jnp.array(x.size * [1.0])
return 1.0
class test2(nn.Module):
@nn.compact
def __call__(self, x):
nothing = self.param("nothing", lambda *args: jnp.ones(1))
sol = jnp.sum(nothing ** 2 * x, axis=-1)
return sol
# continuous preparations
def v1(x):
return 1 / jnp.sqrt(2 * jnp.pi) * jnp.sum(jnp.exp(-0.5 * ((x - 2.5) ** 2)), axis=-1)
def v2(x):
return 1 / jnp.sqrt(2 * jnp.pi) * jnp.sum(jnp.exp(-0.5 * ((x - 2.5) ** 2)), axis=-1)
hilb = nk.hilbert.Particle(N=1, L=5, pbc=True)
pot = nk.operator.PotentialEnergy(hilb, v1)
kin = nk.operator.KineticEnergy(hilb, mass=1.0)
e = pot + kin
sab = nk.sampler.MetropolisGaussian(hilb, sigma=1.0, n_chains=16, n_sweeps=1)
model = test()
model2 = test2()
vs_continuous = nk.vqs.MCState(sab, model, n_samples=10 ** 6, n_discard=2000)
vs_continuous2 = nk.vqs.MCState(sab, model2, n_samples=10 ** 7, n_discard=2000)
def test_expect():
x = vs_continuous2.samples.reshape(-1, 1)
sol = vs_continuous.expect(pot)
O_stat, O_grad = vs_continuous2.expect_and_grad(e)
O_grad, _ = nk.jax.tree_ravel(O_grad)
O_grad_exact = 2 * jnp.dot(x.T, (v1(x) - jnp.mean(v1(x), axis=0))) / x.shape[0]
r"""
:math:`<V> = \int_0^5 dx V(x) |\psi(x)|^2 / \int_0^5 |\psi(x)|^2 = 0.1975164 (\psi = 1)`
:math:`<\nabla V> = \nabla_p \int_0^5 dx V(x) |\psi(x)|^2 / \int_0^5 |\psi(x)|^2 = -0.140256 (\psi = \exp(p^2 x))`
"""
np.testing.assert_allclose(0.1975164, sol.mean, atol=10 ** (-3))
np.testing.assert_allclose(-0.140256, 2 * O_grad, atol=10 ** (-3))
|
the-stack_0_10121 | # pylint: disable=g-bad-file-header
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Configuring the C++ toolchain on Windows."""
load(
"@bazel_tools//tools/cpp:lib_cc_configure.bzl",
"escape_string",
"auto_configure_fail",
"auto_configure_warning",
"get_env_var",
"which",
"which_cmd",
"execute",
"tpl",
"is_cc_configure_debug",
)
def _get_escaped_windows_msys_crosstool_content(repository_ctx, use_mingw = False):
"""Return the content of msys crosstool which is still the default CROSSTOOL on Windows."""
bazel_sh = get_env_var(repository_ctx, "BAZEL_SH").replace("\\", "/").lower()
tokens = bazel_sh.rsplit("/", 1)
prefix = "mingw64" if use_mingw else "usr"
msys_root = None
if tokens[0].endswith("/usr/bin"):
msys_root = tokens[0][:len(tokens[0]) - len("usr/bin")]
elif tokens[0].endswith("/bin"):
msys_root = tokens[0][:len(tokens[0]) - len("bin")]
if not msys_root:
auto_configure_fail(
"Could not determine MSYS/Cygwin root from BAZEL_SH (%s)" % bazel_sh)
escaped_msys_root = escape_string(msys_root)
return (((
' abi_version: "local"\n' +
' abi_libc_version: "local"\n' +
' builtin_sysroot: ""\n' +
' compiler: "msys-gcc"\n' +
' host_system_name: "local"\n' +
' needsPic: false\n' +
' target_libc: "msys"\n' +
' target_cpu: "x64_windows"\n' +
' target_system_name: "local"\n') if not use_mingw else '') +
' tool_path { name: "ar" path: "%s%s/bin/ar" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "compat-ld" path: "%s%s/bin/ld" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "cpp" path: "%s%s/bin/cpp" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "dwp" path: "%s%s/bin/dwp" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "gcc" path: "%s%s/bin/gcc" }\n' % (escaped_msys_root, prefix) +
' cxx_flag: "-std=gnu++0x"\n' +
' linker_flag: "-lstdc++"\n' +
' cxx_builtin_include_directory: "%s%s/"\n' % (escaped_msys_root, prefix) +
' tool_path { name: "gcov" path: "%s%s/bin/gcov" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "ld" path: "%s%s/bin/ld" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "nm" path: "%s%s/bin/nm" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "objcopy" path: "%s%s/bin/objcopy" }\n' % (escaped_msys_root, prefix) +
' objcopy_embed_flag: "-I"\n' +
' objcopy_embed_flag: "binary"\n' +
' tool_path { name: "objdump" path: "%s%s/bin/objdump" }\n' % (escaped_msys_root, prefix) +
' tool_path { name: "strip" path: "%s%s/bin/strip" }'% (escaped_msys_root, prefix) +
' feature { name: "targets_windows" implies: "copy_dynamic_libraries_to_binary" enabled: true }' +
' feature { name: "copy_dynamic_libraries_to_binary" }' )
def _get_system_root(repository_ctx):
r"""Get System root path on Windows, default is C:\\Windows. Doesn't %-escape the result."""
if "SYSTEMROOT" in repository_ctx.os.environ:
return escape_string(repository_ctx.os.environ["SYSTEMROOT"])
auto_configure_warning("SYSTEMROOT is not set, using default SYSTEMROOT=C:\\Windows")
return "C:\\Windows"
def _find_cuda(repository_ctx):
"""Find out if and where cuda is installed. Doesn't %-escape the result."""
if "CUDA_PATH" in repository_ctx.os.environ:
return repository_ctx.os.environ["CUDA_PATH"]
nvcc = which(repository_ctx, "nvcc.exe")
if nvcc:
return nvcc[:-len("/bin/nvcc.exe")]
return None
def _find_python(repository_ctx):
"""Find where is python on Windows. Doesn't %-escape the result."""
if "BAZEL_PYTHON" in repository_ctx.os.environ:
python_binary = repository_ctx.os.environ["BAZEL_PYTHON"]
if not python_binary.endswith(".exe"):
python_binary = python_binary + ".exe"
return python_binary
auto_configure_warning("'BAZEL_PYTHON' is not set, start looking for python in PATH.")
python_binary = which_cmd(repository_ctx, "python.exe")
auto_configure_warning("Python found at %s" % python_binary)
return python_binary
def _add_system_root(repository_ctx, env):
r"""Running VCVARSALL.BAT and VCVARSQUERYREGISTRY.BAT need %SYSTEMROOT%\\system32 in PATH."""
if "PATH" not in env:
env["PATH"] = ""
env["PATH"] = env["PATH"] + ";" + _get_system_root(repository_ctx) + "\\system32"
return env
def find_vc_path(repository_ctx):
"""Find Visual C++ build tools install path. Doesn't %-escape the result."""
# 1. Check if BAZEL_VC or BAZEL_VS is already set by user.
if "BAZEL_VC" in repository_ctx.os.environ:
return repository_ctx.os.environ["BAZEL_VC"]
if "BAZEL_VS" in repository_ctx.os.environ:
return repository_ctx.os.environ["BAZEL_VS"] + "\\VC\\"
auto_configure_warning("'BAZEL_VC' is not set, " +
"start looking for the latest Visual C++ installed.")
# 2. Check if VS%VS_VERSION%COMNTOOLS is set, if true then try to find and use
# vcvarsqueryregistry.bat to detect VC++.
auto_configure_warning("Looking for VS%VERSION%COMNTOOLS environment variables, " +
"eg. VS140COMNTOOLS")
for vscommontools_env in ["VS140COMNTOOLS", "VS120COMNTOOLS",
"VS110COMNTOOLS", "VS100COMNTOOLS", "VS90COMNTOOLS"]:
if vscommontools_env not in repository_ctx.os.environ:
continue
vcvarsqueryregistry = repository_ctx.os.environ[vscommontools_env] + "\\vcvarsqueryregistry.bat"
if not repository_ctx.path(vcvarsqueryregistry).exists:
continue
repository_ctx.file("get_vc_dir.bat",
"@echo off\n" +
"call \"" + vcvarsqueryregistry + "\"\n" +
"echo %VCINSTALLDIR%", True)
env = _add_system_root(repository_ctx, repository_ctx.os.environ)
vc_dir = execute(repository_ctx, ["./get_vc_dir.bat"], environment=env)
auto_configure_warning("Visual C++ build tools found at %s" % vc_dir)
return vc_dir
# 3. User might clean up all environment variables, if so looking for Visual C++ through registry.
# Works for all VS versions, including Visual Studio 2017.
auto_configure_warning("Looking for Visual C++ through registry")
reg_binary = _get_system_root(repository_ctx) + "\\system32\\reg.exe"
vc_dir = None
for key, suffix in (("VC7", ""), ("VS7", "\\VC")):
for version in ["15.0", "14.0", "12.0", "11.0", "10.0", "9.0", "8.0"]:
if vc_dir:
break
result = repository_ctx.execute([reg_binary, "query", "HKEY_LOCAL_MACHINE\\SOFTWARE\\Wow6432Node\\Microsoft\\VisualStudio\\SxS\\" + key, "/v", version])
if is_cc_configure_debug(repository_ctx):
auto_configure_warning("registry query result for VC %s:\n\nSTDOUT(start)\n%s\nSTDOUT(end)\nSTDERR(start):\n%s\nSTDERR(end)\n" %
(version, result.stdout, result.stderr))
if not result.stderr:
for line in result.stdout.split("\n"):
line = line.strip()
if line.startswith(version) and line.find("REG_SZ") != -1:
vc_dir = line[line.find("REG_SZ") + len("REG_SZ"):].strip() + suffix
if not vc_dir:
return None
auto_configure_warning("Visual C++ build tools found at %s" % vc_dir)
return vc_dir
def _is_vs_2017(vc_path):
"""Check if the installed VS version is Visual Studio 2017."""
# In VS 2017, the location of VC is like:
# C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\
# In VS 2015 or older version, it is like:
# C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\
return vc_path.find("2017") != -1
def _find_vcvarsall_bat_script(repository_ctx, vc_path):
"""Find vcvarsall.bat script. Doesn't %-escape the result."""
if _is_vs_2017(vc_path):
vcvarsall = vc_path + "\\Auxiliary\\Build\\VCVARSALL.BAT"
else:
vcvarsall = vc_path + "\\VCVARSALL.BAT"
if not repository_ctx.path(vcvarsall).exists:
return None
return vcvarsall
def _find_env_vars(repository_ctx, vc_path):
"""Get environment variables set by VCVARSALL.BAT. Doesn't %-escape the result!"""
vcvarsall = _find_vcvarsall_bat_script(repository_ctx, vc_path)
repository_ctx.file("get_env.bat",
"@echo off\n" +
"call \"" + vcvarsall + "\" amd64 > NUL \n" +
"echo PATH=%PATH%,INCLUDE=%INCLUDE%,LIB=%LIB% \n", True)
env = _add_system_root(repository_ctx,
{"PATH": "", "INCLUDE": "", "LIB": ""})
envs = execute(repository_ctx, ["./get_env.bat"], environment=env).split(",")
env_map = {}
for env in envs:
key, value = env.split("=", 1)
env_map[key] = escape_string(value.replace("\\", "\\\\"))
return env_map
def find_msvc_tool(repository_ctx, vc_path, tool):
"""Find the exact path of a specific build tool in MSVC. Doesn't %-escape the result."""
tool_path = ""
if _is_vs_2017(vc_path):
# For VS 2017, the tools are under a directory like:
# C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC\Tools\MSVC\14.10.24930\bin\HostX64\x64
dirs = repository_ctx.path(vc_path + "\\Tools\\MSVC").readdir()
if len(dirs) < 1:
return None
# Normally there should be only one child directory under %VC_PATH%\TOOLS\MSVC,
# but iterate every directory to be more robust.
for path in dirs:
tool_path = str(path) + "\\bin\\HostX64\\x64\\" + tool
if repository_ctx.path(tool_path).exists:
break
else:
# For VS 2015 and older version, the tools are under:
# C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\amd64
tool_path = vc_path + "\\bin\\amd64\\" + tool
if not repository_ctx.path(tool_path).exists:
return None
return tool_path
def _find_missing_vc_tools(repository_ctx, vc_path):
"""Check if any required tool is missing under given VC path."""
missing_tools = []
if not _find_vcvarsall_bat_script(repository_ctx, vc_path):
missing_tools.append("VCVARSALL.BAT")
for tool in ["cl.exe", "link.exe", "lib.exe", "ml64.exe"]:
if not find_msvc_tool(repository_ctx, vc_path, tool):
missing_tools.append(tool)
return missing_tools
def _is_support_whole_archive(repository_ctx, vc_path):
"""Run MSVC linker alone to see if it supports /WHOLEARCHIVE."""
env = repository_ctx.os.environ
if "NO_WHOLE_ARCHIVE_OPTION" in env and env["NO_WHOLE_ARCHIVE_OPTION"] == "1":
return False
linker = find_msvc_tool(repository_ctx, vc_path, "link.exe")
result = execute(repository_ctx, [linker], expect_failure = True)
return result.find("/WHOLEARCHIVE") != -1
def _is_support_debug_fastlink(repository_ctx, vc_path):
"""Run MSVC linker alone to see if it supports /DEBUG:FASTLINK."""
linker = find_msvc_tool(repository_ctx, vc_path, "link.exe")
result = execute(repository_ctx, [linker], expect_failure = True)
return result.find("/DEBUG[:{FASTLINK|FULL|NONE}]") != -1
def _is_use_msvc_wrapper(repository_ctx):
"""Returns True if USE_MSVC_WRAPPER is set to 1."""
env = repository_ctx.os.environ
return "USE_MSVC_WRAPPER" in env and env["USE_MSVC_WRAPPER"] == "1"
def _get_compilation_mode_content():
"""Return the content for adding flags for different compilation modes when using MSVC wrapper."""
return "\n".join([
" compilation_mode_flags {",
" mode: DBG",
" compiler_flag: '-Xcompilation-mode=dbg'",
" linker_flag: '-Xcompilation-mode=dbg'",
" }",
" compilation_mode_flags {",
" mode: FASTBUILD",
" compiler_flag: '-Xcompilation-mode=fastbuild'",
" linker_flag: '-Xcompilation-mode=fastbuild'",
" }",
" compilation_mode_flags {",
" mode: OPT",
" compiler_flag: '-Xcompilation-mode=opt'",
" linker_flag: '-Xcompilation-mode=opt'",
" }"])
def _escaped_cuda_compute_capabilities(repository_ctx):
"""Returns a %-escaped list of strings representing cuda compute capabilities."""
if "CUDA_COMPUTE_CAPABILITIES" not in repository_ctx.os.environ:
return ["3.5", "5.2"]
capabilities_str = escape_string(repository_ctx.os.environ["CUDA_COMPUTE_CAPABILITIES"])
capabilities = capabilities_str.split(",")
for capability in capabilities:
# Workaround for Skylark's lack of support for regex. This check should
# be equivalent to checking:
# if re.match("[0-9]+.[0-9]+", capability) == None:
parts = capability.split(".")
if len(parts) != 2 or not parts[0].isdigit() or not parts[1].isdigit():
auto_configure_fail("Invalid compute capability: %s" % capability)
return capabilities
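# Example of the parsing above (illustrative value): CUDA_COMPUTE_CAPABILITIES="3.7,6.1"
# yields ["3.7", "6.1"], while a malformed entry such as "61" trips auto_configure_fail.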
def configure_windows_toolchain(repository_ctx):
"""Configure C++ toolchain on Windows."""
repository_ctx.symlink(Label("@bazel_tools//tools/cpp:BUILD.static"), "BUILD")
vc_path = find_vc_path(repository_ctx)
missing_tools = None
vc_installation_error_script = "vc_installation_error.bat"
if not vc_path:
tpl(repository_ctx, vc_installation_error_script, {"%{vc_error_message}" : ""})
else:
missing_tools = _find_missing_vc_tools(repository_ctx, vc_path)
if missing_tools:
tpl(repository_ctx, vc_installation_error_script, {
"%{vc_error_message}" : "\r\n".join([
"echo. 1>&2",
"echo Visual C++ build tools seems to be installed at %s 1>&2" % vc_path,
"echo But Bazel can't find the following tools: 1>&2",
"echo %s 1>&2" % ", ".join(missing_tools),
"echo. 1>&2",
])})
if not vc_path or missing_tools:
tpl(repository_ctx, "CROSSTOOL", {
"%{cpu}": "x64_windows",
"%{default_toolchain_name}": "msvc_x64",
"%{toolchain_name}": "msys_x64",
"%{msvc_env_tmp}": "",
"%{msvc_env_path}": "",
"%{msvc_env_include}": "",
"%{msvc_env_lib}": "",
"%{msvc_cl_path}": vc_installation_error_script,
"%{msvc_ml_path}": vc_installation_error_script,
"%{msvc_link_path}": vc_installation_error_script,
"%{msvc_lib_path}": vc_installation_error_script,
"%{dbg_mode_debug}": "/DEBUG",
"%{fastbuild_mode_debug}": "/DEBUG",
"%{compilation_mode_content}": "",
"%{content}": _get_escaped_windows_msys_crosstool_content(repository_ctx),
"%{msys_x64_mingw_content}": _get_escaped_windows_msys_crosstool_content(repository_ctx, use_mingw = True),
"%{opt_content}": "",
"%{dbg_content}": "",
"%{link_content}": "",
"%{cxx_builtin_include_directory}": "",
"%{coverage}": "",
})
return
env = _find_env_vars(repository_ctx, vc_path)
escaped_paths = escape_string(env["PATH"])
escaped_include_paths = escape_string(env["INCLUDE"])
escaped_lib_paths = escape_string(env["LIB"])
escaped_tmp_dir = escape_string(
get_env_var(repository_ctx, "TMP", "C:\\Windows\\Temp").replace("\\", "\\\\"))
msvc_cl_path = find_msvc_tool(repository_ctx, vc_path, "cl.exe").replace("\\", "/")
msvc_ml_path = find_msvc_tool(repository_ctx, vc_path, "ml64.exe").replace("\\", "/")
msvc_link_path = find_msvc_tool(repository_ctx, vc_path, "link.exe").replace("\\", "/")
msvc_lib_path = find_msvc_tool(repository_ctx, vc_path, "lib.exe").replace("\\", "/")
escaped_cxx_include_directories = []
compilation_mode_content = ""
if _is_use_msvc_wrapper(repository_ctx):
if _is_support_whole_archive(repository_ctx, vc_path):
support_whole_archive = "True"
else:
support_whole_archive = "False"
nvcc_tmp_dir_name = escaped_tmp_dir + "\\\\nvcc_inter_files_tmp_dir"
# Make sure nvcc.exe is in PATH
cuda_path = _find_cuda(repository_ctx)
if cuda_path:
escaped_paths = escape_string(cuda_path.replace("\\", "\\\\") + "/bin;") + escaped_paths
escaped_compute_capabilities = _escaped_cuda_compute_capabilities(repository_ctx)
tpl(repository_ctx, "wrapper/bin/pydir/msvc_tools.py", {
"%{lib_tool}": escape_string(msvc_lib_path),
"%{support_whole_archive}": support_whole_archive,
"%{cuda_compute_capabilities}": ", ".join(
["\"%s\"" % c for c in escaped_compute_capabilities]),
"%{nvcc_tmp_dir_name}": nvcc_tmp_dir_name,
})
# nvcc will generate some source files under %{nvcc_tmp_dir_name}
    # The generated files are guaranteed to have unique names, so they can share the same tmp directory
escaped_cxx_include_directories += [ "cxx_builtin_include_directory: \"%s\"" % nvcc_tmp_dir_name ]
msvc_wrapper = repository_ctx.path(Label("@bazel_tools//tools/cpp:CROSSTOOL")).dirname.get_child("wrapper").get_child("bin")
for f in ["msvc_cl.bat", "msvc_link.bat", "msvc_nop.bat"]:
repository_ctx.symlink(msvc_wrapper.get_child(f), "wrapper/bin/" + f)
msvc_wrapper = msvc_wrapper.get_child("pydir")
for f in ["msvc_cl.py", "msvc_link.py"]:
repository_ctx.symlink(msvc_wrapper.get_child(f), "wrapper/bin/pydir/" + f)
python_binary = _find_python(repository_ctx)
tpl(repository_ctx, "wrapper/bin/call_python.bat", {"%{python_binary}": escape_string(python_binary)})
msvc_cl_path = "wrapper/bin/msvc_cl.bat"
msvc_link_path = "wrapper/bin/msvc_link.bat"
msvc_lib_path = "wrapper/bin/msvc_link.bat"
compilation_mode_content = _get_compilation_mode_content()
for path in escaped_include_paths.split(";"):
if path:
escaped_cxx_include_directories.append("cxx_builtin_include_directory: \"%s\"" % path)
support_debug_fastlink = _is_support_debug_fastlink(repository_ctx, vc_path)
tpl(repository_ctx, "CROSSTOOL", {
"%{cpu}": "x64_windows",
"%{default_toolchain_name}": "msvc_x64",
"%{toolchain_name}": "msys_x64",
"%{msvc_env_tmp}": escaped_tmp_dir,
"%{msvc_env_path}": escaped_paths,
"%{msvc_env_include}": escaped_include_paths,
"%{msvc_env_lib}": escaped_lib_paths,
"%{msvc_cl_path}": msvc_cl_path,
"%{msvc_ml_path}": msvc_ml_path,
"%{msvc_link_path}": msvc_link_path,
"%{msvc_lib_path}": msvc_lib_path,
"%{dbg_mode_debug}": "/DEBUG:FULL" if support_debug_fastlink else "/DEBUG",
"%{fastbuild_mode_debug}": "/DEBUG:FASTLINK" if support_debug_fastlink else "/DEBUG",
"%{compilation_mode_content}": compilation_mode_content,
"%{content}": _get_escaped_windows_msys_crosstool_content(repository_ctx),
"%{msys_x64_mingw_content}": _get_escaped_windows_msys_crosstool_content(repository_ctx, use_mingw = True),
"%{opt_content}": "",
"%{dbg_content}": "",
"%{link_content}": "",
"%{cxx_builtin_include_directory}": "\n".join(escaped_cxx_include_directories),
"%{coverage}": "",
})
|
the-stack_0_10122 | """
Commands for home spaces/rooms.
"""
from evennia import CmdSet
from commands.base import ArxCommand
from django.conf import settings
from world.dominion.models import LIFESTYLES
from django.db.models import Q
from evennia.objects.models import ObjectDB
from world.dominion.models import AssetOwner, Organization, CraftingRecipe
from commands.base_commands.crafting import CmdCraft
from commands.base_commands.overrides import CmdDig
from server.utils.prettytable import PrettyTable
from server.utils.arx_utils import inform_staff, raw
from evennia.utils import utils
from evennia.utils.evtable import EvTable
from typeclasses.characters import Character
import re
# error return function, needed by Extended Look command
AT_SEARCH_RESULT = utils.variable_from_module(*settings.SEARCH_AT_RESULT.rsplit('.', 1))
DESC_COST = 0
class HomeCmdSet(CmdSet):
"""CmdSet for a home spaces."""
key = "HomeCmdSet"
priority = 101
duplicates = False
no_exits = False
no_objs = False
def at_cmdset_creation(self):
"""
This is the only method defined in a cmdset, called during
its creation. It should populate the set with command instances.
Note that it can also take other cmdsets as arguments, which will
be used by the character default cmdset to add all of these onto
the internal cmdset stack. They will then be able to removed or
replaced as needed.
"""
self.add(CmdManageHome())
class CmdManageHome(ArxCommand):
"""
+home
Usage:
+home
+home/lock
+home/unlock
+home/key <character>
+home/passmsg <message people see when entering>
+home/lockmsg <message those who can't enter see>
+home/rmkey <character>
+home/lifestyle <rating>
Controls your home. /passmsg is for use of the 'pass' command to
go through a locked door. /lockmsg is for those who are denied
entry. /lifestyle is to control how much silver you spend per
week and earn prestige.
"""
key = "+home"
# aliases = ["@home"]
locks = "cmd:all()"
help_category = "Home"
def display_lifestyles(self):
"""Displays table of Dominion lifestyles with the character's current selection"""
caller = self.caller
table = PrettyTable(["{wRating{n", "{wCost{n", "{wPrestige{n"])
caller.msg("{wLifestyles:{n")
for rating in LIFESTYLES:
num = str(rating)
if caller.player_ob.Dominion.lifestyle_rating == rating:
num += '{w*{n'
table.add_row([num, LIFESTYLES[rating][0], LIFESTYLES[rating][1]])
caller.msg(str(table), options={'box': True})
def func(self):
"""Execute command."""
caller = self.caller
loc = caller.location
entrances = loc.entrances
owners = loc.db.owners or []
keylist = loc.db.keylist or []
if caller not in owners and not caller.check_permstring("builders"):
caller.msg("You are not the owner of this room.")
return
if not self.args and not self.switches:
locked = "{rlocked{n" if loc.db.locked else "{wunlocked{n"
caller.msg("Your home is currently %s." % locked)
caller.msg("{wOwners:{n %s" % ", ".join(str(ob) for ob in owners))
caller.msg("{wCharacters who have keys:{n %s" % ", ".join(str(ob) for ob in keylist))
entrance = entrances[0]
entmsg = entrance.db.success_traverse or ""
errmsg = entrance.db.err_traverse or ""
caller.msg("{wMessage upon passing through locked door:{n %s" % entmsg)
caller.msg("{wMessage upon being denied access:{n %s" % errmsg)
return
if "unlock" in self.switches:
# we only show as locked if -all- entrances are locked
for ent in entrances:
ent.unlock_exit()
loc.db.locked = False
caller.msg("Your house is now unlocked.")
return
if "lock" in self.switches:
loc.db.locked = True
caller.msg("Your house is now locked.")
for ent in entrances:
ent.lock_exit()
return
if "lifestyle" in self.switches and not self.args:
# list lifestyles
self.display_lifestyles()
return
if not self.args:
caller.msg("You must provide an argument to the command.")
return
if "lockmsg" in self.switches:
for r_exit in entrances:
r_exit.db.err_traverse = self.args
caller.msg("{wThe message those who can't enter now see is{n: %s" % self.args)
return
if "passmsg" in self.switches:
for r_exit in entrances:
r_exit.db.success_traverse = self.args
caller.msg("{wThe message those who enter will now see is{n: %s" % self.args)
return
if "lifestyle" in self.switches or "lifestyles" in self.switches:
if caller not in owners:
caller.msg("You may only set the lifestyle rating for an owner.")
return
try:
LIFESTYLES[int(self.args)]
except (KeyError, TypeError, ValueError):
caller.msg("%s is not a valid lifestyle." % self.args)
self.display_lifestyles()
return
caller.player_ob.Dominion.lifestyle_rating = int(self.args)
caller.player_ob.Dominion.save()
caller.msg("Your lifestyle rating has been set to %s." % self.args)
return
player = caller.player.search(self.lhs)
if not player:
return
char = player.char_ob
if not char:
caller.msg("No character found.")
return
keys = char.db.keylist or []
if "key" in self.switches:
if loc in keys and char in keylist:
caller.msg("They already have a key to here.")
return
if loc not in keys:
keys.append(loc)
char.db.keylist = keys
if char not in keylist:
keylist.append(char)
loc.db.keylist = keylist
char.msg("{c%s{w has granted you a key to %s." % (caller, loc))
caller.msg("{wYou have granted {c%s{w a key.{n" % char)
return
if "rmkey" in self.switches:
if loc not in keys and char not in keylist:
caller.msg("They don't have a key to here.")
return
if loc in keys:
keys.remove(loc)
char.db.keylist = keys
if char in keylist:
keylist.remove(char)
loc.db.keylist = keylist
char.msg("{c%s{w has removed your access to %s." % (caller, loc))
caller.msg("{wYou have removed {c%s{w's key.{n" % char)
return
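# Note on the /key and /rmkey branches above: key bookkeeping is two-way. Each
# character stores the rooms they hold keys for in char.db.keylist, and each
# room stores its key holders in loc.db.keylist; both lists are kept in sync.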
class CmdAllowBuilding(ArxCommand):
"""
@allowbuilding
Usage:
@allowbuilding
@allowbuilding all[=<cost>]
@allowbuilding <name>[,<name2>,...][=<cost>]
@allowbuilding/clear
Flags your current room as permitting characters to build there.
The name provided can either be a character or organization name.
Cost is 100 economic resources unless specified otherwise. Max
rooms that anyone can build off here is set by the 'expansion_cap'
attribute, defaults to 1 if not defined. Tracked separately for
each org/player, so any number of people could build 1 room off
a room with expansion_cap of 1 in a room, as long as they are
permitted to do so.
"""
key = "@allowbuilding"
locks = "cmd:perm(Builders)"
help_category = "Building"
def func(self):
"""Execute command."""
caller = self.caller
loc = caller.location
permits = loc.db.permitted_builders or {}
if not self.args and not self.switches:
table = PrettyTable(["Name", "Cost"])
for permit_id in permits:
if permit_id == "all":
owner = "all"
else:
owner = AssetOwner.objects.get(id=permit_id)
cost = permits[permit_id]
table.add_row([str(owner), cost])
caller.msg(str(table))
return
if "clear" in self.switches:
loc.db.permitted_builders = {}
caller.msg("Perms wiped.")
return
cost = self.rhs and int(self.rhs) or 100
for name in self.lhslist:
if name == "all":
permits["all"] = cost
continue
try:
owner = AssetOwner.objects.get(Q(organization_owner__name__iexact=name)
| Q(player__player__username__iexact=name))
except AssetOwner.DoesNotExist:
caller.msg("No owner by name of %s." % name)
continue
permits[owner.id] = cost
loc.db.permitted_builders = permits
caller.msg("Perms set.")
return
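# Illustrative shape of the permitted_builders dict this command edits (IDs are
# hypothetical): {"all": 100, 42: 250} -- the "all" entry applies to anyone,
# while integer keys are AssetOwner ids mapped to their individual build cost.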
class CmdBuildRoom(CmdDig):
"""
+buildroom - build and connect new rooms to the current one
Usage:
+buildroom roomname=exit_to_there[;alias], exit_to_here[;alias]
+buildroom/org orgname/roomname=[exits]
Examples:
+buildroom kitchen = north;n, south;s
+buildroom sheer cliff= climb up, climb down
+buildroom/org velenosa/dungeon=door;d, out;o
This command is a convenient way to build rooms quickly; it creates the
new room and you can optionally set up exits back and forth between your
current room and the new one. You can add as many aliases as you
like to the name of the room and the exits in question; an example
would be 'north;no;n'.
"""
key = "+buildroom"
locks = "cmd:all()"
help_category = "Home"
help_entry_tags = ["housing"]
# noinspection PyAttributeOutsideInit
def func(self):
"""Do the digging. Inherits variables from ObjManipCommand.parse()"""
caller = self.caller
loc = caller.location
# lots of checks and shit here
permits = loc.db.permitted_builders or {}
if not permits:
caller.msg("No one is currently allowed to build a house from here.")
return
expansions = loc.db.expansions or {}
max_expansions = loc.db.expansion_cap or 20
assets = None
# base cost = 1000
dompc = caller.player_ob.Dominion
if "org" in self.switches:
# max_rooms = 100
try:
largs = self.lhs.split("/")
orgname = largs[0]
roomname = largs[1]
except IndexError:
caller.msg("Please specify orgname/roomname.")
return
try:
org = Organization.objects.get(Q(name__iexact=orgname) &
Q(members__player=dompc) &
Q(members__deguilded=False))
if not org.access(caller, 'build'):
caller.msg("You are not permitted to build for this org.")
return
self.lhs = roomname
self.lhslist = [roomname]
self.args = "%s=%s" % (self.lhs, self.rhs)
# fix args for CmdDig
self.parse()
assets = org.assets
cost = permits[assets.id]
except KeyError:
if "all" not in permits:
caller.msg("That org is not permitted to build here.")
return
cost = permits["all"]
except Organization.DoesNotExist:
caller.msg("No org by that name: %s." % orgname)
return
else:
# max_rooms = 3
assets = dompc.assets
if assets.id in permits:
cost = permits[assets.id]
else:
if "all" not in permits:
caller.msg("You are not allowed to build here.")
return
cost = permits["all"]
try:
if expansions.get(assets.id, 0) >= max_expansions:
caller.msg("You have built as many rooms from this space as you are allowed.")
return
except (AttributeError, TypeError, ValueError):
caller.msg("{rError logged.{n")
inform_staff("Room %s has an invalid expansions attribute." % loc.id)
return
if not self.lhs:
caller.msg("The cost for you to build from this room is %s." % cost)
return
if cost > assets.economic:
noun = "you" if dompc.assets == assets else str(assets)
caller.msg("It would cost %s %s to build here, but only have %s." % (noun, cost, assets.economic))
if noun != "you":
caller.msg("Deposit resources into the account of %s." % noun)
return
tagname = "%s_owned_room" % str(assets)
# because who fucking cares
# if tagname not in loc.tags.all() and (
# ObjectDB.objects.filter(Q(db_typeclass_path=settings.BASE_ROOM_TYPECLASS)
# & Q(db_tags__db_key__iexact=tagname)
# ).count() > max_rooms):
# caller.msg("You have as many rooms as you are allowed.")
# return
if not self.rhs or len(self.rhslist) < 2:
caller.msg("You must specify an exit and return exit for the new room.")
return
        if not re.findall(r"^[\-\w'{\[,%;|# ]+$", self.lhs) or not re.findall(r"^[\-\w'{\[,%;|<># ]+$", self.rhs):
caller.msg("Invalid characters entered for names or exits.")
return
new_room = CmdDig.func(self)
if not new_room:
return
assets.economic -= cost
assets.save()
# do setup shit for new room here
new_room.db.room_owner = assets.id
new_room.tags.add("player_made_room")
new_room.tags.add(tagname)
new_room.tags.add("private")
new_room.db.expansion_cap = 20
new_room.db.expansions = {}
new_room.db.cost_increase_per_expansion = 25
cost_increase = loc.db.cost_increase_per_expansion or 0
new_room.db.permitted_builders = {assets.id: cost + cost_increase}
new_room.db.x_coord = loc.db.x_coord
new_room.db.y_coord = loc.db.y_coord
my_expansions = expansions.get(assets.id, 0) + 1
expansions[assets.id] = my_expansions
loc.db.expansions = expansions
new_room.name = new_room.name # this will setup .db.colored_name and strip ansi from key
if cost_increase and assets.id in permits:
permits[assets.id] += cost_increase
loc.db.permitted_builders = permits
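# Cost flow for +buildroom, as implemented above: the room's permitted_builders
# entry supplies the base price, cost_increase_per_expansion (when set) bumps
# that builder's price after each new room, and the newly dug room starts its
# own permits at base cost plus the increase.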
class CmdManageRoom(ArxCommand):
"""
+manageroom
Usage:
+manageroom
+manageroom/name <name>
+manageroom/desc <description>
+manageroom/springdesc <description>
+manageroom/summerdesc <description>
+manageroom/falldesc <description>
+manageroom/winterdesc <description>
+manageroom/exitname <exit>=<new name>
+manageroom/addhome <owner>
+manageroom/confirmhome <owner>
+manageroom/rmhome <owner>
+manageroom/addshop <owner>
+manageroom/confirmshop <owner>
+manageroom/rmshop <owner>
+manageroom/toggleprivate
+manageroom/setbarracks
+manageroom/addbouncer <character>
+manageroom/rmbouncer <character>
+manageroom/adddecorator <character>
+manageroom/rmdecorator <character>
+manageroom/ban <character>
+manageroom/unban <character>
+manageroom/boot <character>=<exit>
    Manages a room that you own, such as a home or shop: its name,
    descriptions, exits, owners, and who may enter.
To set a seasonal description for your room, use /springdesc, /summerdesc,
etc. /desc will always be shown as a fallback otherwise.
You can also embed special time markers in your room description, like this:
```
    <night>In the darkness, the forest looks foreboding.</night>
    <morning>Birds are chirping and whatnot.</morning>
    <afternoon>Birds are no longer chirping.</afternoon>
<evening>THEY WILL NEVER CHIRP AGAIN.</evening>
```
Text marked this way will only display when the server is truly at the given
timeslot. The available times are night, morning, afternoon and evening.
Note that `@detail`, seasons and time-of-day slots only work on rooms in this
version of the `@desc` command.
Owners can appoint characters to be decorators or bouncers, to allow them to
use commands while not owners.
The ban switch prevents characters from being able to enter the room. The boot
switch removes characters from the room. Bouncers are able to use ban and boot.
Decorators are permitted to use the desc switches.
"""
key = "+manageroom"
locks = "cmd:all()"
help_category = "Home"
desc_switches = ("desc", "winterdesc", "springdesc", "summerdesc", "falldesc")
bouncer_switches = ("ban", "unban", "boot")
personnel_switches = ("addbouncer", "rmbouncer", "adddecorator", "rmdecorator")
help_entry_tags = ["housing"]
def check_perms(self):
"""Checks the permissions for the room"""
caller = self.caller
loc = caller.location
if not self.switches or set(self.switches) & set(self.bouncer_switches):
if caller in loc.bouncers:
return True
if not self.switches or set(self.switches) & set(self.desc_switches):
if caller in loc.decorators:
return True
try:
owner = AssetOwner.objects.get(id=loc.db.room_owner)
except AssetOwner.DoesNotExist:
caller.msg("No owner is defined here.")
return
org = owner.organization_owner
if not org and not (owner == caller.player_ob.Dominion.assets
or ('confirmhome' in self.switches or
'confirmshop' in self.switches)):
caller.msg("You are not the owner here.")
return
if org and not (org.access(caller, 'build') or ('confirmhome' in self.switches or
'confirmshop' in self.switches)):
caller.msg("You do not have permission to build here.")
return
return True
def func(self):
"""Execute command."""
caller = self.caller
loc = caller.location
if not self.check_perms():
return
if not self.switches:
# display who has a home here, who has a shop here
owners = loc.db.owners or []
caller.msg("{wHome Owners:{n %s" % ", ".join(str(ob) for ob in owners))
shops = loc.db.shopowner
caller.msg("{wShop Owners:{n %s" % shops)
self.msg("{wBouncers:{n %s" % ", ".join(str(ob) for ob in loc.bouncers))
self.msg("{wDecorators:{n %s" % ", ".join(str(ob) for ob in loc.decorators))
self.msg("{wBanned:{n %s" % ", ".join(str(ob) for ob in loc.banlist))
return
if "name" in self.switches:
loc.name = self.args or loc.name
caller.msg("Room name changed to %s." % loc)
return
if "exitname" in self.switches:
if not self.rhs:
caller.msg("Invalid usage.")
return
rhslist = self.rhs.split(";")
rhs = rhslist[0]
aliases = rhslist[1:]
exit_object = caller.search(self.lhs)
if not exit_object:
return
old = str(exit_object)
if exit_object.typeclass_path != settings.BASE_EXIT_TYPECLASS:
caller.msg("That is not an exit.")
return
exit_object.name = rhs
exit_object.save()
exit_object.aliases.clear()
for alias in aliases:
exit_object.aliases.add(alias)
if exit_object.destination:
exit_object.flush_from_cache()
caller.msg("%s changed to %s." % (old, exit_object))
return
if (set(self.switches) & set(self.personnel_switches)) or (set(self.switches) & set(self.bouncer_switches)):
targ = self.caller.player.search(self.lhs)
if not targ:
return
targ = targ.char_ob
if "addbouncer" in self.switches:
loc.add_bouncer(targ)
self.msg("%s is now a bouncer." % targ)
return
if "rmbouncer" in self.switches:
loc.remove_bouncer(targ)
self.msg("%s is no longer a bouncer." % targ)
return
if "adddecorator" in self.switches:
loc.add_decorator(targ)
self.msg("%s is now a decorator." % targ)
return
if "rmdecorator" in self.switches:
loc.remove_decorator(targ)
self.msg("%s is no longer a decorator." % targ)
return
if "unban" in self.switches:
loc.unban_character(targ)
self.msg("%s is no longer banned from entering." % targ)
return
if "ban" in self.switches:
loc.ban_character(targ)
self.msg("%s is now prevented from entering." % targ)
return
if "boot" in self.switches:
from typeclasses.exits import Exit
exit_obj = self.caller.search(self.rhs, typeclass=Exit)
if not exit_obj:
return
if not exit_obj.can_traverse(targ):
self.msg("They cannot move through that exit.")
return
if targ.location != self.caller.location:
self.msg("They aren't here.")
return
exit_obj.at_traverse(targ, exit_obj.destination)
self.msg("You have kicked out %s." % targ)
targ.msg("You have been kicked out by %s." % self.caller)
return
try:
owner = AssetOwner.objects.get(id=loc.db.room_owner)
except AssetOwner.DoesNotExist:
caller.msg("No owner is defined here.")
return
if set(self.switches) & set(self.desc_switches):
if "player_made_room" not in loc.tags.all():
self.msg("You cannot change the description to a room that was made by a GM.")
return
if loc.desc:
cost = loc.db.desc_cost or DESC_COST
else:
cost = 0
if loc.ndb.confirm_desc_change != self.args:
caller.msg("Your room's current %s is:" % self.switches[0])
if "desc" in self.switches:
caller.msg(loc.desc)
elif "springdesc" in self.switches:
caller.msg(loc.db.spring_desc)
elif "summerdesc" in self.switches:
caller.msg(loc.db.summer_desc)
elif "winterdesc" in self.switches:
caller.msg(loc.db.winter_desc)
elif "falldesc" in self.switches:
caller.msg(loc.db.autumn_desc)
caller.msg("{wCost of changing desc:{n %s economic resources" % cost)
if self.args:
caller.msg("New desc:")
caller.msg(self.args)
caller.msg("{wTo confirm this, use the command again.{n")
caller.msg("{wChanging this desc will prompt you again for a confirmation.{n")
loc.ndb.confirm_desc_change = self.args
return
if cost:
if cost > owner.economic:
caller.msg("It would cost %s to re-desc the room, and you have %s." % (cost, owner.economic))
return
owner.economic -= cost
owner.save()
if "desc" in self.switches:
loc.desc = self.args
if not loc.db.raw_desc:
loc.db.raw_desc = self.args
if not loc.db.general_desc:
loc.db.general_desc = self.args
elif "winterdesc" in self.switches:
loc.db.winter_desc = self.args
elif "summerdesc" in self.switches:
loc.db.summer_desc = self.args
elif "springdesc" in self.switches:
loc.db.spring_desc = self.args
elif "falldesc" in self.switches:
loc.db.autumn_desc = self.args
loc.ndb.confirm_desc_change = None
# force raw_desc to update and parse our descs
loc.ndb.last_season = None
loc.ndb.last_timeslot = None
caller.msg("%s changed to:" % self.switches[0])
caller.msg(self.args)
return
if "confirmhome" in self.switches:
if caller.db.homeproposal != loc:
caller.msg("You don't have an active invitation to accept here. Have them reissue it.")
return
caller.attributes.remove("homeproposal")
loc.setup_home(caller)
caller.msg("You have set up your home here.")
return
if "confirmshop" in self.switches:
if caller.db.shopproposal != loc:
caller.msg("You don't have an active invitation to accept here. Have them reissue it.")
return
caller.attributes.remove("shopproposal")
loc.setup_shop(caller)
caller.msg("You have set up a shop here.")
return
if "toggleprivate" in self.switches:
if "private" in loc.tags.all():
loc.tags.remove("private")
caller.msg("Room no longer private.")
return
loc.tags.add("private")
caller.msg("Room is now private.")
return
if "setbarracks" in self.switches:
tagname = str(owner) + "_barracks"
other_barracks = ObjectDB.objects.filter(db_tags__db_key=tagname)
for obj in other_barracks:
obj.tags.remove(tagname)
loc.tags.add(tagname)
self.msg("%s set to %s's barracks." % (loc, owner))
return
player = caller.player.search(self.args)
if not player:
return
char = player.char_ob
if not char:
caller.msg("No char.")
return
if "addhome" in self.switches or "addshop" in self.switches:
noun = "home" if "addhome" in self.switches else "shop"
if noun == "home":
char.db.homeproposal = loc
else:
char.db.shopproposal = loc
if loc.db.shopowner:
caller.msg("You must shut down the current shop here before adding another.")
return
msg = "%s has offered you a %s. To accept it, go to %s" % (caller, noun, loc.key)
msg += " and use {w+manageroom/confirm%s{n." % noun
player.send_or_queue_msg(msg)
caller.msg("You have offered %s this room as a %s." % (char, noun))
return
if "rmhome" in self.switches:
loc.remove_homeowner(char)
player.send_or_queue_msg("Your home at %s has been removed." % loc)
return
if "rmshop" in self.switches:
loc.del_shop()
player.send_or_queue_msg("Your shop at %s has been removed." % loc)
return
class CmdManageShop(ArxCommand):
"""
+manageshop
Usage:
+manageshop
+manageshop/sellitem <object>=<price>
+manageshop/rmitem <object id>
+manageshop/all <markup percentage>
+manageshop/refinecost <percentage>
+manageshop/addrecipe <recipe name>=<markup percentage>
+manageshop/rmrecipe <recipe name>
+manageshop/addblacklist <player or org name>
+manageshop/rmblacklist <player or org name>
+manageshop/orgdiscount <org name>=<percentage>
+manageshop/chardiscount <character>=<percentage>
+manageshop/adddesign <key>=<code>
+manageshop/rmdesign <key>
Sets prices for your shop. Note that if you use 'all', that will
be used for any recipe you don't explicitly set a price for.
"""
key = "+manageshop"
locks = "cmd:all()"
help_category = "Home"
help_entry_tags = ["shops"]
def list_prices(self):
"""Lists a table of prices for the shop owner"""
loc = self.caller.location
prices = loc.db.crafting_prices or {}
msg = "{wCrafting Prices{n\n"
table = PrettyTable(["{wName{n", "{wPrice Markup Percentage{n"])
for price in prices:
if price == "removed":
continue
if price == "all" or price == "refine":
name = price
else:
name = (CraftingRecipe.objects.get(id=price)).name
table.add_row([name, "%s%%" % prices[price]])
msg += str(table)
msg += "\n{wItem Prices{n\n"
table = EvTable("{wID{n", "{wName{n", "{wPrice{n", width=78, border="cells")
prices = loc.db.item_prices or {}
for price in prices:
obj = ObjectDB.objects.get(id=price)
table.add_row(price, str(obj), prices[price])
msg += str(table)
return msg
def list_designs(self):
"""Lists designs the shop owner has created for crafting templates"""
designs = self.caller.location.db.template_designs or {}
self.msg("{wTemplate designs:{n %s" % ", ".join(designs.keys()))
def func(self):
"""Execute command."""
caller = self.caller
loc = caller.location
if caller != loc.db.shopowner:
caller.msg("You are not the shop's owner.")
return
if not self.args:
caller.msg(self.list_prices())
org_discounts = (loc.db.discounts or {}).items()
char_discounts = (loc.db.char_discounts or {}).items()
# replace char with char.key in char_discounts list
char_discounts = [(ob[0].key, ob[1]) for ob in char_discounts]
discounts = ", ".join(("%s: %s%%" % (ob, val) for ob, val in (org_discounts + char_discounts)))
caller.msg("{wDiscounts{n: %s" % discounts)
blacklist = []
if loc.db.blacklist:
# if ob doesn't have a key, it becomes a string (because corporations aren't ppl)
blacklist = [getattr(ob, 'key', str(ob)) for ob in loc.db.blacklist]
caller.msg("{wBlacklist{n: %s" % ", ".join(blacklist))
self.list_designs()
return
if "sellitem" in self.switches:
try:
price = int(self.rhs)
if price < 0:
raise ValueError
except (TypeError, ValueError):
caller.msg("Price must be a positive number.")
return
results = caller.search(self.lhs, location=caller, quiet=True)
obj = AT_SEARCH_RESULT(results, caller, self.lhs, False,
nofound_string="You don't carry %s." % self.lhs,
multimatch_string="You carry more than one %s:" % self.lhs)
if not obj:
return
obj.at_drop(caller)
obj.location = None
loc.db.item_prices[obj.id] = price
obj.tags.add("for_sale")
obj.db.sale_location = loc
caller.msg("You put %s for sale for %s silver." % (obj, price))
return
if "rmitem" in self.switches:
try:
num = int(self.args)
if num not in loc.db.item_prices:
caller.msg("No item by that ID being sold.")
return
obj = ObjectDB.objects.get(id=num)
except ObjectDB.DoesNotExist:
caller.msg("No object by that ID exists.")
return
except (ValueError, TypeError):
caller.msg("You have to specify the ID # of an item you're trying to remove.")
return
obj.move_to(caller)
obj.tags.remove("for_sale")
obj.attributes.remove("sale_location")
del loc.db.item_prices[obj.id]
caller.msg("You have removed %s from your sale list." % obj)
return
if "all" in self.switches or "refinecost" in self.switches:
try:
cost = int(self.args)
if cost < 0:
raise ValueError
except ValueError:
caller.msg("Cost must be a non-negative number.")
return
if "all" in self.switches:
loc.db.crafting_prices['all'] = cost
caller.msg("Cost for non-specified recipes set to %s percent markup." % cost)
else:
loc.db.crafting_prices['refine'] = cost
caller.msg("Cost for refining set to %s percent markup." % cost)
return
if "addrecipe" in self.switches:
prices = loc.db.crafting_prices or {}
try:
recipe = caller.player_ob.Dominion.assets.recipes.get(name__iexact=self.lhs)
cost = int(self.rhs)
if cost < 0:
raise ValueError
except (TypeError, ValueError):
caller.msg("Cost must be a positive number.")
return
except (CraftingRecipe.DoesNotExist, CraftingRecipe.MultipleObjectsReturned):
caller.msg("Could not retrieve a recipe by that name.")
return
prices[recipe.id] = cost
caller.msg("Price for %s set to %s." % (recipe.name, cost))
removedlist = prices.get("removed", [])
if recipe.id in removedlist:
removedlist.remove(recipe.id)
prices['removed'] = removedlist
loc.db.crafting_prices = prices
return
if "rmrecipe" in self.switches:
arg = None
prices = loc.db.crafting_prices or {}
try:
recipe = None
if self.lhs.lower() == "all":
arg = "all"
elif self.lhs.lower() == "refining":
arg = "refining"
else:
recipe = caller.player_ob.Dominion.assets.recipes.get(name__iexact=self.lhs)
arg = recipe.id
del prices[arg]
caller.msg("Price for %s has been removed." % recipe.name if recipe else arg)
except KeyError:
removedlist = prices.get("removed", [])
if arg in removedlist:
caller.msg("You had no price listed for that recipe.")
else:
try:
removedlist.append(int(arg))
prices["removed"] = removedlist
except ValueError:
caller.msg("Must be an ID.")
except CraftingRecipe.DoesNotExist:
caller.msg("No recipe found by that name.")
finally:
loc.db.crafting_prices = prices
return
if "adddesign" in self.switches:
designs = loc.db.template_designs or {}
try:
if not self.rhs:
self.msg("Design for %s: %s" % (self.lhs, designs[self.lhs]))
return
except KeyError:
self.list_designs()
return
designs[self.lhs] = self.rhs
self.msg("Raw Design for %s is now: %s" % (self.lhs, raw(self.rhs)))
self.msg("Design for %s appears as: %s" % (self.lhs, self.rhs))
loc.db.template_designs = designs
return
if "rmdesign" in self.switches:
designs = loc.db.template_designs or {}
try:
del designs[self.lhs]
self.msg("Design deleted.")
except KeyError:
self.msg("No design by that name.")
self.list_designs()
loc.db.template_designs = designs
return
if "addblacklist" in self.switches or "rmblacklist" in self.switches:
blacklist = loc.db.blacklist or []
try:
targ = caller.player.search(self.args, nofound_string="No player by that name. Checking organizations.")
org = False
if not targ:
org = True
targ = Organization.objects.get(name__iexact=self.args)
else:
targ = targ.char_ob
if "addblacklist" in self.switches:
if org:
if targ.name in blacklist:
caller.msg("They are already in the blacklist.")
return
blacklist.append(targ.name)
else:
if targ in blacklist:
caller.msg("They are already in the blacklist.")
return
blacklist.append(targ)
caller.msg("%s added to blacklist." % getattr(targ, 'key', targ))
else:
if org:
if targ.name not in blacklist:
caller.msg("They are not in the blacklist.")
return
blacklist.remove(targ.name)
else:
if targ not in blacklist:
caller.msg("They are not in the blacklist.")
return
blacklist.remove(targ)
caller.msg("%s removed from blacklist." % getattr(targ, 'key', targ))
except Organization.DoesNotExist:
caller.msg("No valid target found by that name.")
loc.db.blacklist = blacklist
return
if "orgdiscount" in self.switches:
try:
org = Organization.objects.get(name__iexact=self.lhs)
discount = int(self.rhs)
if discount > 100:
raise ValueError
if discount == 0:
loc.db.discounts.pop(org.name, 0)
self.msg("Removed discount for %s." % org)
return
loc.db.discounts[org.name] = discount
caller.msg("%s given a discount of %s percent." % (org, discount))
return
except (TypeError, ValueError):
caller.msg("Discount must be a number, max of 100.")
return
except Organization.DoesNotExist:
caller.msg("No organization by that name found.")
return
if "chardiscount" in self.switches:
if loc.db.char_discounts is None:
loc.db.char_discounts = {}
try:
character = Character.objects.get(db_key__iexact=self.lhs)
discount = int(self.rhs)
if discount > 100:
raise ValueError
if discount == 0:
loc.db.char_discounts.pop(character, 0)
self.msg("Removed discount for %s." % character.key)
return
loc.db.char_discounts[character] = discount
caller.msg("%s given a discount of %s percent." % (character.key, discount))
return
except (TypeError, ValueError):
caller.msg("Discount must be a number, max of 100.")
return
except Character.DoesNotExist:
caller.msg("No character found by that name.")
return
caller.msg("Invalid switch.")
class CmdBuyFromShop(CmdCraft):
"""
+shop
Usage:
+shop
+shop/filter <word in item name>
+shop/buy <item number>
+shop/look <item number>
+shop/viewdesigns [<key>]
+shop/name <name>
+shop/desc <description>
+shop/altdesc <description>
+shop/adorn <material type>=<amount>
+shop/translated_text <language>=<text>
+shop/finish [<additional silver to invest>,<AP to invest>]
+shop/abandon
+shop/changename <object>=<new name>
+shop/refine <object>[=<additional silver to spend>,AP to spend>]
+shop/addadorn <object>=<material type>,<amount>
+shop/craft
Allows you to buy objects from a shop. +shop/craft allows you to use a
crafter's skill to create an item. Similarly, +shop/refine lets you use a
crafter's skill to attempt to improve a crafted object. Check 'help craft'
for an explanation of switches, all of which can be used with +shop. Costs
and materials are covered by you. +shop/viewdesigns lets you see the
crafter's pre-made descriptions that you can copy for items you create.
"""
key = "+shop"
aliases = ["@shop", "shop"]
locks = "cmd:all()"
help_category = "Home"
def get_discount(self):
"""Returns our percentage discount"""
loc = self.caller.location
char_discounts = loc.db.char_discounts or {}
discount = 0.0
discounts = loc.db.discounts or {}
if self.caller in char_discounts:
return char_discounts[self.caller]
for org in self.caller.player_ob.Dominion.current_orgs:
odiscount = discounts.get(org.name, 0.0)
if odiscount and not discount:
discount = odiscount
if odiscount and discount and odiscount > discount:
discount = odiscount
return discount
def get_refine_price(self, base):
"""Price of refining"""
loc = self.caller.location
price = 0
prices = loc.db.crafting_prices or {}
if "refine" in prices:
price = (base * prices["refine"]) / 100.0
elif "all" in prices:
price = (base * prices["all"]) / 100.0
if price == 0:
return price
if price > 0:
price -= (price * self.get_discount() / 100.0)
if price < 0:
return 0
return price
raise ValueError
def get_recipe_price(self, recipe):
"""Price for crafting a recipe"""
loc = self.caller.location
base = recipe.value
price = 0
crafting_prices = loc.db.crafting_prices or {}
if recipe.id in crafting_prices:
price = (base * crafting_prices[recipe.id]) / 100.0
elif "all" in crafting_prices:
price = (base * crafting_prices["all"]) / 100.0
if price is not None:
price -= (price * self.get_discount() / 100.0)
if price < 0:
return 0
return price
# no price defined
raise ValueError
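    # Worked example of the pricing math above (numbers are hypothetical): a
    # recipe valued at 100 with a 30% shop markup prices at 30 silver, and a
    # 10% discount brings that to 27; refining uses the same markup/discount
    # math on whatever base value is passed in.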
def list_prices(self):
"""List prices of everything"""
loc = self.caller.location
prices = loc.db.crafting_prices or {}
msg = "{wCrafting Prices{n\n"
table = PrettyTable(["{wName{n", "{wCraft Price{n", "{wRefine Price{n"])
recipes = loc.db.shopowner.player_ob.Dominion.assets.recipes.all().order_by('name')
# This try/except block corrects 'removed' lists that are corrupted by
# non-integers, because that was a thing once upon a time.
try:
removed = prices.get("removed", [])
recipes = recipes.exclude(id__in=removed)
except ValueError:
removed = [ob for ob in removed if isinstance(ob, int)]
prices['removed'] = removed
recipes = recipes.exclude(id__in=removed)
recipes = self.filter_shop_qs(recipes, "name")
for recipe in recipes:
try:
refineprice = str(self.get_refine_price(recipe.value))
table.add_row([recipe.name, str(recipe.additional_cost + self.get_recipe_price(recipe)),
refineprice])
except (ValueError, TypeError):
self.msg("{rError: Recipe %s does not have a price defined.{n" % recipe.name)
if recipes:
msg += str(table)
msg += "\n{wItem Prices{n\n"
table = EvTable("{wID{n", "{wName{n", "{wPrice{n", width=78, border="cells")
prices = loc.db.item_prices or {}
sale_items = ObjectDB.objects.filter(id__in=prices.keys())
sale_items = self.filter_shop_qs(sale_items, "db_key")
for item in sale_items:
price = prices[item.id]
price -= (price * self.get_discount() / 100.0)
table.add_row(item.id, item.name, price)
if sale_items:
msg += str(table)
designs = self.filter_shop_dict(loc.db.template_designs or {})
if designs:
msg += "\n{wNames of designs:{n %s" % ", ".join(designs.keys())
if not recipes and not sale_items and not designs:
msg = "Nothing found."
return msg
def filter_shop_qs(self, shop_qs, field_name):
"""Returns filtered queryset if a filter word exists"""
if "filter" in self.switches and self.args:
filter_query = {"%s__icontains" % field_name: self.args}
shop_qs = shop_qs.filter(**filter_query)
return shop_qs
def filter_shop_dict(self, shop_dict):
"""Returns filtered dict if a filter word exists"""
if "filter" in self.switches and self.args:
shop_dict = {name: value for name, value in shop_dict.items() if self.args.lower() in name.lower()}
return shop_dict
def pay_owner(self, price, msg):
"""Pay money to the other and send an inform of the sale"""
loc = self.caller.location
loc.db.shopowner.pay_money(-price)
assets = loc.db.shopowner.player_ob.assets
if price >= assets.min_silver_for_inform:
assets.inform(msg, category="shop", append=True)
def buy_item(self, item):
"""Buy an item from inventory - pay the owner and get the item"""
loc = self.caller.location
price = loc.db.item_prices[item.id]
price -= price * (self.get_discount() / 100.0)
self.caller.pay_money(price)
self.pay_owner(price, "%s has bought %s for %s." % (self.caller, item, price))
self.caller.msg("You paid %s for %s." % (price, item))
item.move_to(self.caller)
item.tags.remove("for_sale")
item.attributes.remove("sale_location")
del loc.db.item_prices[item.id]
if hasattr(item, "rmkey"):
if item.rmkey(loc.db.shopowner):
item.grantkey(self.caller)
self.caller.msg("Good deal! The owner gave you a key for %s." % item)
return
self.caller.msg("Shady deal? The owner didn't have a key for %s to give you." % item)
def check_blacklist(self):
"""See if we're allowed to buy"""
caller = self.caller
loc = caller.location
blacklist = loc.db.blacklist or []
if caller in blacklist:
return True
for org in caller.player_ob.Dominion.current_orgs:
if org.name in blacklist:
return True
return False
def func(self):
"""Execute command."""
caller = self.caller
loc = caller.location
self.crafter = loc.db.shopowner
if not self.crafter:
self.msg("No shop owner is defined.")
return
if self.check_blacklist():
caller.msg("You are not permitted to buy from this shop.")
return
if self.crafter.roster.roster.name == "Gone":
self.msg("The shop owner is dead.")
return
if "filter" in self.switches or (not self.switches and not self.args):
caller.msg(self.list_prices())
project = caller.db.crafting_project
if project:
caller.msg(self.display_project(project))
return
if "viewdesigns" in self.switches:
designs = loc.db.template_designs or {}
if not self.args:
self.msg("Names of designs: %s" % ", ".join(designs.keys()))
return
try:
design = designs[self.args]
self.msg("{wDesign's appearance:{n\n%s" % design)
self.msg("\n{wRaw code of design:{n\n%s" % raw(design))
except KeyError:
self.msg("No design found by that name.")
self.msg("Names of designs: %s" % ", ".join(designs.keys()))
return
if "buy" in self.switches:
try:
num = int(self.args)
price = loc.db.item_prices[num]
obj = ObjectDB.objects.get(id=num)
except (TypeError, ValueError, KeyError):
caller.msg("You must supply the ID number of an item being sold.")
return
if price > caller.db.currency:
caller.msg("You cannot afford it.")
return
self.buy_item(obj)
return
if "look" in self.switches:
try:
num = int(self.args)
obj = ObjectDB.objects.get(id=num, id__in=loc.db.item_prices.keys())
except (TypeError, ValueError):
self.msg("Please provide a number of an item.")
return
except ObjectDB.DoesNotExist:
caller.msg("No item found by that number.")
return
caller.msg(obj.return_appearance(caller))
return
if set(self.switches) & set(self.crafting_switches + ("craft",)):
return CmdCraft.func(self)
caller.msg("Invalid switch.")
class ShopCmdSet(CmdSet):
"""CmdSet for shop spaces."""
key = "ShopCmdSet"
priority = 101
duplicates = False
no_exits = False
no_objs = False
def at_cmdset_creation(self):
"""
This is the only method defined in a cmdset, called during
its creation. It should populate the set with command instances.
Note that it can also take other cmdsets as arguments, which will
be used by the character default cmdset to add all of these onto
the internal cmdset stack. They will then be able to removed or
replaced as needed.
"""
self.add(CmdManageShop())
self.add(CmdBuyFromShop())
|
the-stack_0_10127 | #
# Copyright 2013 eNovance
#
# Author: Julien Danjou <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for ceilometer/publisher/udp.py
"""
import datetime
import mock
import msgpack
from ceilometer.openstack.common.fixture import config
from ceilometer.openstack.common import network_utils
from ceilometer.openstack.common import test
from ceilometer.publisher import udp
from ceilometer.publisher import utils
from ceilometer import sample
COUNTER_SOURCE = 'testsource'
class TestUDPPublisher(test.BaseTestCase):
test_data = [
sample.Sample(
name='test',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
source=COUNTER_SOURCE,
),
sample.Sample(
name='test',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
source=COUNTER_SOURCE,
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
source=COUNTER_SOURCE,
),
sample.Sample(
name='test2',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
source=COUNTER_SOURCE,
),
sample.Sample(
name='test3',
type=sample.TYPE_CUMULATIVE,
unit='',
volume=1,
user_id='test',
project_id='test',
resource_id='test_run_tasks',
timestamp=datetime.datetime.utcnow().isoformat(),
resource_metadata={'name': 'TestPublish'},
source=COUNTER_SOURCE,
),
]
def _make_fake_socket(self, published):
def _fake_socket_socket(family, type):
def record_data(msg, dest):
published.append((msg, dest))
udp_socket = mock.Mock()
udp_socket.sendto = record_data
return udp_socket
return _fake_socket_socket
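    # The fake socket factory above records every (payload, destination) pair
    # handed to sendto(), letting the tests below decode the msgpack payloads
    # without touching a real network.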
def setUp(self):
super(TestUDPPublisher, self).setUp()
self.CONF = self.useFixture(config.Config()).conf
self.CONF.publisher.metering_secret = 'not-so-secret'
def test_published(self):
self.data_sent = []
with mock.patch('socket.socket',
self._make_fake_socket(self.data_sent)):
publisher = udp.UDPPublisher(
network_utils.urlsplit('udp://somehost'))
publisher.publish_samples(None,
self.test_data)
self.assertEqual(5, len(self.data_sent))
sent_counters = []
for data, dest in self.data_sent:
counter = msgpack.loads(data)
sent_counters.append(counter)
# Check destination
self.assertEqual(('somehost',
self.CONF.collector.udp_port), dest)
# Check that counters are equal
self.assertEqual(sorted(
[utils.meter_message_from_counter(d, "not-so-secret")
for d in self.test_data]), sorted(sent_counters))
@staticmethod
def _raise_ioerror(*args):
raise IOError
def _make_broken_socket(self, family, type):
udp_socket = mock.Mock()
udp_socket.sendto = self._raise_ioerror
return udp_socket
def test_publish_error(self):
with mock.patch('socket.socket',
self._make_broken_socket):
publisher = udp.UDPPublisher(
network_utils.urlsplit('udp://localhost'))
publisher.publish_samples(None,
self.test_data)
|
the-stack_0_10128 | from dataLoader import *
import tensorflow as tf
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow import keras
from tensorflow.keras import metrics
from ModelUtil import *
import configparser
import sys
import numpy as np
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
tf.config.experimental.set_memory_growth(tf.config.experimental.list_physical_devices('GPU')[0], True)
config = configparser.ConfigParser()
config.read('target_model_config.ini')
DATA_NAME = sys.argv[1] if len(sys.argv) > 1 else "CIFAR"
MODEL = sys.argv[2] if len(sys.argv) > 2 else "ResNet50"
EPOCHS = int(config['{}_{}'.format(DATA_NAME, MODEL)]['EPOCHS'])
BATCH_SIZE = 64
LEARNING_RATE = float(config['{}_{}'.format(DATA_NAME, MODEL)]['LEARNING_RATE'])
WEIGHTS_PATH = "weights/Target/{}_{}.hdf5".format(DATA_NAME, MODEL)
(x_train, y_train), (x_test, y_test), _ = globals()['load_' + DATA_NAME]('TargetModel')
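# The globals() lookup above dispatches to a dataset-specific loader, e.g.
# load_CIFAR from dataLoader for the default DATA_NAME; each loader is expected
# to return train and test splits plus a third value that is ignored here.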
np.random.seed(1)
tf.random.set_seed(1)
def train(model, x_train, y_train):
"""
    Train the target model and save it to WEIGHTS_PATH.
    :param model: the model that will be trained
    :param x_train: the training images as a numpy array
    :param y_train: the labels for x_train
    :return: None
"""
model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(lr=LEARNING_RATE),
metrics=[metrics.CategoricalAccuracy(), metrics.Precision(), metrics.Recall()])
model.fit(x_train,
y_train,
validation_data=(x_test, y_test),
batch_size=BATCH_SIZE,
epochs=EPOCHS)
model.save(WEIGHTS_PATH)
def evaluate(x_test, y_test):
model = keras.models.load_model(WEIGHTS_PATH)
model.compile(loss='categorical_crossentropy',
metrics=[metrics.CategoricalAccuracy(), metrics.Precision(), metrics.Recall()])
loss, accuracy, precision, recall = model.evaluate(x_test, y_test, verbose=1)
F1_Score = 2 * (precision * recall) / (precision + recall)
print('loss:%.4f accuracy:%.4f precision:%.4f recall:%.4f F1_Score:%.4f'
% (loss, accuracy, precision, recall, F1_Score))
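# F1_Score above is the harmonic mean of precision and recall:
# F1 = 2 * (precision * recall) / (precision + recall); e.g. precision 0.80 and
# recall 0.60 give 2 * 0.48 / 1.40 ~ 0.686.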
TargetModel = globals()['create_{}_model'.format(MODEL)](x_train.shape[1:], y_train.shape[1])
train(TargetModel, x_train, y_train)
evaluate(x_train, y_train)
evaluate(x_test, y_test)
|
the-stack_0_10129 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource import ProxyResource
class ServerDnsAlias(ProxyResource):
"""A server DNS alias.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: Resource ID.
:vartype id: str
:ivar name: Resource name.
:vartype name: str
:ivar type: Resource type.
:vartype type: str
:ivar azure_dns_record: The fully qualified DNS record for alias
:vartype azure_dns_record: str
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'azure_dns_record': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'azure_dns_record': {'key': 'properties.azureDnsRecord', 'type': 'str'},
}
def __init__(self, **kwargs) -> None:
super(ServerDnsAlias, self).__init__(**kwargs)
self.azure_dns_record = None
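        # Note: azure_dns_record, like id, name and type, is flagged read-only
        # in _validation, so it is only populated from service responses and
        # client code reads it rather than setting it.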
|
the-stack_0_10130 | from rainworms import *
import random
class Bot():
def __init__(self):
self.game: RainWorms = None
@staticmethod
def in_roll_take_phase(possible_actions: List[PlayerAction]) -> bool:
return any([
action for action in possible_actions
if action.action_type == PlayerActionType.ROLL_DICE
])
@staticmethod
def in_take_steal_phase(possible_actions: List[PlayerAction]) -> bool:
return any([
action for action in possible_actions
if action.action_type == PlayerActionType.TAKE_STONE_WITH_VALUE
or action.action_type == PlayerActionType.STEAL_STONE_WITH_VALUE
])
@staticmethod
def in_pick_dice_phase(possible_actions: List[PlayerAction]) -> bool:
return any([
action for action in possible_actions
if action.action_type == PlayerActionType.PICK_DICE_SET_WITH_FACE
])
def select_action(self, player: Player, possible_actions: List[PlayerAction]):
raise NotImplementedError
def game_loop(self, game, player: Player, turn):
self.game = game
possible_actions = next(turn)
while 1:
action = self.select_action(player, possible_actions)
try:
possible_actions = turn.send(action)
except StopIteration:
break
class RandomBot(Bot):
def __init__(self):
super(RandomBot, self).__init__()
self.name = f"RandomBot"
def select_action(self, player: Player, possible_actions: List[PlayerAction]):
return random.choice(possible_actions)
class GreedyBot(Bot):
""" This bot tries to take the highest scoring set of dice every time, and will always take the highest stone it can.
It will start looking to take stones after a set threshold number of rolls. """
def __init__(self, take_stone_threshold):
super(GreedyBot, self).__init__()
self.name = f"GreedyBot_{take_stone_threshold}"
self.take_stone_threshold = take_stone_threshold
@staticmethod
def key_dice_set_actions(action: PlayerAction) -> int:
number = action.argument.face.value
is_worm = int(action.argument.face.name == "Worm")
return number + is_worm
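    # The key above is the face value plus a +1 bonus when the face is a Worm,
    # so when select_action sorts by it, a worm set outranks a plain set of the
    # same numeric value.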
def select_action(self, player: Player, possible_actions: List[PlayerAction]) -> PlayerAction:
if Bot.in_roll_take_phase(possible_actions):
# Pick `roll` until we reach a threshold, then pick take a stone if possible.
if len(Utils.count_faces(player.selected_dice)) < self.take_stone_threshold:
return PlayerAction(PlayerActionType.ROLL_DICE, None)
if any([action for action in possible_actions if action.action_type == PlayerActionType.TAKE_STONE]):
return PlayerAction(PlayerActionType.TAKE_STONE, None)
return PlayerAction(PlayerActionType.ROLL_DICE, None)
elif Bot.in_pick_dice_phase(possible_actions):
possible_actions = sorted(possible_actions, key=self.key_dice_set_actions)
return possible_actions[-1]
elif Bot.in_take_steal_phase(possible_actions):
# Try to pick the highest stone, disregarding stealing or taking from the bank
sorted_actions = sorted(possible_actions, key=lambda x: x.argument)
best_stone_action = sorted_actions[-1]
return best_stone_action
else:
# Fall back to a random choice
return random.choice(possible_actions)
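# Assumption worth noting: take_stone_threshold is compared against
# len(Utils.count_faces(...)), which (assuming count_faces returns one entry
# per distinct face) counts how many different faces have been set aside, not
# how many dice have been rolled or kept.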
class GreedyStealingBot(GreedyBot):
""" This is GreedyBot, except it tries to take the highest stone that it can steal. """
def __init__(self, take_stone_threshold):
        super(GreedyStealingBot, self).__init__(take_stone_threshold)
self.name = f"GreedyStealingBot_{take_stone_threshold}"
self.take_stone_threshold = take_stone_threshold
@staticmethod
def key_pick_stone(action: PlayerAction) -> int:
score = action.argument
# Add 20 points if this is a stealable stone.
if action.action_type == PlayerActionType.STEAL_STONE_WITH_VALUE:
score += 20
return score |
the-stack_0_10131 | # Copyright 2021 solo-learn development team.
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the
# Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies
# or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
from pprint import pprint
from pytorch_lightning import Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.plugins import DDPPlugin
from solo.args.setup import parse_args_pretrain
from solo.methods import METHODS
from solo.utils.auto_resumer import AutoResumer
try:
from solo.methods.dali import PretrainABC
except ImportError as e:
print(e)
_dali_avaliable = False
else:
_dali_avaliable = True
try:
from solo.utils.auto_umap import AutoUMAP
except ImportError:
_umap_available = False
else:
_umap_available = True
from solo.utils.checkpointer import Checkpointer
from solo.utils.classification_dataloader import prepare_data as prepare_data_classification
from solo.utils.pretrain_dataloader import (
prepare_dataloader,
prepare_datasets,
prepare_n_crop_transform,
prepare_transform,
)
def main():
seed_everything(5)
args = parse_args_pretrain()
assert args.method in METHODS, f"Choose from {METHODS.keys()}"
if args.num_large_crops != 2:
assert args.method == "wmse"
MethodClass = METHODS[args.method]
if args.dali:
assert (
_dali_avaliable
), "Dali is not currently avaiable, please install it first with [dali]."
MethodClass = type(f"Dali{MethodClass.__name__}", (MethodClass, PretrainABC), {})
model = MethodClass(**args.__dict__)
# contrastive dataloader
if not args.dali:
# asymmetric augmentations
if args.unique_augs > 1:
transform = [
prepare_transform(args.dataset, **kwargs) for kwargs in args.transform_kwargs
]
else:
transform = [prepare_transform(args.dataset, **args.transform_kwargs)]
transform = prepare_n_crop_transform(transform, num_crops_per_aug=args.num_crops_per_aug)
if args.debug_augmentations:
print("Transforms:")
pprint(transform)
train_dataset = prepare_datasets(
args.dataset,
transform,
data_dir=args.data_dir,
train_dir=args.train_dir,
no_labels=args.no_labels,
)
train_loader = prepare_dataloader(
train_dataset, batch_size=args.batch_size, num_workers=args.num_workers
)
# normal dataloader for when it is available
if args.dataset == "custom" and (args.no_labels or args.val_dir is None):
val_loader = None
elif args.dataset in ["imagenet100", "imagenet"] and args.val_dir is None:
val_loader = None
else:
_, val_loader = prepare_data_classification(
args.dataset,
data_dir=args.data_dir,
train_dir=args.train_dir,
val_dir=args.val_dir,
batch_size=args.batch_size,
num_workers=args.num_workers,
)
callbacks = []
# wandb logging
if args.wandb:
wandb_logger = WandbLogger(
name=args.name,
project=args.project,
entity=args.entity,
offline=args.offline,
)
wandb_logger.watch(model, log="gradients", log_freq=100)
wandb_logger.log_hyperparams(args)
# lr logging
lr_monitor = LearningRateMonitor(logging_interval="epoch")
callbacks.append(lr_monitor)
if args.save_checkpoint:
# save checkpoint on last epoch only
ckpt = Checkpointer(
args,
logdir=os.path.join(args.checkpoint_dir, args.method),
frequency=args.checkpoint_frequency,
)
callbacks.append(ckpt)
if args.auto_umap:
assert (
_umap_available
), "UMAP is not currently avaiable, please install it first with [umap]."
auto_umap = AutoUMAP(
args,
logdir=os.path.join(args.auto_umap_dir, args.method),
frequency=args.auto_umap_frequency,
)
callbacks.append(auto_umap)
if args.auto_resume and args.resume_from_checkpoint is None:
auto_resumer = AutoResumer(
checkpoint_dir=os.path.join(args.checkpoint_dir, args.method),
max_hours=args.auto_resumer_max_hours,
)
resume_from_checkpoint = auto_resumer.find_checkpoint(args)
if resume_from_checkpoint is not None:
print(
"Resuming from previous checkpoint that matches specifications:",
f"'{resume_from_checkpoint}'",
)
args.resume_from_checkpoint = resume_from_checkpoint
trainer = Trainer.from_argparse_args(
args,
logger=wandb_logger if args.wandb else None,
callbacks=callbacks,
plugins=DDPPlugin(find_unused_parameters=True) if args.accelerator == "ddp" else None,
checkpoint_callback=False,
terminate_on_nan=True,
)
if args.dali:
trainer.fit(model, val_dataloaders=val_loader)
else:
trainer.fit(model, train_loader, val_loader)
if __name__ == "__main__":
main()
|
the-stack_0_10132 | # Copyright 2022 The KerasCV Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
from keras_cv.utils import preprocessing
class MockRandomGenerator:
def __init__(self, value):
self.value = value
def random_uniform(self, shape, minval, maxval, dtype=None):
del minval, maxval
return tf.constant(self.value, dtype=dtype)
class PreprocessingTestCase(tf.test.TestCase):
def setUp(self):
super().setUp()
def test_transform_to_standard_range_neg_one_range(self):
x = tf.constant([-1, 0, 1])
x = preprocessing.transform_value_range(
x, original_range=[-1, 1], target_range=[0, 255]
)
self.assertAllClose(x, [0.0, 127.5, 255.0])
def test_transform_to_same_range(self):
x = tf.constant([-1, 0, 1])
x = preprocessing.transform_value_range(
x, original_range=[0, 255], target_range=[0, 255]
)
self.assertAllClose(x, [-1, 0, 1])
def test_transform_to_standard_range(self):
x = tf.constant([8 / 255, 9 / 255, 255 / 255])
x = preprocessing.transform_value_range(
x, original_range=[0, 1], target_range=[0, 255]
)
self.assertAllClose(x, [8.0, 9.0, 255.0])
def test_transform_to_value_range(self):
x = tf.constant([128.0, 255.0, 0.0])
x = preprocessing.transform_value_range(
x, original_range=[0, 255], target_range=[0, 1]
)
self.assertAllClose(x, [128 / 255, 1, 0])
def test_random_inversion(self):
generator = MockRandomGenerator(0.75)
self.assertEqual(preprocessing.random_inversion(generator), -1.0)
generator = MockRandomGenerator(0.25)
self.assertEqual(preprocessing.random_inversion(generator), 1.0)
|
the-stack_0_10133 |
from __future__ import absolute_import
import re
import os
import time
import math
import toolz
import click
import pprint
import logging
import inspect
import warnings
import itertools
import functools
import subprocess
from jrnr._compat import exclusive_open
FORMAT = '%(asctime)-15s %(message)s'
logger = logging.getLogger('uploader')
logger.setLevel('DEBUG')
formatter = logging.Formatter(FORMAT)
SLURM_SCRIPT = '''
#!/bin/bash
# Job name:
#SBATCH --job-name={jobname}
#
# Partition:
#SBATCH --partition={partition}
#
# Account:
#SBATCH --account=co_laika
#
# QoS:
#SBATCH --qos=savio_lowprio
#
#SBATCH --nodes=1
#
# Wall clock limit:
#SBATCH --time=72:00:00
#
#SBATCH --requeue
{dependencies}
{output}
'''.strip()
SLURM_MULTI_SCRIPT = SLURM_SCRIPT + '''
#
#SBATCH --array=0-{maxnodes}
# set up directories
mkdir -p {logdir}
mkdir -p locks
## Run command
for i in {{1..{jobs_per_node}}}
do
nohup python {filepath} do_job --job_name {jobname} \
--job_id {uniqueid} --num_jobs {numjobs} --logdir "{logdir}" {flags} \
> {logdir}/nohup-{jobname}-{uniqueid}-${{SLURM_ARRAY_TASK_ID}}-$i.out &
done
python {filepath} wait --job_name {jobname} \
--job_id {uniqueid} --num_jobs {numjobs} {flags}
'''
SLURM_SINGLE_SCRIPT = SLURM_SCRIPT + '''
## Run command
python {filepath} {flags}
'''
def _product(values):
'''
Examples
--------
.. code-block:: python
>>> _product([3, 4, 5])
60
'''
return functools.reduce(lambda x, y: x*y, values, 1)
def _unpack_job(specs):
job = {}
for spec in specs:
job.update(spec)
return job
def generate_jobs(job_spec):
for specs in itertools.product(*job_spec):
yield _unpack_job(specs)
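# Illustrative sketch of the job_spec format consumed above (example values
# are made up, not part of the library):
#
#     spec = ([{'model': 'a'}, {'model': 'b'}], [{'seed': 1}, {'seed': 2}])
#     list(generate_jobs(spec))
#     # -> [{'model': 'a', 'seed': 1}, {'model': 'a', 'seed': 2},
#     #     {'model': 'b', 'seed': 1}, {'model': 'b', 'seed': 2}]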
def count_jobs(job_spec):
return _product(map(len, job_spec))
def _prep_slurm(
filepath,
jobname='slurm_job',
partition='savio2',
job_spec=None,
limit=None,
uniqueid='"${SLURM_ARRAY_JOB_ID}"',
jobs_per_node=24,
maxnodes=100,
dependencies=None,
logdir='log',
flags=None):
depstr = ''
if (dependencies is not None) and (len(dependencies) > 1):
status, deps = dependencies
if len(deps) > 0:
depstr += (
'#\n#SBATCH --dependency={}:{}'
.format(status, ','.join(map(str, deps))))
if flags:
flagstr = ' '.join(map(str, flags))
else:
flagstr = ''
if job_spec:
n = count_jobs(job_spec)
if limit is not None:
n = min(limit, n)
numjobs = n
output = (
'#\n#SBATCH --output {logdir}/slurm-{jobname}-%A_%a.out'
.format(jobname=jobname, logdir=logdir))
template = SLURM_MULTI_SCRIPT
else:
numjobs = 1
output = (
'#\n#SBATCH --output {logdir}/slurm-{jobname}-%A.out'
.format(jobname=jobname, logdir=logdir))
template = SLURM_SINGLE_SCRIPT
with open('run-slurm.sh', 'w+') as f:
f.write(template.format(
jobname=jobname,
partition=partition,
numjobs=numjobs,
jobs_per_node=jobs_per_node,
maxnodes=(maxnodes-1),
uniqueid=uniqueid,
filepath=filepath.replace(os.sep, '/'),
dependencies=depstr,
flags=flagstr,
logdir=logdir,
output=output))
def run_slurm(
filepath,
jobname='slurm_job',
partition='savio2',
job_spec=None,
limit=None,
uniqueid='"${SLURM_ARRAY_JOB_ID}"',
jobs_per_node=24,
maxnodes=100,
dependencies=None,
logdir='log',
flags=None):
_prep_slurm(
filepath=filepath,
jobname=jobname,
partition=partition,
job_spec=job_spec,
limit=limit,
uniqueid=uniqueid,
jobs_per_node=jobs_per_node,
maxnodes=maxnodes,
dependencies=dependencies,
logdir=logdir,
flags=flags)
job_command = ['sbatch', 'run-slurm.sh']
proc = subprocess.Popen(
job_command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if isinstance(out, bytes):
        out = out.decode()
    matcher = re.search(r'^\s*Submitted batch job (?P<run_id>[0-9]+)\s*$', out)
if matcher:
run_id = int(matcher.group('run_id'))
else:
run_id = None
if err:
raise OSError('Error encountered submitting job: {}'.format(err))
return run_id
def get_job_by_index(job_spec, index):
'''
Examples
--------
.. code-block:: python
>>> job = get_job_by_index(
... (
... [{'let': 'a'}, {'let': 'b'}, {'let': 'c'}],
... [{'num': 1}, {'num': 2}, {'num': 3}],
... [{'pitch': 'do'}, {'pitch': 'rey'}, {'pitch': 'mi'}]),
... 5)
...
>>> sorted(zip(job.keys(), job.values())) # test job ordered
[('let', 'a'), ('num', 2), ('pitch', 'mi')]
>>> job = get_job_by_index(
... tuple(map(
... lambda x: [{x: i} for i in x],
... ['hi', 'hello', 'bye'])),
... 10)
...
>>> sorted(zip(job.keys(), job.values())) # test job ordered
[('bye', 'y'), ('hello', 'l'), ('hi', 'h')]
'''
return _unpack_job([
job_spec[i][
(index//(_product(map(len, job_spec[i+1:]))) % len(job_spec[i]))]
for i in range(len(job_spec))])
def _get_call_args(job_spec, index=0):
'''
Places stringified job parameters into `metadata` dict along with job spec
.. code-block:: python
>>> job_spec = (
... [{'ordinal': 1, 'zeroth': 0}, {'ordinal': 2, 'zeroth': 1}],
... [{'letter': 'a'}, {'letter': 'b'}],
... [{'name': 'susie', 'age': 8}, {'name': 'billy', 'age': 6}])
...
>>> job = _get_call_args(job_spec, 2)
>>> job # doctest: +SKIP
{'age': 8, 'letter': 'b', 'name': 'susie', 'ordinal': 1, 'zeroth': 0}
>>> notmeta = {k: v for k, v in job.items() if k != 'metadata'}
>>> meta = job['metadata']
>>> sorted(zip(notmeta.keys(), notmeta.values())) \
# doctest: +NORMALIZE_WHITESPACE
[('age', 8), ('letter', 'b'), ('name', 'susie'),
('ordinal', 1), ('zeroth', 0)]
>>> sorted(zip(meta.keys(), meta.values())) \
# doctest: +NORMALIZE_WHITESPACE
[('age', '8'), ('letter', 'b'), ('name', 'susie'),
('ordinal', '1'), ('zeroth', '0')]
'''
job = get_job_by_index(job_spec, index)
metadata = {}
metadata.update({k: str(v) for k, v in job.items()})
call_args = {'metadata': metadata}
call_args.update(job)
return call_args
@toolz.curry
def slurm_runner(
run_job,
job_spec,
filepath=None,
onfinish=None,
return_index=False):
'''
Decorator to create a SLURM runner job management command-line application
Parameters
----------
run_job : function
Function executed for each task specified by ``job_spec``. ``run_job``
must be of the form ``run_job(metadata, interactive=False, **kwargs)``
where ``kwargs`` is the set of keyword terms specified by ``job_spec``.
job_spec : tuple of lists of dicts
Job specification in the format ``([{kwargs: vals}, ...], [...], )``.
``slurm_runner`` will iterate through all combinations of the lists in
``job_spec``, combining paired kwarg dictionaries and passing them as
arguments to ``run_job``.
filepath : str, optional
Path to file to call when running tasks. By default (None),
slurm_runner infers the filepath from the location of ``run_job``.
onfinish : function, optional
Provide a function to call when all jobs have been completed. Default
(None) takes no action.
return_index : bool, optional
Adds a ``task_id`` argument to run_job call with 0-indexed ID of
current task
Returns
-------
slurm_runner : click.Group
A SLURM runner job management command-line application
'''
if filepath is None:
filepath = os.path.abspath(inspect.getfile(run_job))
else:
warning = (
"the `filepath` argument is deprecated and will be " +
"removed in the future.")
warnings.warn(warning, FutureWarning)
@click.group()
def slurm():
pass
@slurm.command()
@click.option(
'--limit', '-l', type=int, required=False, default=None,
help='Number of iterations to run')
@click.option(
'--jobs_per_node', '-n', type=int, required=False, default=24,
help='Number of jobs to run per node')
@click.option(
'--maxnodes', '-x', type=int, required=False, default=100,
help='Number of nodes to request for this job')
@click.option(
'--jobname', '-j', default='test', help='name of the job')
@click.option(
'--partition', '-p', default='savio2', help='resource on which to run')
@click.option('--dependency', '-d', type=int, multiple=True)
@click.option(
'--logdir', '-L', default='log', help='Directory to write log files')
@click.option(
'--uniqueid', '-u', default='"${SLURM_ARRAY_JOB_ID}"',
help='Unique job pool id')
def prep(
limit=None,
jobs_per_node=24,
jobname='slurm_job',
dependency=None,
partition='savio2',
maxnodes=100,
logdir='log',
uniqueid='"${SLURM_ARRAY_JOB_ID}"'):
_prep_slurm(
filepath=filepath,
jobname=jobname,
partition=partition,
job_spec=job_spec,
jobs_per_node=jobs_per_node,
maxnodes=maxnodes,
limit=limit,
uniqueid=uniqueid,
logdir=logdir,
dependencies=('afterany', list(dependency)))
@slurm.command()
@click.option(
'--limit', '-l', type=int, required=False, default=None,
help='Number of iterations to run')
@click.option(
'--jobs_per_node', '-n', type=int, required=False, default=24,
help='Number of jobs to run per node')
@click.option(
'--maxnodes', '-x', type=int, required=False, default=100,
help='Number of nodes to request for this job')
@click.option(
'--jobname', '-j', default='test', help='name of the job')
@click.option(
'--partition', '-p', default='savio2', help='resource on which to run')
@click.option(
'--dependency', '-d', type=int, multiple=True)
@click.option(
'--logdir', '-L', default='log', help='Directory to write log files')
@click.option(
'--uniqueid', '-u', default='"${SLURM_ARRAY_JOB_ID}"',
help='Unique job pool id')
def run(
limit=None,
jobs_per_node=24,
jobname='slurm_job',
dependency=None,
partition='savio2',
maxnodes=100,
logdir='log',
uniqueid='"${SLURM_ARRAY_JOB_ID}"'):
if not os.path.isdir(logdir):
os.makedirs(logdir)
slurm_id = run_slurm(
filepath=filepath,
jobname=jobname,
partition=partition,
job_spec=job_spec,
jobs_per_node=jobs_per_node,
maxnodes=maxnodes,
limit=limit,
uniqueid=uniqueid,
logdir=logdir,
dependencies=('afterany', list(dependency)))
finish_id = run_slurm(
filepath=filepath,
jobname=jobname+'_finish',
partition=partition,
dependencies=('afterany', [slurm_id]),
logdir=logdir,
flags=['cleanup', slurm_id])
print('run job: {}\non-finish job: {}'.format(slurm_id, finish_id))
@slurm.command()
@click.argument('slurm_id')
def cleanup(slurm_id):
proc = subprocess.Popen(
[
'sacct', '-j', slurm_id,
'--format=JobID,JobName,MaxRSS,Elapsed,State'],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = proc.communicate()
print(out)
if onfinish:
onfinish()
@slurm.command()
@click.option('--job_name', required=True)
@click.option('--job_id', required=True)
@click.option('--num_jobs', required=True, type=int)
@click.option(
'--logdir', '-L', default='log', help='Directory to write log files')
def do_job(job_name, job_id, num_jobs=None, logdir='log'):
if not os.path.isdir('locks'):
os.makedirs('locks')
if not os.path.isdir(logdir):
os.makedirs(logdir)
for task_id in range(num_jobs):
lock_file = (
'locks/{}-{}-{}.{{}}'
.format(job_name, job_id, task_id))
if os.path.exists(lock_file.format('done')):
print('{} already done. skipping'.format(task_id))
continue
elif os.path.exists(lock_file.format('err')):
print('{} previously errored. skipping'.format(task_id))
continue
try:
with exclusive_open(lock_file.format('lck')):
pass
# Check for race conditions
if os.path.exists(lock_file.format('done')):
print('{} already done. skipping'.format(task_id))
if os.path.exists(lock_file.format('lck')):
os.remove(lock_file.format('lck'))
continue
elif os.path.exists(lock_file.format('err')):
print('{} previously errored. skipping'.format(task_id))
if os.path.exists(lock_file.format('lck')):
os.remove(lock_file.format('lck'))
continue
except OSError:
print('{} already in progress. skipping'.format(task_id))
continue
handler = logging.FileHandler(os.path.join(
logdir,
'run-{}-{}-{}.log'.format(job_name, job_id, task_id)))
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
try:
job_kwargs = _get_call_args(job_spec, task_id)
if return_index:
job_kwargs.update({'task_id': task_id})
logger.debug('Beginning job\nkwargs:\t{}'.format(
pprint.pformat(job_kwargs['metadata'], indent=2)))
run_job(**job_kwargs)
except (KeyboardInterrupt, SystemExit):
try:
                    logger.error(
                        '{} interrupted, removing .lck file before exiting'
                        .format(task_id))
                    os.remove(lock_file.format('lck'))
                except Exception:
pass
raise
except Exception as e:
logger.error(
'Error encountered in job {} {} {}'
.format(job_name, job_id, task_id),
exc_info=e)
with open(lock_file.format('err'), 'w+'):
pass
else:
with open(lock_file.format('done'), 'w+'):
pass
finally:
if os.path.exists(lock_file.format('lck')):
os.remove(lock_file.format('lck'))
logger.removeHandler(handler)
@slurm.command()
@click.option('--job_name', '-j', required=True)
@click.option('--job_id', '-u', required=True)
def status(job_name, job_id, num_jobs=None, logdir='log'):
n = count_jobs(job_spec)
locks = os.listdir('locks')
count = int(math.log10(n)//1 + 1)
locked = len([
i for i in range(n)
if '{}-{}-{}.lck'.format(job_name, job_id, i) in locks])
done = len([
i for i in range(n)
if '{}-{}-{}.done'.format(job_name, job_id, i) in locks])
err = len([
i for i in range(n)
if '{}-{}-{}.err'.format(job_name, job_id, i) in locks])
print(
("\n".join(["{{:<15}}{{:{}d}}".format(count) for _ in range(4)]))
.format(
'jobs:', n,
'done:', done,
'in progress:', locked,
'errored:', err))
@slurm.command()
@click.option('--job_name', required=True)
@click.option('--job_id', required=True)
@click.option('--num_jobs', required=True, type=int)
def wait(job_name, job_id, num_jobs=None):
for task_id in range(num_jobs):
while not os.path.exists(
'locks/{}-{}-{}.done'
.format(job_name, job_id, task_id)):
time.sleep(10)
def run_interactive(task_id=0):
job_kwargs = _get_call_args(job_spec, task_id)
logger.debug('Beginning job\nkwargs:\t{}'.format(
pprint.pformat(job_kwargs['metadata'], indent=2)))
if return_index:
job_kwargs.update({'task_id': task_id})
return run_job(interactive=True, **job_kwargs)
slurm.run_interactive = run_interactive
return slurm
|
the-stack_0_10138 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thrusday 08 Feb 2018
Testing suite for topopy Flow class
@author: J. Vicente Perez
@email: [email protected]
"""
import unittest
import sys
import numpy as np
# Add to the path code folder and data folder
sys.path.append("../")
from topopy import Grid, Flow, Network
class CreateNetwork(unittest.TestCase):
def test_create_load(self):
dem_files = ['tunez', 'tunez2', 'small25']
for filename in dem_files:
fd = Flow("data/fd_{0}.tif".format(filename))
st = Network(fd, 1000)
computed = [st.get_dims(), st.get_size(), st.get_ncells(),
st.get_cellsize(), st.get_geotransform(), st.get_projection()]
expected = [fd.get_dims(), fd.get_size(), fd.get_ncells(),
fd.get_cellsize(), fd.get_geotransform(), fd.get_projection()]
            self.assertEqual(computed, expected)
def test_streams(self):
dem_files = ['tunez', 'tunez2', 'small25']
for filename in dem_files:
fd = Flow("data/fd_{0}.tif".format(filename))
st = Network(fd, 1000)
streams = st.get_streams()
st01 = streams.read_array()
st02 = Grid("data/str_{0}.tif".format(filename)).read_array()
self.assertTrue(np.array_equal(st01, st02), True)
def test_streampoi(self):
dem_files = ['tunez', 'tunez2', 'small25']
for filename in dem_files:
fd = Flow("data/fd_{0}.tif".format(filename))
st = Network(fd, 1000)
kinds = ['heads', 'confluences', 'outlets']
for kind in kinds:
poi = st.get_stream_poi(kind)
rows = poi[0].reshape((poi[0].size, 1))
cols = poi[1].reshape((poi[1].size, 1))
comp_poi = np.append(rows, cols, axis=1)
exp_poi = np.load("data/mlab_files/{0}_{1}.npy".format(filename, kind))
self.assertEqual(np.array_equal(comp_poi, exp_poi), True)
def test_stream_segments(self):
dem_files = ['tunez', 'tunez2', 'small25']
for filename in dem_files:
fd = Flow("data/fd_{0}.tif".format(filename))
st = Network(fd, 1000)
ssegments = st.get_stream_segments(False)
esegments = Grid("data/mlab_files/{0}_segments.tif".format(filename)).read_array()
self.assertTrue(np.array_equal(ssegments, esegments), True)
def test_stream_order(self):
dem_files = ['tunez', 'tunez2', 'small25']
for filename in dem_files:
fd = Flow("data/fd_{0}.tif".format(filename))
st = Network(fd, 1000)
for kind in ['strahler', 'shreeve']:
exp_order = st.get_stream_order(kind = kind, asgrid=False)
cmp_order = Grid("data/mlab_files/{0}_{1}.tif".format(filename, kind)).read_array()
self.assertTrue(np.array_equal(exp_order, cmp_order), True)
if __name__ == "__main__":
unittest.main() |
the-stack_0_10139 | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys, glob
sys.path.insert(0, './gen-py')
lib_path = glob.glob('../../lib/py/build/lib.*')
if lib_path:
sys.path.insert(0, lib_path[0])
if sys.version_info[0] >= 3:
xrange = range
from ThriftTest import ThriftTest
from ThriftTest.ttypes import *
from thrift.transport import TTransport
from thrift.transport import TSocket
from thrift.protocol import TBinaryProtocol
import unittest
import time
import socket
import random
from optparse import OptionParser
class TimeoutTest(unittest.TestCase):
def setUp(self):
for i in xrange(50):
try:
# find a port we can use
self.listen_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = random.randint(10000, 30000)
self.listen_sock.bind(('localhost', self.port))
self.listen_sock.listen(5)
break
except Exception:
if i == 49:
raise
def testConnectTimeout(self):
starttime = time.time()
try:
leaky = []
for _ in xrange(100):
socket = TSocket.TSocket('localhost', self.port)
socket.setTimeout(10)
socket.open()
leaky.append(socket)
except Exception:
self.assert_(time.time() - starttime < 5.0)
def testWriteTimeout(self):
starttime = time.time()
try:
socket = TSocket.TSocket('localhost', self.port)
socket.setTimeout(10)
socket.open()
self.listen_sock.accept()
while True:
socket.write("hi" * 100)
except Exception:
self.assert_(time.time() - starttime < 5.0)
def suite():
suite = unittest.TestSuite()
loader = unittest.TestLoader()
suite.addTest(loader.loadTestsFromTestCase(TimeoutTest))
return suite
if __name__ == "__main__":
testRunner = unittest.TextTestRunner(verbosity=2)
testRunner.run(suite())
|
the-stack_0_10141 | import logging
import os
import tempfile
from contextlib import contextmanager
from typing import TYPE_CHECKING, Optional
from funcy import cached_property, first
from dvc import fs
from dvc.exceptions import DvcException
from dvc.utils import dict_sha256, relpath
from dvc_data.transfer import _log_exceptions
if TYPE_CHECKING:
from dvc_objects.db import ObjectDB
logger = logging.getLogger(__name__)
class RunCacheNotFoundError(DvcException):
def __init__(self, stage):
super().__init__(f"No run-cache for {stage.addressing}")
def _get_cache_hash(cache, key=False):
from dvc_objects.meta import Meta
if key:
cache["outs"] = [out["path"] for out in cache.get("outs", [])]
return dict_sha256(cache, exclude=[Meta.PARAM_SIZE, Meta.PARAM_NFILES])
def _can_hash(stage):
if stage.is_callback or stage.always_changed:
return False
if not all([stage.cmd, stage.deps, stage.outs]):
return False
for dep in stage.deps:
if not (dep.protocol == "local" and dep.def_path and dep.get_hash()):
return False
for out in stage.outs:
if out.protocol != "local" or not out.def_path or out.persist:
return False
return True
def _get_stage_hash(stage):
from .serialize import to_single_stage_lockfile
assert _can_hash(stage)
return _get_cache_hash(to_single_stage_lockfile(stage), key=True)
class StageCache:
def __init__(self, repo):
self.repo = repo
@cached_property
def cache_dir(self):
return os.path.join(self.repo.odb.local.cache_dir, "runs")
def _get_cache_dir(self, key):
return os.path.join(self.cache_dir, key[:2], key)
def _get_cache_path(self, key, value):
return os.path.join(self._get_cache_dir(key), value)
def _load_cache(self, key, value):
from voluptuous import Invalid
from dvc.schema import COMPILED_LOCK_FILE_STAGE_SCHEMA
from dvc.utils.serialize import YAMLFileCorruptedError, load_yaml
path = self._get_cache_path(key, value)
try:
return COMPILED_LOCK_FILE_STAGE_SCHEMA(load_yaml(path))
except FileNotFoundError:
return None
except (YAMLFileCorruptedError, Invalid):
logger.warning("corrupted cache file '%s'.", relpath(path))
os.unlink(path)
return None
def _load(self, stage):
key = _get_stage_hash(stage)
if not key:
return None
cache_dir = self._get_cache_dir(key)
if not os.path.exists(cache_dir):
return None
for value in os.listdir(cache_dir):
cache = self._load_cache(key, value)
if cache:
return cache
return None
def _create_stage(self, cache, wdir=None):
from . import PipelineStage, create_stage
from .loader import StageLoader
stage = create_stage(
PipelineStage,
repo=self.repo,
path="dvc.yaml",
cmd=cache["cmd"],
wdir=wdir,
outs=[out["path"] for out in cache["outs"]],
external=True,
)
StageLoader.fill_from_lock(stage, cache)
return stage
@contextmanager
def _cache_type_copy(self):
cache_types = self.repo.odb.local.cache_types
self.repo.odb.local.cache_types = ["copy"]
try:
yield
finally:
self.repo.odb.local.cache_types = cache_types
def _uncached_outs(self, stage, cache):
# NOTE: using temporary stage to avoid accidentally modifying original
# stage and to workaround `commit/checkout` not working for uncached
# outputs.
cached_stage = self._create_stage(cache, wdir=stage.wdir)
outs_no_cache = [
out.def_path for out in stage.outs if not out.use_cache
]
# NOTE: using copy link to make it look like a git-tracked file
with self._cache_type_copy():
for out in cached_stage.outs:
if out.def_path in outs_no_cache:
yield out
def save(self, stage):
from .serialize import to_single_stage_lockfile
if not _can_hash(stage):
return
cache_key = _get_stage_hash(stage)
cache = to_single_stage_lockfile(stage)
cache_value = _get_cache_hash(cache)
existing_cache = self._load_cache(cache_key, cache_value)
cache = existing_cache or cache
for out in self._uncached_outs(stage, cache):
out.commit()
if existing_cache:
return
from dvc.schema import COMPILED_LOCK_FILE_STAGE_SCHEMA
from dvc.utils.serialize import dump_yaml
# sanity check
COMPILED_LOCK_FILE_STAGE_SCHEMA(cache)
path = self._get_cache_path(cache_key, cache_value)
parent = self.repo.odb.local.fs.path.parent(path)
self.repo.odb.local.makedirs(parent)
tmp = tempfile.NamedTemporaryFile(delete=False, dir=parent).name
assert os.path.exists(parent)
assert os.path.isdir(parent)
dump_yaml(tmp, cache)
self.repo.odb.local.move(tmp, path)
def restore(self, stage, run_cache=True, pull=False):
from .serialize import to_single_stage_lockfile
if not _can_hash(stage):
raise RunCacheNotFoundError(stage)
if (
not stage.changed_stage()
and stage.deps_cached()
and all(bool(out.hash_info) for out in stage.outs)
):
cache = to_single_stage_lockfile(stage)
else:
if not run_cache: # backward compatibility
raise RunCacheNotFoundError(stage)
stage.save_deps()
cache = self._load(stage)
if not cache:
raise RunCacheNotFoundError(stage)
cached_stage = self._create_stage(cache, wdir=stage.wdir)
if pull:
for objs in cached_stage.get_used_objs().values():
self.repo.cloud.pull(objs)
if not cached_stage.outs_cached():
raise RunCacheNotFoundError(stage)
logger.info(
"Stage '%s' is cached - skipping run, checking out outputs",
stage.addressing,
)
cached_stage.checkout()
def transfer(self, from_odb, to_odb):
from dvc.fs.callbacks import Callback
from_fs = from_odb.fs
to_fs = to_odb.fs
func = _log_exceptions(fs.generic.copy)
runs = from_fs.path.join(from_odb.fs_path, "runs")
ret = []
if not from_fs.exists(runs):
return ret
for src in from_fs.find(runs):
rel = from_fs.path.relpath(src, from_odb.fs_path)
dst = to_fs.path.join(to_odb.fs_path, rel)
key = to_fs.path.parent(dst)
# check if any build cache already exists for this key
# TODO: check if MaxKeys=1 or something like that applies
# or otherwise this will take a lot of time!
if to_fs.exists(key) and first(to_fs.find(key)):
continue
src_name = from_fs.path.name(src)
parent_name = from_fs.path.name(from_fs.path.parent(src))
with Callback.as_tqdm_callback(
desc=src_name,
bytes=True,
) as cb:
func(from_fs, src, to_fs, dst, callback=cb)
ret.append((parent_name, src_name))
return ret
def push(self, remote: Optional[str], odb: Optional["ObjectDB"] = None):
dest_odb = odb or self.repo.cloud.get_remote_odb(remote)
return self.transfer(self.repo.odb.local, dest_odb)
def pull(self, remote: Optional[str], odb: Optional["ObjectDB"] = None):
odb = odb or self.repo.cloud.get_remote_odb(remote)
return self.transfer(odb, self.repo.odb.local)
def get_used_objs(self, used_run_cache, *args, **kwargs):
"""Return used cache for the specified run-cached stages."""
from collections import defaultdict
used_objs = defaultdict(set)
for key, value in used_run_cache:
entry = self._load_cache(key, value)
if not entry:
continue
stage = self._create_stage(entry)
for odb, objs in stage.get_used_objs(*args, **kwargs).items():
used_objs[odb].update(objs)
return used_objs
|
the-stack_0_10142 | # -*- coding: utf-8 -*-
import getopt
import sys
import matplotlib.pyplot as plt
from simulation import Simulation
def main(argv):
population_size = 20
individual_size = 8
delta = 0.005
cross_point_count = 1
verbose = False
try:
opts, args = getopt.getopt(argv, 'hvp:i:c:d:', [
'help',
'verbose',
'population-size=',
'individual-size=',
'cross-points=',
'delta='
])
except getopt.GetoptError:
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ('-h', '--help'):
usage()
sys.exit()
if opt in ('-v', '--verbose'):
verbose = True
if opt in ('-p', '--population-size'):
population_size = int(arg)
if opt in ('-i', '--individual-size'):
individual_size = int(arg)
if opt in ('-c', '--cross-points'):
cross_point_count = int(arg)
if opt in ('-d', '--delta'):
delta = float(arg)
print("pop size = {}, ind size = {}, cross points = {}, delta = {}".format(
population_size, individual_size, cross_point_count, delta))
simulation = Simulation(
population_size, individual_size, cross_point_count, delta)
simulation.run_simulation()
if verbose:
print(simulation.population)
# Ploting
x_values = range(0, len(simulation.global_fitness_records))
y_values = [
simulation.global_fitness_records,
simulation.convergence_values
]
labels = ["Average fitness score", "Convergence values"]
for y_array, label in zip(y_values, labels):
plt.plot(x_values, y_array, label=label)
plt.legend()
plt.show()
def usage():
print("""
Usage : python genetic.py [OPTIONS]
You can specify the following options :
    -v, --verbose:
        If specified, the last population will be printed out in the terminal.
    -p, --population-size [integer]:
        The size of the population that will be used in the genetic algorithm.
    -i, --individual-size [integer]:
        The number of genes of each individual member of the population.
    -c, --cross-points [integer]:
        The number of crossover points used by the genetic algorithm.
    -d, --delta [float]:
        A value used for convergence detection. The smaller this value is, the longer the algorithm takes to stop, but the higher the average fitness will be.
        This value must be less than 1.
""")
if __name__ == "__main__":
main(sys.argv[1:])
|
the-stack_0_10143 | from proteus import *
from twp_navier_stokes_p import *
from dambreak_Ubbink_coarse import *
if timeDiscretization=='vbdf':
timeIntegration = VBDF
timeOrder=2
stepController = Min_dt_cfl_controller
elif timeDiscretization=='flcbdf':
timeIntegration = FLCBDF
#stepController = FLCBDF_controller_sys
stepController = Min_dt_cfl_controller
time_tol = 10.0*ns_nl_atol_res
atol_u = {1:time_tol,2:time_tol,3:time_tol}
rtol_u = {1:time_tol,2:time_tol,3:time_tol}
else:
timeIntegration = BackwardEuler_cfl
stepController = Min_dt_cfl_controller
femSpaces = {0:basis,
1:basis,
2:basis,
3:basis}
massLumping = False
numericalFluxType = None
conservativeFlux = None
numericalFluxType = RANS2P.NumericalFlux
subgridError = RANS2P.SubgridError(coefficients,nd,lag=ns_lag_subgridError,hFactor=hFactor)
shockCapturing = RANS2P.ShockCapturing(coefficients,nd,ns_shockCapturingFactor,lag=ns_lag_shockCapturing)
fullNewtonFlag = True
multilevelNonlinearSolver = Newton
levelNonlinearSolver = Newton
nonlinearSmoother = None
linearSmoother = SimpleNavierStokes3D
matrix = SparseMatrix
if useOldPETSc:
multilevelLinearSolver = PETSc
levelLinearSolver = PETSc
else:
multilevelLinearSolver = KSP_petsc4py
levelLinearSolver = KSP_petsc4py
if useSuperlu:
multilevelLinearSolver = LU
levelLinearSolver = LU
linear_solver_options_prefix = 'rans2p_'
levelNonlinearSolverConvergenceTest = 'r'
linearSolverConvergenceTest = 'r-true'
tolFac = 0.0
linTolFac = 0.01
l_atol_res = 0.01*vof_nl_atol_res
nl_atol_res = ns_nl_atol_res
useEisenstatWalker = False
maxNonlinearIts = 50
maxLineSearches = 0
conservativeFlux = {0:'pwl-bdm-opt'}
#auxiliaryVariables=[pointGauges,lineGauges]
|
the-stack_0_10144 | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Protocol for objects that are mixtures (probabilistic combinations)."""
from typing import Any, Sequence, Tuple, Union
import numpy as np
from typing_extensions import Protocol
from cirq.protocols.unitary import has_unitary
from cirq.type_workarounds import NotImplementedType
# This is a special indicator value used by the inverse method to determine
# whether or not the caller provided a 'default' argument.
RaiseTypeErrorIfNotProvided = ((0.0, []),) # type: Sequence[Tuple[float, Any]]
class SupportsMixture(Protocol):
"""An object that may be describable as a probabilistic combination.
"""
def _mixture_(self) -> Union[
Sequence[Tuple[float, Any]], NotImplementedType]:
"""Return the probabilistic mixture.
A mixture is described by an iterable of tuples of the form
(probability of object, object)
The probability components of the tuples must sum to 1.0 and be between
0 and 1 (inclusive).
Returns:
A tuple of (probability of object, object)
"""
def _has_mixture_(self) -> bool:
"""Whether this value has a mixture representation.
This method is used by the global `cirq.has_mixture` method. If this
method is not present, or returns NotImplemented, it will fallback
to using _mixture_ with a default value, or False if neither exist.
Returns:
            True if the value has a mixture representation, False otherwise.
"""
def mixture(
val: Any,
default: Any = RaiseTypeErrorIfNotProvided) -> Sequence[Tuple[float, Any]]:
"""Return a sequence of tuples representing a probabilistic combination.
A mixture is described by an iterable of tuples of the form
(probability of object, object)
The probability components of the tuples must sum to 1.0 and be between
0 and 1 (inclusive).
Args:
val: The value whose mixture is being computed.
default: A default value if val does not support mixture.
Returns:
An iterable of tuples of size 2. The first element of the tuple is a
probability (between 0 and 1) and the second is the object that occurs
with that probability in the mixture. The probabilities will sum to 1.0.
"""
getter = getattr(val, '_mixture_', None)
result = NotImplemented if getter is None else getter()
if result is not NotImplemented:
return result
if default is not RaiseTypeErrorIfNotProvided:
return default
if getter is None:
raise TypeError(
"object of type '{}' has no _mixture_ method.".format(type(val)))
raise TypeError("object of type '{}' does have a _mixture_ method, "
"but it returned NotImplemented.".format(type(val)))
def has_mixture(val: Any) -> bool:
"""Returns whether the value has a mixture representation.
Returns:
If `val` has a `_has_mixture_` method and its result is not
        NotImplemented, that result is returned. Otherwise, returns True if
        the value has a `_mixture_` method that returns a non-default value.
Returns False if neither function exists.
"""
getter = getattr(val, '_has_mixture_', None)
result = NotImplemented if getter is None else getter()
if result is not NotImplemented:
return result
# No _has_mixture_ function, use _mixture_ instead
return mixture(val, None) is not None
def mixture_channel(
val: Any,
default: Any = RaiseTypeErrorIfNotProvided) -> Sequence[
Tuple[float, np.ndarray]]:
"""Return a sequence of tuples for a channel that is a mixture of unitaries.
In contrast to `mixture` this method falls back to `unitary` if `_mixture_`
is not implemented.
A mixture channel is described by an iterable of tuples of the form
(probability of unitary, unitary)
The probability components of the tuples must sum to 1.0 and be between
0 and 1 (inclusive) and the `unitary` must be a unitary matrix.
Args:
val: The value whose mixture_channel is being computed.
default: A default value if val does not support mixture.
Returns:
An iterable of tuples of size 2. The first element of the tuple is a
probability (between 0 and 1) and the second is the unitary that occurs
with that probability. The probabilities will sum to 1.0.
"""
mixture_getter = getattr(val, '_mixture_', None)
result = NotImplemented if mixture_getter is None else mixture_getter()
if result is not NotImplemented:
return result
unitary_getter = getattr(val, '_unitary_', None)
result = NotImplemented if unitary_getter is None else unitary_getter()
if result is not NotImplemented:
return ((1.0, result),)
if default is not RaiseTypeErrorIfNotProvided:
return default
if mixture_getter is None and unitary_getter is None:
raise TypeError(
"object of type '{}' has no _mixture_ or _unitary_ method."
.format(type(val)))
raise TypeError("object of type '{}' does have a _mixture_ or _unitary_ "
"method, but it returned NotImplemented.".format(type(val)))
def has_mixture_channel(val: Any) -> bool:
"""Returns whether the value has a mixture channel representation.
In contrast to `has_mixture` this method falls back to checking whether
the value has a unitary representation via `has_channel`.
Returns:
If `val` has a `_has_mixture_` method and its result is not
NotImplemented, that result is returned. Otherwise, if `val` has a
        `_has_unitary_` method and its result is not NotImplemented, that
        result is returned. Otherwise, if the value has a `_mixture_` method
        that returns a non-default value, True is returned. Returns False if
        none of these functions exist.
"""
mixture_getter = getattr(val, '_has_mixture_', None)
result = NotImplemented if mixture_getter is None else mixture_getter()
if result is not NotImplemented:
return result
result = has_unitary(val)
if result is not NotImplemented and result:
return result
# No _has_mixture_ or _has_unitary_ function, use _mixture_ instead.
return mixture_channel(val, None) is not None
def validate_mixture(supports_mixture: SupportsMixture):
"""Validates that the mixture's tuple are valid probabilities."""
mixture_tuple = mixture(supports_mixture, None)
if mixture_tuple is None:
raise TypeError('{}_mixture did not have a _mixture_ method'.format(
supports_mixture))
def validate_probability(p, p_str):
if p < 0:
raise ValueError('{} was less than 0.'.format(p_str))
elif p > 1:
raise ValueError('{} was greater than 1.'.format(p_str))
total = 0.0
for p, val in mixture_tuple:
validate_probability(p, '{}\'s probability'.format(str(val)))
total += p
if not np.isclose(total, 1.0):
raise ValueError('Sum of probabilities of a mixture was not 1.0')
|
the-stack_0_10146 | """Config flow for HVV integration."""
import logging
from pygti.auth import GTI_DEFAULT_HOST
from pygti.exceptions import CannotConnect, InvalidAuth
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_OFFSET, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
import homeassistant.helpers.config_validation as cv
from .const import ( # pylint:disable=unused-import
CONF_FILTER,
CONF_REAL_TIME,
CONF_STATION,
DOMAIN,
)
from .hub import GTIHub
_LOGGER = logging.getLogger(__name__)
SCHEMA_STEP_USER = vol.Schema(
{
vol.Required(CONF_HOST, default=GTI_DEFAULT_HOST): str,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
}
)
SCHEMA_STEP_STATION = vol.Schema({vol.Required(CONF_STATION): str})
SCHEMA_STEP_OPTIONS = vol.Schema(
{
vol.Required(CONF_FILTER): vol.In([]),
vol.Required(CONF_OFFSET, default=0): cv.positive_int,
vol.Optional(CONF_REAL_TIME, default=True): bool,
}
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for HVV."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize component."""
self.hub = None
self.data = None
self.stations = {}
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
session = aiohttp_client.async_get_clientsession(self.hass)
self.hub = GTIHub(
user_input[CONF_HOST],
user_input[CONF_USERNAME],
user_input[CONF_PASSWORD],
session,
)
try:
response = await self.hub.authenticate()
_LOGGER.debug("Init gti: %r", response)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
if not errors:
self.data = user_input
return await self.async_step_station()
return self.async_show_form(
step_id="user", data_schema=SCHEMA_STEP_USER, errors=errors
)
async def async_step_station(self, user_input=None):
"""Handle the step where the user inputs his/her station."""
if user_input is not None:
errors = {}
check_name = await self.hub.gti.checkName(
{"theName": {"name": user_input[CONF_STATION]}, "maxList": 20}
)
stations = check_name.get("results")
self.stations = {
f"{station.get('name')}": station
for station in stations
if station.get("type") == "STATION"
}
if not self.stations:
errors["base"] = "no_results"
return self.async_show_form(
step_id="station", data_schema=SCHEMA_STEP_STATION, errors=errors
)
# schema
return await self.async_step_station_select()
return self.async_show_form(step_id="station", data_schema=SCHEMA_STEP_STATION)
async def async_step_station_select(self, user_input=None):
"""Handle the step where the user inputs his/her station."""
schema = vol.Schema({vol.Required(CONF_STATION): vol.In(list(self.stations))})
if user_input is None:
return self.async_show_form(step_id="station_select", data_schema=schema)
self.data.update({"station": self.stations[user_input[CONF_STATION]]})
title = self.data[CONF_STATION]["name"]
return self.async_create_entry(title=title, data=self.data)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get options flow."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options flow handler."""
def __init__(self, config_entry):
"""Initialize HVV Departures options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
self.departure_filters = {}
self.hub = None
async def async_step_init(self, user_input=None):
"""Manage the options."""
errors = {}
if not self.departure_filters:
departure_list = {}
self.hub = self.hass.data[DOMAIN][self.config_entry.entry_id]
try:
departure_list = await self.hub.gti.departureList(
{
"station": self.config_entry.data[CONF_STATION],
"time": {"date": "heute", "time": "jetzt"},
"maxList": 5,
"maxTimeOffset": 200,
"useRealtime": True,
"returnFilters": True,
}
)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
if not errors:
self.departure_filters = {
str(i): departure_filter
for i, departure_filter in enumerate(departure_list.get("filter"))
}
if user_input is not None and not errors:
options = {
CONF_FILTER: [
self.departure_filters[x] for x in user_input[CONF_FILTER]
],
CONF_OFFSET: user_input[CONF_OFFSET],
CONF_REAL_TIME: user_input[CONF_REAL_TIME],
}
return self.async_create_entry(title="", data=options)
if CONF_FILTER in self.config_entry.options:
old_filter = [
i
for (i, f) in self.departure_filters.items()
if f in self.config_entry.options.get(CONF_FILTER)
]
else:
old_filter = []
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(CONF_FILTER, default=old_filter): cv.multi_select(
{
key: f"{departure_filter['serviceName']}, {departure_filter['label']}"
for key, departure_filter in self.departure_filters.items()
}
),
vol.Required(
CONF_OFFSET,
default=self.config_entry.options.get(CONF_OFFSET, 0),
): cv.positive_int,
vol.Optional(
CONF_REAL_TIME,
default=self.config_entry.options.get(CONF_REAL_TIME, True),
): bool,
}
),
errors=errors,
)
|
the-stack_0_10148 | # Copyright 2019 The Shaderc Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the expect module."""
import expect
from glslc_test_framework import TestStatus
import re
import unittest
class TestStdoutMatchADotC(expect.StdoutMatch):
expected_stdout = re.compile('a.c')
class TestExpect(unittest.TestCase):
def test_get_object_name(self):
"""Tests get_object_filename()."""
source_and_object_names = [('a.vert', 'a.vert.spv'),
('b.frag', 'b.frag.spv'),
('c.tesc', 'c.tesc.spv'),
('d.tese', 'd.tese.spv'),
('e.geom', 'e.geom.spv'),
('f.comp', 'f.comp.spv'),
                                   ('file', 'file.spv'),
                                   ('file.', 'file.spv'),
                                   ('file.uk', 'file.spv'),
                                   ('file.vert.', 'file.vert.spv'),
                                   ('file.vert.bla', 'file.vert.spv')]
actual_object_names = [
expect.get_object_filename(f[0]) for f in source_and_object_names
]
expected_object_names = [f[1] for f in source_and_object_names]
self.assertEqual(actual_object_names, expected_object_names)
def test_stdout_match_regex_has_match(self):
test = TestStdoutMatchADotC()
status = TestStatus(
test_manager=None,
returncode=0,
stdout=b'0abc1',
stderr=None,
directory=None,
inputs=None,
input_filenames=None)
self.assertTrue(test.check_stdout_match(status)[0])
def test_stdout_match_regex_no_match(self):
test = TestStdoutMatchADotC()
status = TestStatus(
test_manager=None,
returncode=0,
stdout=b'ab',
stderr=None,
directory=None,
inputs=None,
input_filenames=None)
self.assertFalse(test.check_stdout_match(status)[0])
def test_stdout_match_regex_empty_stdout(self):
test = TestStdoutMatchADotC()
status = TestStatus(
test_manager=None,
returncode=0,
stdout=b'',
stderr=None,
directory=None,
inputs=None,
input_filenames=None)
self.assertFalse(test.check_stdout_match(status)[0])
|
the-stack_0_10152 | from fuzzysearch import find_near_matches
from . import x_execer, filepath, x_env
import subprocess
def f_search(keyword: str, paths: [str]):
res = []
for p in paths:
if find_near_matches(keyword, p, max_l_dist=0) != []:
            # hit: the keyword matched this path, keep it
res.append(p)
return res
def interactive(l: [str]):
if len(l) <= 0:
return None
str_list = repr("\n".join(l))
tmpFile = x_env["ENHANCD_DIR"] / "tmp"
filterCmd = filepath.split_filterlist(x_env["ENHANCD_FILTER"])
if filterCmd is None:
return None
x_execer.eval(f'echo {str_list} | {filterCmd} > {str(tmpFile)}')
file = open(tmpFile, "r")
select = str(file.read())
file.close()
tmpFile.unlink()
if select is None:
return None
return select.rstrip("\n")
|
the-stack_0_10153 | import random
import argparse
from functools import partial
import numpy as np
import paddle
import paddle.distributed as dist
from paddle.io import DataLoader, DistributedBatchSampler, BatchSampler
from paddlenlp.data import Pad
# yapf: disable
def parse_args():
parser = argparse.ArgumentParser(__doc__)
parser.add_argument('--model_name_or_path', type=str, default='unified_transformer-12L-cn-luge', help='The path or shortcut name of the pre-trained model.')
parser.add_argument('--save_dir', type=str, default='./checkpoints', help='The directory where the checkpoints will be saved.')
parser.add_argument('--output_path', type=str, default='./predict.txt', help='The file path where the infer result will be saved.')
parser.add_argument('--logging_steps', type=int, default=100, help='Log every X updates steps.')
parser.add_argument('--save_steps', type=int, default=1000, help='Save checkpoint every X updates steps.')
parser.add_argument('--seed', type=int, default=2021, help='Random seed for initialization.')
parser.add_argument('--batch_size', type=int, default=16, help='Batch size per GPU/CPU for training.')
parser.add_argument('--lr', type=float, default=5e-5, help='The initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=0.01, help='The weight decay for optimizer.')
parser.add_argument('--epochs', type=int, default=3, help='Total number of training epochs to perform.')
parser.add_argument('--warmup_steps', type=int, default=2500, help='The number of warmup steps.')
parser.add_argument('--max_grad_norm', type=float, default=0.1, help='The max value of grad norm.')
parser.add_argument('--max_seq_len', type=int, default=512, help='The maximum sequence length of training.')
parser.add_argument('--max_response_len', type=int, default=128, help='The maximum response sequence length of training.')
parser.add_argument('--max_knowledge_len', type=int, default=256, help='The maximum knowledge sequence length of training.')
parser.add_argument('--min_dec_len', type=int, default=1, help='The minimum sequence length of generation.')
parser.add_argument('--max_dec_len', type=int, default=64, help='The maximum sequence length of generation.')
parser.add_argument('--num_samples', type=int, default=1, help='The decode numbers in generation.')
parser.add_argument('--decode_strategy', type=str, default='sampling', help='The decode strategy in generation.')
parser.add_argument('--top_k', type=int, default=0, help='The number of highest probability vocabulary tokens to keep for top-k sampling.')
parser.add_argument('--temperature', type=float, default=1.0, help='The value used to module the next token probabilities.')
parser.add_argument('--top_p', type=float, default=1.0, help='The cumulative probability for top-p sampling.')
parser.add_argument('--num_beams', type=int, default=0, help='The number of beams for beam search.')
parser.add_argument('--length_penalty', type=float, default=1.0, help='The exponential penalty to the sequence length for beam search.')
parser.add_argument('--early_stopping', type=eval, default=False, help='Whether to stop the beam search when at least `num_beams` sentences are finished per batch or not.')
parser.add_argument('--device', type=str, default='gpu', help='The device to select for training the model.')
args = parser.parse_args()
return args
# yapf: enable
def print_args(args):
print('----------- Configuration Arguments -----------')
for arg, value in sorted(vars(args).items()):
print('%s: %s' % (arg, value))
print('------------------------------------------------')
def set_seed(seed):
# Use the same data seed(for data shuffle) for all procs to guarantee data
# consistency after sharding.
random.seed(seed)
np.random.seed(seed)
# Maybe different op seeds(for dropout) for different procs is better.
paddle.seed(seed + dist.get_rank())
def preprocess_examples(examples, mode='train'):
"""
For training set and dev set, treat each utterance of the first speaker as
the response, and concatenate the goal, knowledge and the dialog’s previous
utterances as the history. In this way, multiple history-response pairs
are constructed.
"""
if mode == 'test':
return examples
new_examples = []
for example in examples:
conversation = example['conversation']
for i in range(0, len(conversation), 2):
new_examples.append({
'goal': example['goal'],
'knowledge': example['knowledge'],
'history': conversation[:i],
'response': conversation[i]
})
return new_examples
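# Illustrative example of the pairs built above (utterances u0..u3 are
# placeholders): a conversation [u0, u1, u2, u3] yields two examples,
# {'history': [], 'response': u0, ...} and
# {'history': [u0, u1], 'response': u2, ...}; only even-indexed utterances
# (the first speaker) ever become responses.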
def convert_example(example,
tokenizer,
max_seq_len=512,
max_response_len=128,
max_knowledge_len=256,
mode='train'):
"""Convert all examples into necessary features."""
goal = example['goal']
knowledge = example['knowledge']
goal_knowledge = ' '.join([' '.join(lst) for lst in goal + knowledge])
if mode != 'test':
tokenized_example = tokenizer.dialogue_encode(
example['history'],
response=example['response'],
knowledge=goal_knowledge,
task_type='knowledge',
max_seq_len=max_seq_len,
max_response_len=max_response_len,
max_knowledge_len=max_knowledge_len,
return_length=True)
response_start = tokenized_example['input_ids'].index(
tokenizer.cls_token_id, 1)
response_end = tokenized_example['seq_len']
# Use to gather the logits corresponding to the labels during training
tokenized_example['masked_positions'] = list(
range(response_start, response_end - 1))
tokenized_example['labels'] = tokenized_example['input_ids'][
response_start + 1:response_end]
return tokenized_example
else:
tokenized_example = tokenizer.dialogue_encode(
example['history'],
knowledge=goal_knowledge,
task_type='knowledge',
max_seq_len=max_seq_len,
max_knowledge_len=max_knowledge_len,
add_start_token_as_response=True)
if 'response' in example:
tokenized_example['response'] = example['response']
return tokenized_example
def batchify_fn(batch_examples, pad_val, mode):
def pad_mask(batch_attention_mask):
batch_size = len(batch_attention_mask)
max_len = max(map(len, batch_attention_mask))
attention_mask = np.ones(
(batch_size, max_len, max_len), dtype='float32') * -1e9
for i, mask_data in enumerate(attention_mask):
seq_len = len(batch_attention_mask[i])
mask_data[-seq_len:, -seq_len:] = np.array(
batch_attention_mask[i], dtype='float32')
# In order to ensure the correct broadcasting mechanism, expand one
# dimension to the second dimension (n_head of Transformer).
attention_mask = np.expand_dims(attention_mask, axis=1)
return attention_mask
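    # pad_mask returns a float32 array of shape
    # (batch_size, 1, max_len, max_len); positions left at -1e9 correspond to
    # (left-)padding and are effectively ignored after softmax, while the
    # size-1 axis broadcasts over the transformer's attention heads.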
pad_func = Pad(pad_val=pad_val, pad_right=False)
input_ids = pad_func([example['input_ids'] for example in batch_examples])
token_type_ids = pad_func(
[example['token_type_ids'] for example in batch_examples])
position_ids = pad_func(
[example['position_ids'] for example in batch_examples])
attention_mask = pad_mask(
[example['attention_mask'] for example in batch_examples])
if mode != 'test':
max_len = max([example['seq_len'] for example in batch_examples])
masked_positions = np.concatenate([
np.array(example['masked_positions']) +
(max_len - example['seq_len']) + i * max_len
for i, example in enumerate(batch_examples)
])
labels = np.concatenate(
[np.array(example['labels']) for example in batch_examples])
return input_ids, token_type_ids, position_ids, attention_mask, masked_positions, labels
else:
return input_ids, token_type_ids, position_ids, attention_mask
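# Illustrative sketch (not part of the original script): batchify_fn pads every
# field on the LEFT (pad_right=False), and pad_mask turns the per-example square
# attention masks into one float tensor of shape (batch, 1, max_len, max_len),
# where the padded prefix keeps a large negative value (-1e9) so it is
# effectively ignored by softmax. For two hypothetical examples of lengths 2 and
# 4 (each dict carrying input_ids / token_type_ids / position_ids and a square
# attention_mask, elided here), mode='test' returns:
#
# >>> input_ids, token_type_ids, position_ids, attention_mask = \
# ...     batchify_fn(batch_examples, pad_val=0, mode='test')
# >>> input_ids.shape, attention_mask.shape
# ((2, 4), (2, 1, 4, 4))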
def create_data_loader(dataset, tokenizer, args, mode):
trans_func1 = partial(preprocess_examples, mode=mode)
trans_func2 = partial(
convert_example,
tokenizer=tokenizer,
max_seq_len=args.max_seq_len,
max_response_len=args.max_response_len,
max_knowledge_len=args.max_knowledge_len,
mode=mode)
dataset = dataset.map(trans_func1, batched=True).map(trans_func2, lazy=True)
if mode == 'train':
batch_sampler = DistributedBatchSampler(
dataset, batch_size=args.batch_size, shuffle=True)
else:
batch_sampler = BatchSampler(
dataset, batch_size=args.batch_size, shuffle=False)
collate_fn = partial(batchify_fn, pad_val=tokenizer.pad_token_id, mode=mode)
data_loader = DataLoader(
dataset,
batch_sampler=batch_sampler,
collate_fn=collate_fn,
return_list=True)
return dataset, data_loader
def post_process_response(token_ids, tokenizer):
"""Post-process the decoded sequence. Truncate from the first <eos>."""
eos_pos = len(token_ids)
for i, tok_id in enumerate(token_ids):
if tok_id == tokenizer.sep_token_id:
eos_pos = i
break
token_ids = token_ids[:eos_pos]
tokens = tokenizer.convert_ids_to_tokens(token_ids)
tokens = tokenizer.merge_subword(tokens)
return token_ids, tokens
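# Illustrative sketch (not part of the original script): decoding is truncated at
# the first sep/eos id. Assuming a hypothetical tokenizer with sep_token_id == 2:
#
# >>> token_ids, tokens = post_process_response([10, 11, 2, 12], tokenizer)
# >>> token_ids
# [10, 11]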
def get_in_turn_repetition(pred, is_cn=False):
"""Get in-turn repetition."""
if len(pred) == 0:
        return True
if isinstance(pred[0], str):
pred = [tok.lower() for tok in pred]
if is_cn:
pred = "".join(pred)
tri_grams = set()
for i in range(len(pred) - 2):
tri_gram = tuple(pred[i:i + 3])
if tri_gram in tri_grams:
return True
tri_grams.add(tri_gram)
return False
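# Illustrative sketch (not part of the original script): a prediction is flagged
# as repetitive as soon as any tri-gram occurs twice within it.
#
# >>> get_in_turn_repetition(['a', 'b', 'c', 'a', 'b', 'c'])
# True
# >>> get_in_turn_repetition(['a', 'b', 'c', 'd'])
# False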
def select_response(ids, scores, tokenizer, max_dec_len=None, num_samples=1):
ids = ids.numpy().tolist()
scores = scores.numpy()
    if len(ids) != len(scores) or (len(ids) % num_samples) != 0:
        raise ValueError(
            "`ids` (length {}) and `scores` (length {}) must have the same "
            "length, and it must be divisible by `num_samples` ({}).".format(
                len(ids), len(scores), num_samples))
group = []
tmp = []
for pred, score in zip(ids, scores):
pred_token_ids, pred_tokens = post_process_response(pred, tokenizer)
num_token = len(pred_token_ids)
response = " ".join(pred_tokens)
in_turn_repetition = get_in_turn_repetition(
pred_tokens, True) or get_in_turn_repetition(pred_token_ids)
        # Penalize candidates that reach max_dec_len without ending, or that repeat in-turn.
if max_dec_len is not None and num_token >= max_dec_len:
score -= 1e3
elif in_turn_repetition:
score -= 1e3
tmp.append([response, score])
if len(tmp) == num_samples:
group.append(tmp)
tmp = []
results = []
for preds in group:
preds = sorted(preds, key=lambda x: -x[1])
results.append(preds[0][0])
return results
|
the-stack_0_10155 | #!/usr/bin/python
#
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example creates a flash display creative.
Requires a flash asset, backup image asset, and an advertiser ID as input.
To get an advertiser ID, run get_advertisers.py.
"""
import argparse
import sys
from apiclient.http import MediaFileUpload
from oauth2client import client
import dfareporting_utils
# Declare command-line flags.
argparser = argparse.ArgumentParser(add_help=False)
argparser.add_argument(
'profile_id', type=int,
    help='The ID of the user profile to make this request as.')
argparser.add_argument(
'advertiser_id', type=int,
help='The ID of the advertiser to associate this creative with.')
argparser.add_argument(
'size_id', type=int,
help='The ID of the size of this creative.')
argparser.add_argument(
'flash_asset_name',
help='Suggested name to use for the uploaded creative asset.')
argparser.add_argument(
'path_to_flash_asset_file',
help='Path to the asset file to be uploaded.')
argparser.add_argument(
'backup_image_name',
help='Suggested name to use for the uploaded creative asset.')
argparser.add_argument(
'path_to_backup_image_file',
help='Path to the asset file to be uploaded.')
def main(argv):
# Retrieve command line arguments.
flags = dfareporting_utils.get_arguments(argv, __doc__, parents=[argparser])
# Authenticate and construct service.
service = dfareporting_utils.setup(flags)
profile_id = flags.profile_id
advertiser_id = flags.advertiser_id
backup_image_name = flags.backup_image_name
flash_asset_name = flags.flash_asset_name
path_to_backup_image_file = flags.path_to_backup_image_file
path_to_flash_asset_file = flags.path_to_flash_asset_file
size_id = flags.size_id
try:
# Upload the flash asset
flash_asset_id = upload_creative_asset(
service, profile_id, advertiser_id, flash_asset_name,
path_to_flash_asset_file, 'FLASH')
# Upload the backup image asset
backup_image_asset_id = upload_creative_asset(
service, profile_id, advertiser_id, backup_image_name,
path_to_backup_image_file, 'HTML_IMAGE')
# Construct the creative structure.
creative = {
'advertiserId': advertiser_id,
'backupImageClickThroughUrl': 'https://www.google.com',
'backupImageReportingLabel': 'backup_image_exit',
'backupImageTargetWindow': {'targetWindowOption': 'NEW_WINDOW'},
'clickTags': [{
'eventName': 'exit',
'name': 'click_tag',
'value': 'https://www.google.com'
}],
'creativeAssets': [
{'assetIdentifier': flash_asset_id, 'role': 'PRIMARY',
'windowMode': 'TRANSPARENT'},
{'assetIdentifier': backup_image_asset_id, 'role': 'BACKUP_IMAGE'},
],
'name': 'Test flash display creative',
'size': {'id': size_id},
'type': 'ENHANCED_BANNER'
}
request = service.creatives().insert(profileId=profile_id, body=creative)
# Execute request and print response.
response = request.execute()
print ('Created flash display creative with ID %s and name "%s".'
% (response['id'], response['name']))
except client.AccessTokenRefreshError:
print ('The credentials have been revoked or expired, please re-run the '
'application to re-authorize')
def upload_creative_asset(
service, profile_id, advertiser_id, asset_name, path_to_asset_file,
asset_type):
"""Uploads a creative asset and returns an assetIdentifier."""
# Construct the creative asset metadata
creative_asset = {
'assetIdentifier': {
'name': asset_name,
'type': asset_type
}
}
media = MediaFileUpload(path_to_asset_file)
if not media.mimetype():
media = MediaFileUpload(path_to_asset_file, 'application/octet-stream')
response = service.creativeAssets().insert(
advertiserId=advertiser_id,
profileId=profile_id,
media_body=media,
body=creative_asset).execute()
return response['assetIdentifier']
if __name__ == '__main__':
main(sys.argv)
|
the-stack_0_10157 | # Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from heatclient.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
import re
from babel import localedata
import six
_localedir = os.environ.get('heatclient'.upper() + '_LOCALEDIR')
_t = gettext.translation('heatclient', localedir=_localedir, fallback=True)
# We use separate translation catalogs for each log level, so set up a
# mapping between the log level name and the translator. The domain
# for the log level is project_name + "-log-" + log_level so messages
# for each level end up in their own catalog.
_t_log_levels = dict(
(level, gettext.translation('heatclient' + '-log-' + level,
localedir=_localedir,
fallback=True))
for level in ['info', 'warning', 'error', 'critical']
)
_AVAILABLE_LANGUAGES = {}
USE_LAZY = False
def enable_lazy():
"""Convenience function for configuring _() to use lazy gettext
Call this at the start of execution to enable the gettextutils._
function to use lazy gettext functionality. This is useful if
your project is importing _ directly instead of using the
gettextutils.install() way of importing the _ function.
"""
global USE_LAZY
USE_LAZY = True
def _(msg):
if USE_LAZY:
return Message(msg, domain='heatclient')
else:
if six.PY3:
return _t.gettext(msg)
return _t.ugettext(msg)
def _log_translation(msg, level):
"""Build a single translation of a log message
"""
if USE_LAZY:
return Message(msg, domain='heatclient' + '-log-' + level)
else:
translator = _t_log_levels[level]
if six.PY3:
return translator.gettext(msg)
return translator.ugettext(msg)
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')
def install(domain, lazy=False):
"""Install a _() function using the given translation domain.
Given a translation domain, install a _() function using gettext's
install() function.
The main difference from gettext.install() is that we allow
overriding the default localedir (e.g. /usr/share/locale) using
a translation-domain-specific environment variable (e.g.
NOVA_LOCALEDIR).
:param domain: the translation domain
:param lazy: indicates whether or not to install the lazy _() function.
The lazy _() introduces a way to do deferred translation
of messages by installing a _ that builds Message objects,
instead of strings, which can then be lazily translated into
any available locale.
"""
if lazy:
# NOTE(mrodden): Lazy gettext functionality.
#
# The following introduces a deferred way to do translations on
# messages in OpenStack. We override the standard _() function
# and % (format string) operation to build Message objects that can
# later be translated when we have more information.
def _lazy_gettext(msg):
"""Create and return a Message object.
Lazy gettext function for a given domain, it is a factory method
for a project/module to get a lazy gettext function for its own
translation domain (i.e. nova, glance, cinder, etc.)
Message encapsulates a string so that we can translate
it later when needed.
"""
return Message(msg, domain=domain)
from six import moves
moves.builtins.__dict__['_'] = _lazy_gettext
else:
localedir = '%s_LOCALEDIR' % domain.upper()
if six.PY3:
gettext.install(domain,
localedir=os.environ.get(localedir))
else:
gettext.install(domain,
localedir=os.environ.get(localedir),
unicode=True)
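# Illustrative usage sketch (not part of the original module): install() puts a
# translation function into builtins, so modules can use _() without importing
# it. With lazy=True the installed _() returns Message objects that can be
# translated per-locale later.
#
# >>> from heatclient.openstack.common import gettextutils
# >>> gettextutils.install('heatclient', lazy=True)
# >>> type(_('quota exceeded'))
# <class 'heatclient.openstack.common.gettextutils.Message'>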
class Message(six.text_type):
"""A Message object is a unicode object that can be translated.
Translation of Message is done explicitly using the translate() method.
For all non-translation intents and purposes, a Message is simply unicode,
and can be treated as such.
"""
def __new__(cls, msgid, msgtext=None, params=None,
domain='heatclient', *args):
"""Create a new Message object.
In order for translation to work gettext requires a message ID, this
msgid will be used as the base unicode text. It is also possible
for the msgid and the base unicode text to be different by passing
the msgtext parameter.
"""
# If the base msgtext is not given, we use the default translation
# of the msgid (which is in English) just in case the system locale is
# not English, so that the base text will be in that locale by default.
if not msgtext:
msgtext = Message._translate_msgid(msgid, domain)
# We want to initialize the parent unicode with the actual object that
# would have been plain unicode if 'Message' was not enabled.
msg = super(Message, cls).__new__(cls, msgtext)
msg.msgid = msgid
msg.domain = domain
msg.params = params
return msg
def translate(self, desired_locale=None):
"""Translate this message to the desired locale.
:param desired_locale: The desired locale to translate the message to,
if no locale is provided the message will be
translated to the system's default locale.
:returns: the translated message in unicode
"""
translated_message = Message._translate_msgid(self.msgid,
self.domain,
desired_locale)
if self.params is None:
# No need for more translation
return translated_message
# This Message object may have been formatted with one or more
# Message objects as substitution arguments, given either as a single
# argument, part of a tuple, or as one or more values in a dictionary.
# When translating this Message we need to translate those Messages too
translated_params = _translate_args(self.params, desired_locale)
translated_message = translated_message % translated_params
return translated_message
@staticmethod
def _translate_msgid(msgid, domain, desired_locale=None):
if not desired_locale:
system_locale = locale.getdefaultlocale()
# If the system locale is not available to the runtime use English
if not system_locale[0]:
desired_locale = 'en_US'
else:
desired_locale = system_locale[0]
locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
lang = gettext.translation(domain,
localedir=locale_dir,
languages=[desired_locale],
fallback=True)
if six.PY3:
translator = lang.gettext
else:
translator = lang.ugettext
translated_message = translator(msgid)
return translated_message
def __mod__(self, other):
# When we mod a Message we want the actual operation to be performed
# by the parent class (i.e. unicode()), the only thing we do here is
# save the original msgid and the parameters in case of a translation
params = self._sanitize_mod_params(other)
unicode_mod = super(Message, self).__mod__(params)
modded = Message(self.msgid,
msgtext=unicode_mod,
params=params,
domain=self.domain)
return modded
def _sanitize_mod_params(self, other):
"""Sanitize the object being modded with this Message.
- Add support for modding 'None' so translation supports it
- Trim the modded object, which can be a large dictionary, to only
those keys that would actually be used in a translation
- Snapshot the object being modded, in case the message is
translated, it will be used as it was when the Message was created
"""
if other is None:
params = (other,)
elif isinstance(other, dict):
params = self._trim_dictionary_parameters(other)
else:
params = self._copy_param(other)
return params
def _trim_dictionary_parameters(self, dict_param):
"""Return a dict that only has matching entries in the msgid."""
# NOTE(luisg): Here we trim down the dictionary passed as parameters
# to avoid carrying a lot of unnecessary weight around in the message
# object, for example if someone passes in Message() % locals() but
# only some params are used, and additionally we prevent errors for
# non-deepcopyable objects by unicoding() them.
# Look for %(param) keys in msgid;
# Skip %% and deal with the case where % is first character on the line
        keys = re.findall(r'(?:[^%]|^)?%\((\w*)\)[a-z]', self.msgid)
        # If we don't find any %(param) keys but have a %s
        if not keys and re.findall(r'(?:[^%]|^)%[a-z]', self.msgid):
# Apparently the full dictionary is the parameter
params = self._copy_param(dict_param)
else:
params = {}
# Save our existing parameters as defaults to protect
# ourselves from losing values if we are called through an
# (erroneous) chain that builds a valid Message with
# arguments, and then does something like "msg % kwds"
# where kwds is an empty dictionary.
src = {}
if isinstance(self.params, dict):
src.update(self.params)
src.update(dict_param)
for key in keys:
params[key] = self._copy_param(src[key])
return params
def _copy_param(self, param):
try:
return copy.deepcopy(param)
except TypeError:
# Fallback to casting to unicode this will handle the
# python code-like objects that can't be deep-copied
return six.text_type(param)
def __add__(self, other):
msg = _('Message objects do not support addition.')
raise TypeError(msg)
def __radd__(self, other):
return self.__add__(other)
def __str__(self):
# NOTE(luisg): Logging in python 2.6 tries to str() log records,
# and it expects specifically a UnicodeError in order to proceed.
msg = _('Message objects do not support str() because they may '
'contain non-ascii characters. '
'Please use unicode() or translate() instead.')
raise UnicodeError(msg)
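# Illustrative sketch (not part of the original module): a Message remembers its
# msgid and the %-parameters it was formatted with, so it can be re-rendered in
# another locale later. Without a matching catalog installed, the text simply
# falls back to the original (English) msgid.
#
# >>> msg = Message('Hello %(name)s', domain='heatclient') % {'name': 'stack'}
# >>> msg.translate()         # system locale, falls back to English
# 'Hello stack'
# >>> msg.translate('es_ES')  # same fallback when no es_ES catalog is installed
# 'Hello stack'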
def get_available_languages(domain):
"""Lists the available languages for the given translation domain.
:param domain: the domain to get languages for
"""
if domain in _AVAILABLE_LANGUAGES:
return copy.copy(_AVAILABLE_LANGUAGES[domain])
localedir = '%s_LOCALEDIR' % domain.upper()
find = lambda x: gettext.find(domain,
localedir=os.environ.get(localedir),
languages=[x])
# NOTE(mrodden): en_US should always be available (and first in case
# order matters) since our in-line message strings are en_US
language_list = ['en_US']
# NOTE(luisg): Babel <1.0 used a function called list(), which was
# renamed to locale_identifiers() in >=1.0, the requirements master list
# requires >=0.9.6, uncapped, so defensively work with both. We can remove
# this check when the master list updates to >=1.0, and update all projects
list_identifiers = (getattr(localedata, 'list', None) or
getattr(localedata, 'locale_identifiers'))
locale_identifiers = list_identifiers()
for i in locale_identifiers:
if find(i) is not None:
language_list.append(i)
# NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
# locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
# are perfectly legitimate locales:
# https://github.com/mitsuhiko/babel/issues/37
# In Babel 1.3 they fixed the bug and they support these locales, but
# they are still not explicitly "listed" by locale_identifiers().
# That is why we add the locales here explicitly if necessary so that
# they are listed as supported.
aliases = {'zh': 'zh_CN',
'zh_Hant_HK': 'zh_HK',
'zh_Hant': 'zh_TW',
'fil': 'tl_PH'}
for (locale, alias) in six.iteritems(aliases):
if locale in language_list and alias not in language_list:
language_list.append(alias)
_AVAILABLE_LANGUAGES[domain] = language_list
return copy.copy(language_list)
def translate(obj, desired_locale=None):
"""Gets the translated unicode representation of the given object.
If the object is not translatable it is returned as-is.
If the locale is None the object is translated to the system locale.
:param obj: the object to translate
:param desired_locale: the locale to translate the message to, if None the
default system locale will be used
:returns: the translated object in unicode, or the original object if
it could not be translated
"""
message = obj
if not isinstance(message, Message):
# If the object to translate is not already translatable,
# let's first get its unicode representation
message = six.text_type(obj)
if isinstance(message, Message):
# Even after unicoding() we still need to check if we are
# running with translatable unicode before translating
return message.translate(desired_locale)
return obj
def _translate_args(args, desired_locale=None):
"""Translates all the translatable elements of the given arguments object.
This method is used for translating the translatable values in method
arguments which include values of tuples or dictionaries.
If the object is not a tuple or a dictionary the object itself is
translated if it is translatable.
If the locale is None the object is translated to the system locale.
:param args: the args to translate
:param desired_locale: the locale to translate the args to, if None the
default system locale will be used
:returns: a new args object with the translated contents of the original
"""
if isinstance(args, tuple):
return tuple(translate(v, desired_locale) for v in args)
if isinstance(args, dict):
translated_dict = {}
for (k, v) in six.iteritems(args):
translated_v = translate(v, desired_locale)
translated_dict[k] = translated_v
return translated_dict
return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
"""Handler that translates records before logging them.
The TranslationHandler takes a locale and a target logging.Handler object
to forward LogRecord objects to after translating them. This handler
depends on Message objects being logged, instead of regular strings.
The handler can be configured declaratively in the logging.conf as follows:
[handlers]
keys = translatedlog, translator
[handler_translatedlog]
class = handlers.WatchedFileHandler
args = ('/var/log/api-localized.log',)
formatter = context
[handler_translator]
class = openstack.common.log.TranslationHandler
target = translatedlog
args = ('zh_CN',)
If the specified locale is not available in the system, the handler will
log in the default locale.
"""
def __init__(self, locale=None, target=None):
"""Initialize a TranslationHandler
:param locale: locale to use for translating messages
:param target: logging.Handler object to forward
LogRecord objects to after translation
"""
# NOTE(luisg): In order to allow this handler to be a wrapper for
# other handlers, such as a FileHandler, and still be able to
# configure it using logging.conf, this handler has to extend
# MemoryHandler because only the MemoryHandlers' logging.conf
# parsing is implemented such that it accepts a target handler.
handlers.MemoryHandler.__init__(self, capacity=0, target=target)
self.locale = locale
def setFormatter(self, fmt):
self.target.setFormatter(fmt)
def emit(self, record):
# We save the message from the original record to restore it
# after translation, so other handlers are not affected by this
original_msg = record.msg
original_args = record.args
try:
self._translate_and_log_record(record)
finally:
record.msg = original_msg
record.args = original_args
def _translate_and_log_record(self, record):
record.msg = translate(record.msg, self.locale)
# In addition to translating the message, we also need to translate
# arguments that were passed to the log method that were not part
# of the main message e.g., log.info(_('Some message %s'), this_one))
record.args = _translate_args(record.args, self.locale)
self.target.emit(record)
|
the-stack_0_10158 | #!/usr/bin/env python
import sys
import os.path
from os.path import join as PJ
import re
import json
import numpy as np
from tqdm import tqdm
import igraph as ig
import louvain
import math
import jgf
import graph_tool as gt;
import graph_tool.inference as gtInference;
# import infomap
def isFloat(value):
if(value is None):
return False
try:
numericValue = float(value)
return np.isfinite(numericValue)
except ValueError:
return False
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, (np.int_, np.intc, np.intp, np.int8,
np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)):
ret = int(obj)
elif isinstance(obj, (np.float_, np.float16, np.float32, np.float64)):
ret = float(obj)
elif isinstance(obj, (np.ndarray,)):
ret = obj.tolist()
else:
ret = json.JSONEncoder.default(self, obj)
if isinstance(ret, (float)):
if math.isnan(ret):
ret = None
if isinstance(ret, (bytes, bytearray)):
ret = ret.decode("utf-8")
return ret
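# Illustrative usage sketch (not part of the original script): NumpyEncoder lets
# json.dumps handle numpy scalars and arrays (scalar NaN floats become null and
# byte strings are decoded as UTF-8).
#
# >>> json.dumps({'n': np.int64(3), 'v': np.arange(3)}, cls=NumpyEncoder)
# '{"n": 3, "v": [0, 1, 2]}'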
results = {"errors": [], "warnings": [], "brainlife": [], "datatype_tags": [], "tags": []}
def warning(msg):
global results
results['warnings'].append(msg)
#results['brainlife'].append({"type": "warning", "msg": msg})
print(msg)
def error(msg):
global results
results['errors'].append(msg)
#results['brainlife'].append({"type": "error", "msg": msg})
print(msg)
def exitApp():
global results
with open("product.json", "w") as fp:
json.dump(results, fp, cls=NumpyEncoder)
if len(results["errors"]) > 0:
sys.exit(1)
else:
sys.exit()
def exitAppWithError(msg):
global results
results['errors'].append(msg)
#results['brainlife'].append({"type": "error", "msg": msg})
print(msg)
exitApp()
def louvain_find_partition_multiplex(graphs, partition_type,layer_weights=None, seed=None, **kwargs):
""" Detect communities for multiplex graphs.
Each graph should be defined on the same set of vertices, only the edges may
differ for different graphs. See
:func:`Optimiser.optimise_partition_multiplex` for a more detailed
explanation.
Parameters
----------
	graphs : list of :class:`ig.Graph`
	  List of graphs (one per layer) to optimise, all defined on the same vertex set.
partition_type : type of :class:`MutableVertexPartition`
The type of partition to use for optimisation (identical for all graphs).
seed : int
Seed for the random number generator. By default uses a random seed
if nothing is specified.
**kwargs
Remaining keyword arguments, passed on to constructor of ``partition_type``.
Returns
-------
list of int
membership of nodes.
float
Improvement in quality of combined partitions, see
:func:`Optimiser.optimise_partition_multiplex`.
Notes
-----
We don't return a partition in this case because a partition is always
defined on a single graph. We therefore simply return the membership (which
is the same for all layers).
See Also
--------
:func:`Optimiser.optimise_partition_multiplex`
:func:`slices_to_layers`
Examples
--------
>>> n = 100
>>> G_1 = ig.Graph.Lattice([n], 1)
>>> G_2 = ig.Graph.Lattice([n], 1)
>>> membership, improvement = louvain.find_partition_multiplex([G_1, G_2],
... louvain.ModularityVertexPartition)
"""
n_layers = len(graphs)
partitions = []
if(layer_weights is None):
layer_weights = [1]*n_layers
for graph in graphs:
partitions.append(partition_type(graph, **kwargs))
optimiser = louvain.Optimiser()
if (not seed is None):
optimiser.set_rng_seed(seed)
improvement = optimiser.optimise_partition_multiplex(partitions, layer_weights)
return partitions[0].membership, improvement
def SBMMinimizeMembership(graphs,layerWeights=None, weightMode = "real-normal"):
layered = (len(graphs)>1)
graph = graphs[0]
vertexCount = graph.vcount()
g = gt.Graph(directed=graph.is_directed())
for _ in range(vertexCount):
g.add_vertex()
weighted = "weights" in graph.edge_attributes()
if(weighted):
weightsProperty = g.new_edge_property("double")
if(layered):
layerProperty = g.new_edge_property("int32_t")
for graphIndex, graph in enumerate(graphs):
if(weighted):
weightMultiplier = 1
if(layered and layerWeights is not None):
weightMultiplier = layerWeights[graphIndex]
for edge in graph.es:
gedge = g.add_edge(edge.source,edge.target)
if(weighted):
weight = weightMultiplier*edge["weight"]
weightsProperty[gedge] = weight
if(layered):
layerProperty[gedge] = graphIndex
if(weighted):
g.edge_properties["weight"] = weightsProperty
if(layered):
g.edge_properties["layer"] = layerProperty
state_args = {}
if(weighted):
state_args["recs"] = [g.ep.weight]
state_args["rec_types"] = [weightMode]
if(layered):
state_args["ec"] = g.ep.layer
state_args["layers"] = True
# print(state_args)
state = gtInference.minimize.minimize_blockmodel_dl(g,deg_corr=True, layers=layered, state_args=state_args);
return list(state.get_blocks())
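# Illustrative usage sketch (not part of the original script): the function takes
# a list of igraph layers defined over the same vertex set and returns one block
# (community) label per vertex via graph-tool's SBM minimization. Toy example on
# a single unweighted layer:
#
# >>> g = ig.Graph.Famous('Zachary')
# >>> labels = SBMMinimizeMembership([g])
# >>> len(labels) == g.vcount()
# True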
# def infomapMembership(g, parameters):
# vertexCount = g.vcount()
# infomapSimple = infomap.Infomap(parameters)
# infomapSimple.setVerbosity(0)
# infoNetwork = infomapSimple.network()
# for nodeIndex in range(0,vertexCount):
# infoNetwork.addNode(nodeIndex)
# weighted = "weights" in g.edge_attributes()
# for edge in edges:
# weight = 1.0
# if(weighted):
# weight = edge["weight"];
# infoNetwork.addLink(edge.source, edge.target)
# infomapSimple.run()
# membership = [0]*vertexCount
# # print("Result")
# # print("\n#node module")
# for node in infomapSimple.iterTree():
# if node.isLeaf():
# # print((node.physicalId,node.moduleIndex()));
# membership[node.physicalId] = node.moduleIndex();
# return membership;
configFilename = "config.json"
argCount = len(sys.argv)
if(argCount > 1):
configFilename = sys.argv[1]
outputDirectory = "output"
outputFile = PJ(outputDirectory,"network.json.gz")
if(not os.path.exists(outputDirectory)):
os.makedirs(outputDirectory)
with open(configFilename, "r") as fd:
config = json.load(fd)
communiMethod = "louvain"
infomap_trials = 10
louvain_resolution = 1.0
louvain_quality_function = "modularity"
assymetricNegativeWeights = True
if("method" in config):
communiMethod = config["method"].lower()
if("louvain-quality-function" in config and config["louvain-quality-function"]):
louvain_quality_function = config["louvain-quality-function"].lower()
if("louvain-resolution" in config and isFloat(config["louvain-resolution"])):
louvain_resolution = float(config["louvain-resolution"])
if("infomap-trials" in config and config["infomap-trials"]):
infomap_trials = int(config["infomap-trials"])
if("assymetric-negative" in config):
assymetricNegativeWeights = config["assymetric-negative"]
networks = jgf.igraph.load(config["network"], compressed=True)
outputNetworks = []
for network in tqdm(networks):
weighted = "weight" in network.edge_attributes()
layered = False
if(weighted):
signed = np.any(np.array(network.es["weight"])<0)
if(signed):
network_pos = network.subgraph_edges(network.es.select(weight_gt = 0), delete_vertices=False)
network_neg = network.subgraph_edges(network.es.select(weight_lt = 0), delete_vertices=False)
network_neg.es['weight'] = [-w for w in network_neg.es['weight']]
layerNetworks = [network_pos,network_neg]
layerWeights = [1,-1]
layerNames = ["positive","negative"]
layered=True
if("layer" in network.edge_attributes()):
if("edge-layer-weights" in network.attributes()):
layerNames = list(network["edge-layer-weights"].keys())
layerWeights = list(network["edge-layer-weights"].values())
else:
layerNames = list(set(network.es["layer"]))
layerWeights = [1]*len(layerNames)
layerNetworks = []
for layerIndex,layerName in enumerate(layerNames):
layerNetwork = network.subgraph_edges(network.es.select(layer_eq = layerName), delete_vertices=False)
layerNetworks.append(layerNetwork)
layered = True
if(communiMethod=="louvain"):
# optimiser = louvain.Optimiser()
# diff = optimiser.optimise_partition_multiplex(
# [part_pos, part_neg]
hasResolution = False
if(layered):
modularityWeights = layerWeights
partitionFunction = louvain.ModularityVertexPartition
if(louvain_quality_function=="modularity"):
partitionFunction = louvain.ModularityVertexPartition
if(layered and assymetricNegativeWeights):
layerSizes = [g.ecount() for g in layerNetworks]
allCount = np.sum(layerSizes)
modularityWeights = [layerWeights[layerIndex]*layerSizes[layerIndex]/allCount for layerIndex in range(len(layerWeights))]
modularityWeights[0] = 1.0
elif(louvain_quality_function=="rbconfiguration"):
partitionFunction = louvain.RBConfigurationVertexPartition
hasResolution = True
if(layered and assymetricNegativeWeights):
layerSizes = [g.ecount() for g in layerNetworks]
allCount = np.sum(layerSizes)
modularityWeights = [layerWeights[layerIndex]/allCount for layerIndex in range(len(layerWeights))]
modularityWeights[0] = 1.0/layerWeights[0]
elif(louvain_quality_function=="rber"):
partitionFunction = louvain.RBERVertexPartition
hasResolution = True
if(layered and assymetricNegativeWeights):
layerSizes = [g.ecount() for g in layerNetworks]
allCount = np.sum(layerSizes)
modularityWeights = [layerWeights[layerIndex]/allCount for layerIndex in range(len(layerWeights))]
modularityWeights[0] = 1.0/layerWeights[0]
elif(louvain_quality_function=="cpm"):
partitionFunction = louvain.CPMVertexPartition
hasResolution = True
if(layered and assymetricNegativeWeights):
layerSizes = [g.ecount() for g in layerNetworks]
allCount = np.sum(layerSizes)
modularityWeights = [layerWeights[layerIndex]/allCount for layerIndex in range(len(layerWeights))]
modularityWeights[0] = 1.0/layerWeights[0]
elif(louvain_quality_function=="significance"):
partitionFunction = louvain.SignificanceVertexPartition
hasResolution = False
if(weighted):
exitAppWithError("Significance quality does not work for weighted networks")
elif(louvain_quality_function=="surprise"):
partitionFunction = louvain.SurpriseVertexPartition
hasResolution = False
if(layered and assymetricNegativeWeights):
layerSizes = [g.ecount() for g in layerNetworks]
allCount = np.sum(layerSizes)
modularityWeights = [layerWeights[layerIndex]/allCount for layerIndex in range(len(layerWeights))]
modularityWeights[0] = 1.0/layerWeights[0]
else:
exitAppWithError("Invalid louvain method.")
if(layered):
if(hasResolution):
membership, improv = louvain_find_partition_multiplex(layerNetworks,partitionFunction,
layer_weights=modularityWeights,resolution_parameter=louvain_resolution,weights="weight")
else:
membership, improv = louvain_find_partition_multiplex(layerNetworks,partitionFunction,
layer_weights=modularityWeights,weights="weight")
else:
if(hasResolution):
membership = louvain.find_partition(network,partitionFunction,
weights="weight",resolution_parameter=louvain_resolution).membership
else:
membership = louvain.find_partition(network,partitionFunction,
weights="weight").membership
elif(communiMethod=="infomap"):
if(signed):
exitAppWithError("Infomap does not work for negative weights.")
else:
membership = network.community_infomap(edge_weights="weight",trials=infomap_trials).membership
elif(communiMethod=="sbm"):
if(layered):
membership = SBMMinimizeMembership(layerNetworks,layerWeights = layerWeights)
else:
membership = SBMMinimizeMembership([network])
else:
exitAppWithError("Invalid community detection method (%s)."%communiMethod)
network.vs["Community"] = membership
outputNetworks.append(network)
jgf.igraph.save(outputNetworks, outputFile, compressed=True)
exitApp() |
the-stack_0_10159 | #!/usr/bin/env python
#
# Copyright (c) 2009-2013, Luke Maurits <[email protected]>
# All rights reserved.
# With contributions from:
# * Chris Clark
# * Klein Stephane
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * The name of the author may not be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__version__ = '0.7.2'
import copy
import csv
import random
import re
import sys
import textwrap
import unicodedata
# from ax.utils.six.moves import zip, map
py3k = sys.version_info[0] >= 3
if py3k:
    uni_chr = chr
    from html.parser import HTMLParser
else:
    uni_chr = unichr  # noqa: F821 -- builtin that only exists on Python 2
    from HTMLParser import HTMLParser
if py3k and sys.version_info[1] >= 2:
from html import escape
else:
from cgi import escape
# hrule styles
FRAME = 0
ALL = 1
NONE = 2
HEADER = 3
# Table styles
DEFAULT = 10
MSWORD_FRIENDLY = 11
PLAIN_COLUMNS = 12
RANDOM = 20
_re = re.compile('\\033\\[[0-9;]*m')
def _get_size(text):
lines = text.split('\n')
height = len(lines)
width = max([_str_block_width(line) for line in lines])
return (width, height)
class PrettyTable(object):
def __init__(self, field_names=None, **kwargs):
"""Return a new PrettyTable instance
Arguments:
encoding - Unicode encoding scheme used to decode any encoded input
field_names - list or tuple of field names
fields - list or tuple of field names to include in displays
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
header - print a header showing field names (True or False)
header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: FRAME, HEADER, ALL, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
valign - default valign for each row (None, "t", "m" or "b")
reversesort - True or False to sort in descending or ascending order"""
self.encoding = kwargs.get('encoding', 'UTF-8')
# Data
self._field_names = []
self._align = {}
self._valign = {}
self._max_width = {}
self._rows = []
if field_names:
self.field_names = field_names
else:
self._widths = []
# Options
self._options = 'start end fields header border sortby reversesort sort_key attributes format hrules vrules'.split()
self._options.extend(
'int_format float_format padding_width left_padding_width right_padding_width'.split()
)
self._options.extend(
'vertical_char horizontal_char junction_char header_style valign xhtml print_empty'.split()
)
for option in self._options:
if option in kwargs:
self._validate_option(option, kwargs[option])
else:
kwargs[option] = None
self._start = kwargs['start'] or 0
self._end = kwargs['end'] or None
self._fields = kwargs['fields'] or None
if kwargs['header'] in (True, False):
self._header = kwargs['header']
else:
self._header = True
self._header_style = kwargs['header_style'] or None
if kwargs['border'] in (True, False):
self._border = kwargs['border']
else:
self._border = True
self._hrules = kwargs['hrules'] or FRAME
self._vrules = kwargs['vrules'] or ALL
self._sortby = kwargs['sortby'] or None
if kwargs['reversesort'] in (True, False):
self._reversesort = kwargs['reversesort']
else:
self._reversesort = False
self._sort_key = kwargs['sort_key'] or (lambda x: x)
self._int_format = kwargs['int_format'] or {}
self._float_format = kwargs['float_format'] or {}
self._padding_width = kwargs['padding_width'] or 1
self._left_padding_width = kwargs['left_padding_width'] or None
self._right_padding_width = kwargs['right_padding_width'] or None
self._vertical_char = kwargs['vertical_char'] or self._unicode('|')
self._horizontal_char = kwargs['horizontal_char'] or self._unicode('-')
self._junction_char = kwargs['junction_char'] or self._unicode('+')
if kwargs['print_empty'] in (True, False):
self._print_empty = kwargs['print_empty']
else:
self._print_empty = True
self._format = kwargs['format'] or False
self._xhtml = kwargs['xhtml'] or False
self._attributes = kwargs['attributes'] or {}
    def _unicode(self, value):
        if isinstance(value, bytes):
            value = value.decode(self.encoding, 'strict')
        elif not isinstance(value, str):
            value = str(value)
        return value
def _justify(self, text, width, align):
excess = width - _str_block_width(text)
if align == 'l':
return text + excess * ' '
elif align == 'r':
return excess * ' ' + text
else:
if excess % 2:
# Uneven padding
# Put more space on right if text is of odd length...
if _str_block_width(text) % 2:
return (excess // 2) * ' ' + text + (excess // 2 + 1) * ' '
# and more space on left if text is of even length
else:
return (excess // 2 + 1) * ' ' + text + (excess // 2) * ' '
# Why distribute extra space this way? To match the behaviour of
# the inbuilt str.center() method.
else:
# Equal padding on either side
return (excess // 2) * ' ' + text + (excess // 2) * ' '
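    # Illustrative sketch (not part of the original class): how _justify pads a
    # cell value out to the column width for each alignment code.
    #
    # >>> pt = PrettyTable()
    # >>> [pt._justify('ab', 6, a) for a in ('l', 'c', 'r')]
    # ['ab    ', '  ab  ', '    ab']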
def __getattr__(self, name):
if name == 'rowcount':
return len(self._rows)
elif name == 'colcount':
if self._field_names:
return len(self._field_names)
elif self._rows:
return len(self._rows[0])
else:
return 0
else:
raise AttributeError(name)
def __getitem__(self, index):
new = PrettyTable()
new.field_names = self.field_names
for attr in self._options:
setattr(new, '_' + attr, getattr(self, '_' + attr))
setattr(new, '_align', getattr(self, '_align'))
if isinstance(index, slice):
for row in self._rows[index]:
new.add_row(row)
elif isinstance(index, int):
new.add_row(self._rows[index])
else:
raise Exception(
'Index %s is invalid, must be an integer or slice' % str(index)
)
return new
if py3k:
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
return self.__unicode__().encode(self.encoding)
def __unicode__(self):
return self.get_string()
##############################
# ATTRIBUTE VALIDATORS #
##############################
# The method _validate_option is all that should be used elsewhere in the code base to validate options.
# It will call the appropriate validation method for that option. The individual validation methods should
# never need to be called directly (although nothing bad will happen if they *are*).
# Validation happens in TWO places.
    # Firstly, in the property setters defined in the ATTRIBUTE MANAGEMENT section.
# Secondly, in the _get_options method, where keyword arguments are mixed with persistent settings
    def _validate_option(self, option, val):
        if option == 'field_names':
            self._validate_field_names(val)
        elif option in (
            'start',
            'end',
            'max_width',
            'padding_width',
            'left_padding_width',
            'right_padding_width',
            'format',
        ):
            self._validate_nonnegative_int(option, val)
        elif option == 'sortby':
            self._validate_field_name(option, val)
        elif option == 'sort_key':
            self._validate_function(option, val)
        elif option == 'hrules':
            self._validate_hrules(option, val)
        elif option == 'vrules':
            self._validate_vrules(option, val)
        elif option == 'fields':
            self._validate_all_field_names(option, val)
        elif option in ('header', 'border', 'reversesort', 'xhtml', 'print_empty'):
            self._validate_true_or_false(option, val)
        elif option == 'header_style':
            self._validate_header_style(val)
        elif option == 'int_format':
            self._validate_int_format(option, val)
        elif option == 'float_format':
            self._validate_float_format(option, val)
        elif option in ('vertical_char', 'horizontal_char', 'junction_char'):
            self._validate_single_char(option, val)
        elif option == 'attributes':
            self._validate_attributes(option, val)
        else:
            raise Exception('Unrecognised option: %s!' % option)
def _validate_field_names(self, val):
# Check for appropriate length
if self._field_names:
try:
assert len(val) == len(self._field_names)
except AssertionError:
raise Exception(
'Field name list has incorrect number of values, (actual) %d!=%d (expected)'
% (len(val), len(self._field_names))
)
if self._rows:
try:
assert len(val) == len(self._rows[0])
except AssertionError:
raise Exception(
'Field name list has incorrect number of values, (actual) %d!=%d (expected)'
% (len(val), len(self._rows[0]))
)
# Check for uniqueness
try:
assert len(val) == len(set(val))
except AssertionError:
raise Exception('Field names must be unique!')
def _validate_header_style(self, val):
try:
assert val in ('cap', 'title', 'upper', 'lower', None)
except AssertionError:
raise Exception(
'Invalid header style, use cap, title, upper, lower or None!'
)
def _validate_align(self, val):
try:
assert val in ['l', 'c', 'r']
except AssertionError:
raise Exception('Alignment %s is invalid, use l, c or r!' % val)
def _validate_valign(self, val):
try:
assert val in ['t', 'm', 'b', None]
except AssertionError:
raise Exception('Alignment %s is invalid, use t, m, b or None!' % val)
def _validate_nonnegative_int(self, name, val):
try:
assert int(val) >= 0
except AssertionError:
raise Exception('Invalid value for %s: %s!' % (name, self._unicode(val)))
def _validate_true_or_false(self, name, val):
try:
assert val in (True, False)
except AssertionError:
raise Exception('Invalid value for %s! Must be True or False.' % name)
def _validate_int_format(self, name, val):
if val == '':
return
try:
            assert isinstance(val, str)
assert val.isdigit()
except AssertionError:
raise Exception(
'Invalid value for %s! Must be an integer format string.' % name
)
def _validate_float_format(self, name, val):
if val == '':
return
try:
            assert isinstance(val, str)
assert '.' in val
bits = val.split('.')
assert len(bits) <= 2
assert bits[0] == '' or bits[0].isdigit()
assert bits[1] == '' or bits[1].isdigit()
except AssertionError:
raise Exception(
'Invalid value for %s! Must be a float format string.' % name
)
def _validate_function(self, name, val):
try:
assert hasattr(val, '__call__')
except AssertionError:
raise Exception('Invalid value for %s! Must be a function.' % name)
def _validate_hrules(self, name, val):
try:
assert val in (ALL, FRAME, HEADER, NONE)
except AssertionError:
raise Exception(
'Invalid value for %s! Must be ALL, FRAME, HEADER or NONE.' % name
)
def _validate_vrules(self, name, val):
try:
assert val in (ALL, FRAME, NONE)
except AssertionError:
raise Exception(
'Invalid value for %s! Must be ALL, FRAME, or NONE.' % name
)
def _validate_field_name(self, name, val):
try:
assert (val in self._field_names) or (val is None)
except AssertionError:
raise Exception('Invalid field name: %s!' % val)
def _validate_all_field_names(self, name, val):
try:
for x in val:
self._validate_field_name(name, x)
except AssertionError:
raise Exception('fields must be a sequence of field names!')
    def _validate_single_char(self, name, val):
        # NOTE: this check is short-circuited, so separator strings longer than
        # one character are accepted as-is.
        return
def _validate_attributes(self, name, val):
try:
assert isinstance(val, dict)
except AssertionError:
raise Exception('attributes must be a dictionary of name/value pairs!')
##############################
# ATTRIBUTE MANAGEMENT #
##############################
def _get_field_names(self):
return self._field_names
"""The names of the fields
Arguments:
fields - list or tuple of field names"""
def _set_field_names(self, val):
val = [self._unicode(x) for x in val]
self._validate_option('field_names', val)
if self._field_names:
old_names = self._field_names[:]
self._field_names = val
if self._align and old_names:
for old_name, new_name in zip(old_names, val):
self._align[new_name] = self._align[old_name]
for old_name in old_names:
if old_name not in self._align:
self._align.pop(old_name)
else:
for field in self._field_names:
self._align[field] = 'c'
if self._valign and old_names:
for old_name, new_name in zip(old_names, val):
self._valign[new_name] = self._valign[old_name]
for old_name in old_names:
if old_name not in self._valign:
self._valign.pop(old_name)
else:
for field in self._field_names:
self._valign[field] = 't'
field_names = property(_get_field_names, _set_field_names)
def _get_align(self):
return self._align
def _set_align(self, val):
self._validate_align(val)
for field in self._field_names:
self._align[field] = val
align = property(_get_align, _set_align)
def _get_valign(self):
return self._valign
def _set_valign(self, val):
self._validate_valign(val)
for field in self._field_names:
self._valign[field] = val
valign = property(_get_valign, _set_valign)
def _get_max_width(self):
return self._max_width
def _set_max_width(self, val):
self._validate_option('max_width', val)
for field in self._field_names:
self._max_width[field] = val
max_width = property(_get_max_width, _set_max_width)
def _get_fields(self):
"""List or tuple of field names to include in displays
Arguments:
fields - list or tuple of field names to include in displays"""
return self._fields
def _set_fields(self, val):
self._validate_option('fields', val)
self._fields = val
fields = property(_get_fields, _set_fields)
def _get_start(self):
"""Start index of the range of rows to print
Arguments:
start - index of first data row to include in output"""
return self._start
def _set_start(self, val):
self._validate_option('start', val)
self._start = val
start = property(_get_start, _set_start)
def _get_end(self):
"""End index of the range of rows to print
Arguments:
end - index of last data row to include in output PLUS ONE (list slice style)"""
return self._end
def _set_end(self, val):
self._validate_option('end', val)
self._end = val
end = property(_get_end, _set_end)
def _get_sortby(self):
"""Name of field by which to sort rows
Arguments:
sortby - field name to sort by"""
return self._sortby
def _set_sortby(self, val):
self._validate_option('sortby', val)
self._sortby = val
sortby = property(_get_sortby, _set_sortby)
def _get_reversesort(self):
"""Controls direction of sorting (ascending vs descending)
Arguments:
        reversesort - set to True to sort by descending order, or False to sort by ascending order"""
return self._reversesort
def _set_reversesort(self, val):
self._validate_option('reversesort', val)
self._reversesort = val
reversesort = property(_get_reversesort, _set_reversesort)
def _get_sort_key(self):
"""Sorting key function, applied to data points before sorting
Arguments:
sort_key - a function which takes one argument and returns something to be sorted"""
return self._sort_key
def _set_sort_key(self, val):
self._validate_option('sort_key', val)
self._sort_key = val
sort_key = property(_get_sort_key, _set_sort_key)
def _get_header(self):
"""Controls printing of table header with field names
Arguments:
header - print a header showing field names (True or False)"""
return self._header
def _set_header(self, val):
self._validate_option('header', val)
self._header = val
header = property(_get_header, _set_header)
def _get_header_style(self):
"""Controls stylisation applied to field names in header
Arguments:
header_style - stylisation to apply to field names in header ("cap", "title", "upper", "lower" or None)"""
return self._header_style
def _set_header_style(self, val):
self._validate_header_style(val)
self._header_style = val
header_style = property(_get_header_style, _set_header_style)
def _get_border(self):
"""Controls printing of border around table
Arguments:
border - print a border around the table (True or False)"""
return self._border
def _set_border(self, val):
self._validate_option('border', val)
self._border = val
border = property(_get_border, _set_border)
def _get_hrules(self):
"""Controls printing of horizontal rules after rows
Arguments:
hrules - horizontal rules style. Allowed values: FRAME, ALL, HEADER, NONE"""
return self._hrules
def _set_hrules(self, val):
self._validate_option('hrules', val)
self._hrules = val
hrules = property(_get_hrules, _set_hrules)
def _get_vrules(self):
"""Controls printing of vertical rules between columns
Arguments:
vrules - vertical rules style. Allowed values: FRAME, ALL, NONE"""
return self._vrules
def _set_vrules(self, val):
self._validate_option('vrules', val)
self._vrules = val
vrules = property(_get_vrules, _set_vrules)
def _get_int_format(self):
"""Controls formatting of integer data
Arguments:
int_format - integer format string"""
return self._int_format
def _set_int_format(self, val):
# self._validate_option("int_format", val)
for field in self._field_names:
self._int_format[field] = val
int_format = property(_get_int_format, _set_int_format)
def _get_float_format(self):
"""Controls formatting of floating point data
Arguments:
float_format - floating point format string"""
return self._float_format
def _set_float_format(self, val):
# self._validate_option("float_format", val)
for field in self._field_names:
self._float_format[field] = val
float_format = property(_get_float_format, _set_float_format)
def _get_padding_width(self):
"""The number of empty spaces between a column's edge and its content
Arguments:
padding_width - number of spaces, must be a positive integer"""
return self._padding_width
def _set_padding_width(self, val):
self._validate_option('padding_width', val)
self._padding_width = val
padding_width = property(_get_padding_width, _set_padding_width)
def _get_left_padding_width(self):
"""The number of empty spaces between a column's left edge and its content
Arguments:
left_padding - number of spaces, must be a positive integer"""
return self._left_padding_width
def _set_left_padding_width(self, val):
self._validate_option('left_padding_width', val)
self._left_padding_width = val
left_padding_width = property(_get_left_padding_width, _set_left_padding_width)
def _get_right_padding_width(self):
"""The number of empty spaces between a column's right edge and its content
Arguments:
right_padding - number of spaces, must be a positive integer"""
return self._right_padding_width
def _set_right_padding_width(self, val):
self._validate_option('right_padding_width', val)
self._right_padding_width = val
right_padding_width = property(_get_right_padding_width, _set_right_padding_width)
def _get_vertical_char(self):
"""The charcter used when printing table borders to draw vertical lines
Arguments:
vertical_char - single character string used to draw vertical lines"""
return self._vertical_char
def _set_vertical_char(self, val):
val = self._unicode(val)
self._validate_option('vertical_char', val)
self._vertical_char = val
vertical_char = property(_get_vertical_char, _set_vertical_char)
def _get_horizontal_char(self):
"""The charcter used when printing table borders to draw horizontal lines
Arguments:
horizontal_char - single character string used to draw horizontal lines"""
return self._horizontal_char
def _set_horizontal_char(self, val):
val = self._unicode(val)
self._validate_option('horizontal_char', val)
self._horizontal_char = val
horizontal_char = property(_get_horizontal_char, _set_horizontal_char)
def _get_junction_char(self):
"""The charcter used when printing table borders to draw line junctions
Arguments:
junction_char - single character string used to draw line junctions"""
return self._junction_char
def _set_junction_char(self, val):
val = self._unicode(val)
        self._validate_option('junction_char', val)
self._junction_char = val
junction_char = property(_get_junction_char, _set_junction_char)
def _get_format(self):
"""Controls whether or not HTML tables are formatted to match styling options
Arguments:
format - True or False"""
return self._format
def _set_format(self, val):
self._validate_option('format', val)
self._format = val
format = property(_get_format, _set_format)
def _get_print_empty(self):
"""Controls whether or not empty tables produce a header and frame or just an empty string
Arguments:
print_empty - True or False"""
return self._print_empty
def _set_print_empty(self, val):
self._validate_option('print_empty', val)
self._print_empty = val
print_empty = property(_get_print_empty, _set_print_empty)
def _get_attributes(self):
"""A dictionary of HTML attribute name/value pairs to be included in the <table> tag when printing HTML
Arguments:
attributes - dictionary of attributes"""
return self._attributes
def _set_attributes(self, val):
self._validate_option('attributes', val)
self._attributes = val
attributes = property(_get_attributes, _set_attributes)
##############################
# OPTION MIXER #
##############################
def _get_options(self, kwargs):
options = {}
for option in self._options:
if option in kwargs:
self._validate_option(option, kwargs[option])
options[option] = kwargs[option]
else:
options[option] = getattr(self, '_' + option)
return options
##############################
# PRESET STYLE LOGIC #
##############################
def set_style(self, style):
if style == DEFAULT:
self._set_default_style()
elif style == MSWORD_FRIENDLY:
self._set_msword_style()
elif style == PLAIN_COLUMNS:
self._set_columns_style()
elif style == RANDOM:
self._set_random_style()
else:
raise Exception('Invalid pre-set style!')
def _set_default_style(self):
self.header = True
self.border = True
self._hrules = FRAME
self._vrules = ALL
self.padding_width = 1
self.left_padding_width = 1
self.right_padding_width = 1
self.vertical_char = '|'
self.horizontal_char = '-'
self.junction_char = '+'
def _set_msword_style(self):
self.header = True
self.border = True
self._hrules = NONE
self.padding_width = 1
self.left_padding_width = 1
self.right_padding_width = 1
self.vertical_char = '|'
def _set_columns_style(self):
self.header = True
self.border = False
self.padding_width = 1
self.left_padding_width = 0
self.right_padding_width = 8
def _set_random_style(self):
# Just for fun!
self.header = random.choice((True, False))
self.border = random.choice((True, False))
self._hrules = random.choice((ALL, FRAME, HEADER, NONE))
self._vrules = random.choice((ALL, FRAME, NONE))
self.left_padding_width = random.randint(0, 5)
self.right_padding_width = random.randint(0, 5)
self.vertical_char = random.choice('~!@#$%^&*()_+|-={}[];\':",./;<>?')
self.horizontal_char = random.choice('~!@#$%^&*()_+|-={}[];\':",./;<>?')
self.junction_char = random.choice('~!@#$%^&*()_+|-={}[];\':",./;<>?')
##############################
# DATA INPUT METHODS #
##############################
def add_row(self, row):
"""Add a row to the table
Arguments:
row - row of data, should be a list with as many elements as the table
has fields"""
if self._field_names and len(row) != len(self._field_names):
raise Exception(
'Row has incorrect number of values, (actual) %d!=%d (expected)'
% (len(row), len(self._field_names))
)
if not self._field_names:
self.field_names = [('Field %d' % (n + 1)) for n in range(0, len(row))]
self._rows.append(list(row))
def del_row(self, row_index):
"""Delete a row to the table
Arguments:
row_index - The index of the row you want to delete. Indexing starts at 0."""
if row_index > len(self._rows) - 1:
raise Exception(
"Can't delete row at index %d, table only has %d rows!"
% (row_index, len(self._rows))
)
del self._rows[row_index]
def add_column(self, fieldname, column, align='c', valign='t'):
"""Add a column to the table.
Arguments:
fieldname - name of the field to contain the new column of data
column - column of data, should be a list with as many elements as the
table has rows
align - desired alignment for this column - "l" for left, "c" for centre and "r" for right
valign - desired vertical alignment for new columns - "t" for top, "m" for middle and "b" for bottom"""
if len(self._rows) in (0, len(column)):
self._validate_align(align)
self._validate_valign(valign)
self._field_names.append(fieldname)
self._align[fieldname] = align
self._valign[fieldname] = valign
for i in range(0, len(column)):
if len(self._rows) < i + 1:
self._rows.append([])
self._rows[i].append(column[i])
else:
raise Exception(
'Column length %d does not match number of rows %d!'
% (len(column), len(self._rows))
)
def clear_rows(self):
"""Delete all rows from the table but keep the current field names"""
self._rows = []
def clear(self):
"""Delete all rows and field names from the table, maintaining nothing but styling options"""
self._rows = []
self._field_names = []
self._widths = []
##############################
# MISC PUBLIC METHODS #
##############################
def copy(self):
return copy.deepcopy(self)
##############################
# MISC PRIVATE METHODS #
##############################
def _format_value(self, field, value):
if isinstance(value, int) and field in self._int_format:
value = self._unicode(('%%%sd' % self._int_format[field]) % value)
elif isinstance(value, float) and field in self._float_format:
value = self._unicode(('%%%sf' % self._float_format[field]) % value)
return self._unicode(value)
def _compute_widths(self, rows, options):
if options['header']:
widths = [_get_size(field)[0] for field in self._field_names]
else:
widths = len(self.field_names) * [0]
for row in rows:
for index, value in enumerate(row):
fieldname = self.field_names[index]
if fieldname in self.max_width:
widths[index] = max(
widths[index],
min(_get_size(value)[0], self.max_width[fieldname]),
)
else:
widths[index] = max(widths[index], _get_size(value)[0])
self._widths = widths
def _get_padding_widths(self, options):
if options['left_padding_width'] is not None:
lpad = options['left_padding_width']
else:
lpad = options['padding_width']
if options['right_padding_width'] is not None:
rpad = options['right_padding_width']
else:
rpad = options['padding_width']
return lpad, rpad
def _get_rows(self, options):
"""Return only those data rows that should be printed, based on slicing and sorting.
Arguments:
options - dictionary of option settings."""
# Make a copy of only those rows in the slice range
rows = copy.deepcopy(self._rows[options['start'] : options['end']])
# Sort if necessary
if options['sortby']:
sortindex = self._field_names.index(options['sortby'])
# Decorate
rows = [[row[sortindex]] + row for row in rows]
# Sort
rows.sort(reverse=options['reversesort'], key=options['sort_key'])
# Undecorate
rows = [row[1:] for row in rows]
return rows
def _format_row(self, row, options):
return [
self._format_value(field, value)
for (field, value) in zip(self._field_names, row)
]
def _format_rows(self, rows, options):
return [self._format_row(row, options) for row in rows]
##############################
# PLAIN TEXT STRING METHODS #
##############################
def get_string(self, **kwargs):
"""Return string representation of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
vertical_char - single character string used to draw vertical lines
horizontal_char - single character string used to draw horizontal lines
junction_char - single character string used to draw line junctions
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
reversesort - True or False to sort in descending or ascending order
print_empty - if True, stringify just the header for an empty table, if False return an empty string"""
options = self._get_options(kwargs)
lines = []
# Don't think too hard about an empty table
# Is this the desired behaviour? Maybe we should still print the header?
if self.rowcount == 0 and (not options['print_empty'] or not options['border']):
return ''
# Get the rows we need to print, taking into account slicing, sorting, etc.
rows = self._get_rows(options)
# Turn all data in all rows into Unicode, formatted as desired
formatted_rows = self._format_rows(rows, options)
# Compute column widths
self._compute_widths(formatted_rows, options)
# Add header or top of border
self._hrule = self._stringify_hrule(options)
if options['header']:
lines.append(self._stringify_header(options))
elif options['border'] and options['hrules'] in (ALL, FRAME):
lines.append(self._hrule)
# Add rows
for row in formatted_rows:
lines.append(self._stringify_row(row, options))
# Add bottom of border
if options['border'] and options['hrules'] == FRAME:
lines.append(self._hrule)
return self._unicode('\n').join(lines)
def _stringify_hrule(self, options):
if not options['border']:
return ''
lpad, rpad = self._get_padding_widths(options)
if options['vrules'] in (ALL, FRAME):
bits = [options['junction_char']]
else:
bits = [options['horizontal_char']]
# For tables with no data or fieldnames
if not self._field_names:
bits.append(options['junction_char'])
return ''.join(bits)
for field, width in zip(self._field_names, self._widths):
if options['fields'] and field not in options['fields']:
continue
bits.append((width + lpad + rpad) * options['horizontal_char'])
if options['vrules'] == ALL:
bits.append(options['junction_char'])
else:
bits.append(options['horizontal_char'])
if options['vrules'] == FRAME:
bits.pop()
bits.append(options['junction_char'])
return ''.join(bits)
def _stringify_header(self, options):
bits = []
lpad, rpad = self._get_padding_widths(options)
if options['border']:
if options['hrules'] in (ALL, FRAME):
bits.append(self._hrule)
bits.append('\n')
if options['vrules'] in (ALL, FRAME):
bits.append(options['vertical_char'])
else:
bits.append(' ')
# For tables with no data or field names
if not self._field_names:
if options['vrules'] in (ALL, FRAME):
bits.append(options['vertical_char'])
else:
bits.append(' ')
for field, width, in zip(self._field_names, self._widths):
if options['fields'] and field not in options['fields']:
continue
if self._header_style == 'cap':
fieldname = field.capitalize()
elif self._header_style == 'title':
fieldname = field.title()
elif self._header_style == 'upper':
fieldname = field.upper()
elif self._header_style == 'lower':
fieldname = field.lower()
else:
fieldname = field
bits.append(
' ' * lpad
+ self._justify(fieldname, width, self._align[field])
+ ' ' * rpad
)
if options['border']:
if options['vrules'] == ALL:
bits.append(options['vertical_char'])
else:
bits.append(' ')
# If vrules is FRAME, then we just appended a space at the end
# of the last field, when we really want a vertical character
if options['border'] and options['vrules'] == FRAME:
bits.pop()
bits.append(options['vertical_char'])
if options['border'] and options['hrules'] != NONE:
bits.append('\n')
bits.append(self._hrule)
return ''.join(bits)
def _stringify_row(self, row, options):
for index, field, value, width, in zip(
list(range(0, len(row))), self._field_names, row, self._widths
):
# Enforce max widths
lines = value.split('\n')
new_lines = []
for line in lines:
if _str_block_width(line) > width:
line = textwrap.fill(line, width)
new_lines.append(line)
lines = new_lines
value = '\n'.join(lines)
row[index] = value
row_height = 0
for c in row:
h = _get_size(c)[1]
if h > row_height:
row_height = h
bits = []
lpad, rpad = self._get_padding_widths(options)
for y in range(0, row_height):
bits.append([])
if options['border']:
if options['vrules'] in (ALL, FRAME):
bits[y].append(self.vertical_char)
else:
bits[y].append(' ')
for field, value, width, in zip(self._field_names, row, self._widths):
valign = self._valign[field]
lines = value.split('\n')
dHeight = row_height - len(lines)
if dHeight:
if valign == 'm':
lines = (
[''] * int(dHeight / 2)
+ lines
+ [''] * (dHeight - int(dHeight / 2))
)
elif valign == 'b':
lines = [''] * dHeight + lines
else:
lines = lines + [''] * dHeight
y = 0
for l in lines:
if options['fields'] and field not in options['fields']:
continue
bits[y].append(
' ' * lpad
+ self._justify(l, width, self._align[field])
+ ' ' * rpad
)
if options['border']:
if options['vrules'] == ALL:
bits[y].append(self.vertical_char)
else:
bits[y].append(' ')
y += 1
# If vrules is FRAME, then we just appended a space at the end
# of the last field, when we really want a vertical character
for y in range(0, row_height):
if options['border'] and options['vrules'] == FRAME:
bits[y].pop()
bits[y].append(options['vertical_char'])
if options['border'] and options['hrules'] == ALL:
bits[row_height - 1].append('\n')
bits[row_height - 1].append(self._hrule)
for y in range(0, row_height):
bits[y] = ''.join(bits[y])
return '\n'.join(bits)
##############################
# HTML STRING METHODS #
##############################
def get_html_string(self, **kwargs):
"""Return string representation of HTML formatted version of table in current state.
Arguments:
start - index of first data row to include in output
end - index of last data row to include in output PLUS ONE (list slice style)
fields - names of fields (columns) to include
header - print a header showing field names (True or False)
border - print a border around the table (True or False)
hrules - controls printing of horizontal rules after rows. Allowed values: ALL, FRAME, HEADER, NONE
vrules - controls printing of vertical rules between columns. Allowed values: FRAME, ALL, NONE
int_format - controls formatting of integer data
float_format - controls formatting of floating point data
padding_width - number of spaces on either side of column data (only used if left and right paddings are None)
left_padding_width - number of spaces on left hand side of column data
right_padding_width - number of spaces on right hand side of column data
sortby - name of field to sort rows by
sort_key - sorting key function, applied to data points before sorting
attributes - dictionary of name/value pairs to include as HTML attributes in the <table> tag
xhtml - print <br/> tags if True, <br> tags if false"""
options = self._get_options(kwargs)
if options['format']:
string = self._get_formatted_html_string(options)
else:
string = self._get_simple_html_string(options)
return string
def _get_simple_html_string(self, options):
lines = []
if options['xhtml']:
linebreak = '<br/>'
else:
linebreak = '<br>'
open_tag = []
open_tag.append('<table')
if options['attributes']:
for attr_name in options['attributes']:
open_tag.append(
' %s="%s"' % (attr_name, options['attributes'][attr_name])
)
open_tag.append('>')
lines.append(''.join(open_tag))
# Headers
if options['header']:
lines.append(' <tr>')
for field in self._field_names:
if options['fields'] and field not in options['fields']:
continue
lines.append(
' <th>%s</th>' % escape(field).replace('\n', linebreak)
)
lines.append(' </tr>')
# Data
rows = self._get_rows(options)
formatted_rows = self._format_rows(rows, options)
for row in formatted_rows:
lines.append(' <tr>')
for field, datum in zip(self._field_names, row):
if options['fields'] and field not in options['fields']:
continue
lines.append(
' <td>%s</td>' % escape(datum).replace('\n', linebreak)
)
lines.append(' </tr>')
lines.append('</table>')
return self._unicode('\n').join(lines)
def _get_formatted_html_string(self, options):
lines = []
lpad, rpad = self._get_padding_widths(options)
if options['xhtml']:
linebreak = '<br/>'
else:
linebreak = '<br>'
open_tag = []
open_tag.append('<table')
if options['border']:
if options['hrules'] == ALL and options['vrules'] == ALL:
open_tag.append(' frame="box" rules="all"')
elif options['hrules'] == FRAME and options['vrules'] == FRAME:
open_tag.append(' frame="box"')
elif options['hrules'] == FRAME and options['vrules'] == ALL:
open_tag.append(' frame="box" rules="cols"')
elif options['hrules'] == FRAME:
open_tag.append(' frame="hsides"')
elif options['hrules'] == ALL:
open_tag.append(' frame="hsides" rules="rows"')
elif options['vrules'] == FRAME:
open_tag.append(' frame="vsides"')
elif options['vrules'] == ALL:
open_tag.append(' frame="vsides" rules="cols"')
if options['attributes']:
for attr_name in options['attributes']:
open_tag.append(
' %s="%s"' % (attr_name, options['attributes'][attr_name])
)
open_tag.append('>')
lines.append(''.join(open_tag))
# Headers
if options['header']:
lines.append(' <tr>')
for field in self._field_names:
if options['fields'] and field not in options['fields']:
continue
lines.append(
' <th style="padding-left: %dem; padding-right: %dem; text-align: center">%s</th>'
% (lpad, rpad, escape(field).replace('\n', linebreak))
)
lines.append(' </tr>')
# Data
rows = self._get_rows(options)
formatted_rows = self._format_rows(rows, options)
aligns = []
valigns = []
for field in self._field_names:
aligns.append(
{'l': 'left', 'r': 'right', 'c': 'center'}[self._align[field]]
)
valigns.append(
{'t': 'top', 'm': 'middle', 'b': 'bottom'}[self._valign[field]]
)
for row in formatted_rows:
lines.append(' <tr>')
for field, datum, align, valign in zip(
self._field_names, row, aligns, valigns
):
if options['fields'] and field not in options['fields']:
continue
lines.append(
' <td style="padding-left: %dem; padding-right: %dem; text-align: %s; vertical-align: %s">%s</td>'
% (
lpad,
rpad,
align,
valign,
escape(datum).replace('\n', linebreak),
)
)
lines.append(' </tr>')
lines.append('</table>')
return self._unicode('\n').join(lines)
##############################
# UNICODE WIDTH FUNCTIONS #
##############################
def _char_block_width(char):
# Basic Latin, which is probably the most common case
# if char in xrange(0x0021, 0x007e):
# if char >= 0x0021 and char <= 0x007e:
if 0x0021 <= char <= 0x007E:
return 1
# Chinese, Japanese, Korean (common)
if 0x4E00 <= char <= 0x9FFF:
return 2
# Hangul
if 0xAC00 <= char <= 0xD7AF:
return 2
# Combining?
if unicodedata.combining(uni_chr(char)):
return 0
# Hiragana and Katakana
if 0x3040 <= char <= 0x309F or 0x30A0 <= char <= 0x30FF:
return 2
# Full-width Latin characters
if 0xFF01 <= char <= 0xFF60:
return 2
# CJK punctuation
if 0x3000 <= char <= 0x303E:
return 2
# Backspace and delete
if char in (0x0008, 0x007F):
return -1
# Other control characters
elif 0x0000 <= char <= 0x001F:
return 0
# Take a guess
return 1
def _str_block_width(val):
return sum(map(_char_block_width, list(map(ord, _re.sub('', val)))))
##############################
# TABLE FACTORIES #
##############################
def from_csv(fp, field_names=None, **kwargs):
dialect = csv.Sniffer().sniff(fp.read(1024))
fp.seek(0)
reader = csv.reader(fp, dialect)
table = PrettyTable(**kwargs)
if field_names:
table.field_names = field_names
else:
table.field_names = [x.strip() for x in next(reader)]
for row in reader:
table.add_row([x.strip() for x in row])
return table
def from_db_cursor(cursor, **kwargs):
if cursor.description:
table = PrettyTable(**kwargs)
table.field_names = [col[0] for col in cursor.description]
for row in cursor.fetchall():
table.add_row(row)
return table
class TableHandler(HTMLParser):
def __init__(self, **kwargs):
HTMLParser.__init__(self)
self.kwargs = kwargs
self.tables = []
self.last_row = []
self.rows = []
self.max_row_width = 0
self.active = None
self.last_content = ''
self.is_last_row_header = False
def handle_starttag(self, tag, attrs):
self.active = tag
if tag == 'th':
self.is_last_row_header = True
def handle_endtag(self, tag):
if tag in ['th', 'td']:
stripped_content = self.last_content.strip()
self.last_row.append(stripped_content)
if tag == 'tr':
self.rows.append((self.last_row, self.is_last_row_header))
self.max_row_width = max(self.max_row_width, len(self.last_row))
self.last_row = []
self.is_last_row_header = False
if tag == 'table':
table = self.generate_table(self.rows)
self.tables.append(table)
self.rows = []
self.last_content = ' '
self.active = None
def handle_data(self, data):
self.last_content += data
def generate_table(self, rows):
"""
Generates a PrettyTable object from a list of rows.
"""
table = PrettyTable(**self.kwargs)
for row in rows:
if len(row[0]) < self.max_row_width:
appends = self.max_row_width - len(row[0])
for i in range(appends):
row[0].append('-')
if row[1]:
self.make_fields_unique(row[0])
table.field_names = row[0]
else:
table.add_row(row[0])
return table
def make_fields_unique(self, fields):
"""
Iterates over the row and makes each field unique.
"""
for i in range(0, len(fields)):
for j in range(i + 1, len(fields)):
if fields[i] == fields[j]:
fields[j] += "'"
def from_html(html_code, **kwargs):
"""
Generates a list of PrettyTables from a string of HTML code. Each <table> in
the HTML becomes one PrettyTable object.
"""
parser = TableHandler(**kwargs)
parser.feed(html_code)
return parser.tables
def from_html_one(html_code, **kwargs):
"""
Generates a PrettyTable from a string of HTML code which contains only a
single <table>
"""
tables = from_html(html_code, **kwargs)
try:
assert len(tables) == 1
except AssertionError:
raise Exception(
'More than one <table> in provided HTML code! Use from_html instead.'
)
return tables[0]
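# Editor's note: a minimal usage sketch for the factory functions above. It is
# illustrative only (not part of the original module); the CSV and HTML snippets
# are made up, but from_csv/from_html_one are the functions defined in this file.
def _example_factory_usage():
    import io
    # Build a table from CSV text; the first row becomes the field names.
    csv_text = 'City,Population\nAdelaide,1158259\nDarwin,120900\n'
    csv_table = from_csv(io.StringIO(csv_text))
    # Build a table from an HTML string containing exactly one <table>.
    html_text = '<table><tr><th>City</th></tr><tr><td>Adelaide</td></tr></table>'
    html_table = from_html_one(html_text)
    return csv_table, html_table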
##############################
# MAIN (TEST FUNCTION) #
##############################
def main():
x = PrettyTable(['City name', 'Area', 'Population', 'Annual Rainfall'])
x.sortby = 'Population'
x.reversesort = True
x.int_format['Area'] = '04d'
x.float_format = '6.1f'
x.align['City name'] = 'l' # Left align city names
x.add_row(['Hoehenkirchen\nSiegertsbrunn', 1295, 1158259, 600.5])
x.add_row(['Adelaide', 1295, 1158259, 600.5])
x.add_row(['Brisbane', 5905, 1857594, 1146.4])
x.add_row(['Darwin', 112, 120900, 1714.7])
x.add_row(['Hobart', 1357, 205556, 619.5])
x.add_row(['Sydney', 2058, 4336374, 1214.8])
x.add_row(['Melbourne', 1566, 3806092, 646.9])
x.add_row(['Perth', 5386, 1554769, 869.4])
print(x)
if __name__ == '__main__':
main()
|
the-stack_0_10160 | import logging
import time
from collections import defaultdict
from queue import Queue
from threading import Thread
from kube_hunter.conf import get_config
from kube_hunter.core.types import ActiveHunter, HunterBase
from kube_hunter.core.events.types import Vulnerability, EventFilterBase, MultipleEventsContainer
logger = logging.getLogger(__name__)
# Inherits Queue object, handles events asynchronously
class EventQueue(Queue):
def __init__(self, num_worker=10):
super().__init__()
self.passive_hunters = dict()
self.active_hunters = dict()
self.all_hunters = dict()
self.running = True
self.workers = list()
# -- Regular Subscription --
# Structure: key: Event Class, value: tuple(Registered Hunter, Predicate Function)
self.hooks = defaultdict(list)
self.filters = defaultdict(list)
# --------------------------
# -- Multiple Subscription --
# Structure: key: Event Class, value: tuple(Registered Hunter, Predicate Function)
self.multi_hooks = defaultdict(list)
# When subscribing to multiple events, this gets populated with required event classes
# Structure: key: Hunter Class, value: set(RequiredEventClass1, RequiredEventClass2)
self.hook_dependencies = defaultdict(set)
# To keep track of fulfilled dependencies, we need a structure that saves historical instantiated
# events mapped to a registered hunter.
# We used a 2 dimensional dictionary in order to fulfill two demands:
# * correctly count published required events
# * save historical events fired, easily sorted by their type
#
# Structure: hook_fulfilled_deps[hunter_class] -> fulfilled_events_for_hunter[event_class] -> [EventObject, EventObject2]
self.hook_fulfilled_deps = defaultdict(lambda: defaultdict(list))
# ---------------------------
for _ in range(num_worker):
t = Thread(target=self.worker)
t.daemon = True
t.start()
self.workers.append(t)
t = Thread(target=self.notifier)
t.daemon = True
t.start()
"""
######################################################
+ ----------------- Public Methods ----------------- +
######################################################
"""
def subscribe(self, event, hook=None, predicate=None, is_register=True):
"""
The Subscribe Decorator - For Regular Registration
Use this to register for one event only. Your hunter will execute each time this event is published
@param event - Event class to subscribe to
@param predicate - Optional: Function that will be called with the published event as a parameter before trigger.
If its return value is False, the Hunter will not run (default=None).
@param hook - Hunter class to register for (ignore when using as a decorator)
"""
def wrapper(hook):
self.subscribe_event(event, hook=hook, predicate=predicate, is_register=is_register)
return hook
return wrapper
def subscribe_many(self, events, hook=None, predicates=None, is_register=True):
"""
The Subscribe Many Decorator - For Multiple Registration,
When your attack needs several prerequisites to exist in the cluster, you need to register for multiple events.
Your hunter will execute once for every new combination of required events.
For example:
1. event A was published 3 times
2. event B was published once.
3. event B was published again
Your hunter will execute 2 times:
* (on step 2) with the newest version of A
* (on step 3) with the newest version of A and newest version of B
@param events - List of event classes to subscribe to
@param predicates - Optional: List of functions that will be called with the published event as a parameter before trigger.
If a predicate's return value is False, the Hunter will not run (default=None).
@param hook - Hunter class to register for (ignore when using as a decorator)
"""
def wrapper(hook):
self.subscribe_events(events, hook=hook, predicates=predicates, is_register=is_register)
return hook
return wrapper
def subscribe_once(self, event, hook=None, predicate=None, is_register=True):
"""
The Subscribe Once Decorator - For Single Trigger Registration,
Use this when you want your hunter to execute only once in your entire program run.
Wraps the subscribe_event method.
@param event - Event class to subscribe to
@param predicate - Optional: Function that will be called with the published event as a parameter before trigger.
If its return value is False, the Hunter will not run (default=None).
@param hook - Hunter class to register for (ignore when using as a decorator)
"""
def wrapper(hook):
# installing a __new__ magic method on the hunter
# which will remove the hunter from the list upon creation
def __new__unsubscribe_self(self, cls):
handler.hooks[event].remove((hook, predicate))
return object.__new__(self)
hook.__new__ = __new__unsubscribe_self
self.subscribe_event(event, hook=hook, predicate=predicate, is_register=is_register)
return hook
return wrapper
def publish_event(self, event, caller=None):
"""
The Publish Event Method - For Publishing Events To Kube-Hunter's Queue
"""
# Document that the hunter published a vulnerability (if it's indeed a vulnerability)
# For statistics options
self._increase_vuln_count(event, caller)
# sets the event's parent to be its publisher hunter.
self._set_event_chain(event, caller)
# applying filters on the event, before publishing it to subscribers.
# if filter returned None, not proceeding to publish
event = self.apply_filters(event)
if event:
# If event was rewritten, make sure it's linked again
self._set_event_chain(event, caller)
# Regular Hunter registrations - publish logic
# Here we iterate over all the registered-to events:
for hooked_event in self.hooks.keys():
# We check if the event we want to publish is an inherited class of the current registered-to iterated event
# Meaning - if this is a relevant event:
if hooked_event in event.__class__.__mro__:
# If so, we want to publish to all registered hunters.
for hook, predicate in self.hooks[hooked_event]:
if predicate and not predicate(event):
continue
self.put(hook(event))
logger.debug(f"Event {event.__class__} got published to hunter - {hook} with {event}")
# Multiple Hunter registrations - publish logic
# Here we iterate over all the registered-to events:
for hooked_event in self.multi_hooks.keys():
# We check if the event we want to publish is an inherited class of the current registered-to iterated event
# Meaning - if this is a relevant event:
if hooked_event in event.__class__.__mro__:
# now we iterate over the corresponding registered hunters.
for hook, predicate in self.multi_hooks[hooked_event]:
if predicate and not predicate(event):
continue
self._update_multi_hooks(hook, event)
if self._is_all_fulfilled_for_hunter(hook):
events_container = MultipleEventsContainer(self._get_latest_events_from_multi_hooks(hook))
self.put(hook(events_container))
logger.debug(
f"Multiple subscription requirements were met for hunter {hook}. events container was \
published with {self.hook_fulfilled_deps[hook].keys()}"
)
"""
######################################################
+ ---------------- Private Methods ----------------- +
+ ---------------- (Backend Logic) ----------------- +
######################################################
"""
def _get_latest_events_from_multi_hooks(self, hook):
"""
Iterates over the fulfilled deps for the hunter and fetches the latest appended events from history.
"""
latest_events = list()
for event_class in self.hook_fulfilled_deps[hook].keys():
latest_events.append(self.hook_fulfilled_deps[hook][event_class][-1])
return latest_events
def _update_multi_hooks(self, hook, event):
"""
Updates published events in the multi hooks fulfilled store.
"""
self.hook_fulfilled_deps[hook][event.__class__].append(event)
def _is_all_fulfilled_for_hunter(self, hook):
"""
Returns True if the multi hook is fulfilled, False otherwise.
"""
# Check if the first dimension already contains all necessary event classes
return len(self.hook_fulfilled_deps[hook].keys()) == len(self.hook_dependencies[hook])
def _set_event_chain(self, event, caller):
"""
Sets the event's attribute chain.
Here we link the event with its publisher (Hunter),
so the next hunter that catches this event can access the previous one's attributes.
@param event: the event object to be chained
@param caller: the Hunter object that published this event.
"""
if caller:
event.previous = caller.event
event.hunter = caller.__class__
def _register_hunters(self, hook=None):
"""
This method is called when a Hunter registers itself to the handler.
this is done in order to track and correctly configure the current run of the program.
passive_hunters, active_hunters, all_hunters
"""
config = get_config()
if ActiveHunter in hook.__mro__:
if not config.active:
return False
else:
self.active_hunters[hook] = hook.__doc__
elif HunterBase in hook.__mro__:
self.passive_hunters[hook] = hook.__doc__
if HunterBase in hook.__mro__:
self.all_hunters[hook] = hook.__doc__
return True
def _register_filter(self, event, hook=None, predicate=None):
if hook not in self.filters[event]:
self.filters[event].append((hook, predicate))
logging.debug("{} filter subscribed to {}".format(hook, event))
def _register_hook(self, event, hook=None, predicate=None):
if hook not in self.hooks[event]:
self.hooks[event].append((hook, predicate))
logging.debug("{} subscribed to {}".format(hook, event))
def subscribe_event(self, event, hook=None, predicate=None, is_register=True):
if not is_register:
return
if not self._register_hunters(hook):
return
# registering filters
if EventFilterBase in hook.__mro__:
self._register_filter(event, hook, predicate)
# registering hunters
else:
self._register_hook(event, hook, predicate)
def subscribe_events(self, events, hook=None, predicates=None, is_register=True):
if not is_register:
return False
if not self._register_hunters(hook):
return False
if predicates is None:
predicates = [None] * len(events)
# registering filters.
if EventFilterBase in hook.__mro__:
for event, predicate in zip(events, predicates):
self._register_filter(event, hook, predicate)
# registering hunters.
else:
for event, predicate in zip(events, predicates):
self.multi_hooks[event].append((hook, predicate))
self.hook_dependencies[hook] = frozenset(events)
def apply_filters(self, event):
# if filters are subscribed, apply them on the event
for hooked_event in self.filters.keys():
if hooked_event in event.__class__.__mro__:
for filter_hook, predicate in self.filters[hooked_event]:
if predicate and not predicate(event):
continue
logger.debug(f"Event {event.__class__} filtered with {filter_hook}")
event = filter_hook(event).execute()
# if filter decided to remove event, returning None
if not event:
return None
return event
def _increase_vuln_count(self, event, caller):
config = get_config()
if config.statistics and caller:
if Vulnerability in event.__class__.__mro__:
caller.__class__.publishedVulnerabilities += 1
# executes callbacks on dedicated thread as a daemon
def worker(self):
while self.running:
try:
hook = self.get()
logger.debug(f"Executing {hook.__class__} with {hook.event.__dict__}")
hook.execute()
except Exception as ex:
logger.debug(ex, exc_info=True)
finally:
self.task_done()
logger.debug("closing thread...")
def notifier(self):
time.sleep(2)
# should consider locking on unfinished_tasks
while self.unfinished_tasks > 0:
logger.debug(f"{self.unfinished_tasks} tasks left")
time.sleep(3)
if self.unfinished_tasks == 1:
logger.debug("final hook is hanging")
# stops execution of all daemons
def free(self):
self.running = False
with self.mutex:
self.queue.clear()
handler = EventQueue(800)
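# Editor's note: a hypothetical, commented-out sketch of how the global handler
# above is normally used. DemoEvent and DemoHunter are not real kube-hunter
# classes; they only illustrate the subscribe/publish flow implemented by
# EventQueue (subscribe_many works the same way, but takes a list of event
# classes and fires once per new combination of them).
#
#   from kube_hunter.core.events.types import Event
#
#   class DemoEvent(Event):
#       """Published by some discovery/hunting module."""
#
#   @handler.subscribe(DemoEvent)
#   class DemoHunter(HunterBase):
#       """Executed every time a DemoEvent is published."""
#       def __init__(self, event):
#           self.event = event
#
#       def execute(self):
#           logger.info(f"DemoHunter triggered by {self.event}")
#
#   # elsewhere, typically from another hunter's execute():
#   # handler.publish_event(DemoEvent())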
|
the-stack_0_10162 | from cConstants import cEPAConstants, cPlotConstants
from cEnum import eEPA
import cPlot2D
import cPlotEPA
import sys
sys.path.append("../")
import bayesact
import wx
class cPlotFrame(cPlotEPA.cPlotFrame):
def __init__(self, iParent, **kwargs):
cPlot2D.cPlotFrame.__init__(self, iParent, **kwargs)
def initPanel(self, *args, **kwargs):
self.m_PlotPanel = cPlotPanel(self, **kwargs)
class cPlotPanel(cPlot2D.cPlotPanel):
def __init__(self, iParent, iXAxisItem=eEPA.evaluation, iYAxisItem=eEPA.potency, iPlotType=eEPA.fundamental, **kwargs):
cPlot2D.cPlotPanel.__init__(self, iParent, **kwargs)
self.m_XAxisItem = iXAxisItem
self.m_YAxisItem = iYAxisItem
self.m_PlotType = iPlotType
self.m_SimInteractiveTabsPanel = iParent
# The other plots, changes the x and y boundaries of this plot will be done the same to other plots
# Good for comparing multiple plots
self.m_TwinPlots = []
self.m_LearnerSamples = []
self.m_SimulatorSamples = []
def getSentimentEPAIndex(self, iEPA, iSentiment):
return iEPA + (cEPAConstants.m_Dimensions * iSentiment)
# Axis items are the enumerations of the elements in eEPA, so they're basically numbers
def setAxis(self, iXAxisItem, iYAxisItem):
self.m_XAxisItem = iXAxisItem
self.m_YAxisItem = iYAxisItem
def plotEPA(self, iLearnerSamples, iSimulatorSamples, iLearnerPreviousAction, iSimulatorPreviousAction):
self.clearAxes()
# Size is the size of the point in terms of viewing size
lsize=50
# Alpha is the opacity of the point
lalpha=0.5
self.m_LearnerSamples = iLearnerSamples
self.m_SimulatorSamples = iSimulatorSamples
if (0 < len(iLearnerSamples)):
# Learner's sentiments on self and other, green and pink respectively
learnerSamplesXIndexSelf = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier)
learnerSamplesYIndexSelf = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier)
learnerSamplesXIndexOther = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier)
learnerSamplesYIndexOther = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier)
self.plotScatter(
iLearnerSamples[learnerSamplesXIndexSelf],
iLearnerSamples[learnerSamplesYIndexSelf],
iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=lsize, c="cyan", alpha=lalpha, animated=False)
self.plotScatter(
iLearnerSamples[learnerSamplesXIndexOther],
iLearnerSamples[learnerSamplesYIndexOther],
iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=lsize, c="blue", alpha=lalpha, animated=False)
# This also checks that when an action has an EPA rating of (0, 0, 0), it will not plot it
if (0 < len(iLearnerPreviousAction)):
if ((0, 0, 0) == (iLearnerPreviousAction[0], iLearnerPreviousAction[1], iLearnerPreviousAction[2])):
pass
else:
self.plotScatter(
iLearnerPreviousAction[self.m_XAxisItem],
iLearnerPreviousAction[self.m_YAxisItem],
marker="*", s=200, c="turquoise", alpha=1)
if (0 < len(iSimulatorSamples)):
# Simulator's sentiments on self and other, goldenrod and blue respectively
simulatorSamplesXIndexSelf = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier)
simulatorSamplesYIndexSelf = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier)
simulatorSamplesXIndexOther = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier)
simulatorSamplesYIndexOther = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier)
self.plotScatter(
iSimulatorSamples[simulatorSamplesXIndexSelf],
iSimulatorSamples[simulatorSamplesYIndexSelf],
iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=lsize, c="magenta", alpha=lalpha, animated=False)
self.plotScatter(
iSimulatorSamples[simulatorSamplesXIndexOther],
iSimulatorSamples[simulatorSamplesYIndexOther],
iAutoScaling=False, iRedraw=False, iUpdate=False, marker="o", s=lsize, c="red", alpha=lalpha, animated=False)
if (0 < len(iSimulatorPreviousAction)):
if ((0, 0, 0) == (iSimulatorPreviousAction[0],iSimulatorPreviousAction[1], iSimulatorPreviousAction[2])):
pass
else:
self.plotScatter(
iSimulatorPreviousAction[self.m_XAxisItem],
iSimulatorPreviousAction[self.m_YAxisItem],
marker="*", s=200, c="magenta", alpha=1)
self.m_Axes.set_title(self.m_Title, fontsize=12)
self.m_Axes.set_xlabel(cEPAConstants.m_EPALabels[self.m_XAxisItem])
self.m_Axes.set_ylabel(cEPAConstants.m_EPALabels[self.m_YAxisItem])
self.redrawAxes()
def onMousePress(self, iEvent):
# Returns (index, minDist), where minDist is the minimum euclidean distance calculated
def getMin(data, x, y):
index = 0
minDist = ((data[0][0] - x) ** 2) + ((data[1][0] - y) ** 2)
points = len(data[0])
for i in range(points-1):
dist = ((data[0][i+1] - x) ** 2) + ((data[1][i+1] - y) ** 2)
if (dist < minDist):
minDist = dist
index = i+1
return (index, minDist)
def getSampleEPA(data, dataIndex, evaluationIndex, potencyIndex, activityIndex):
return [data[evaluationIndex][dataIndex], data[potencyIndex][dataIndex], data[activityIndex][dataIndex]]
# Do default function, then find closest point, if anything is plotted
# Please note that this does not include the previous action
super(cPlotPanel, self).onMousePress(iEvent)
# 1 represents left click, check for closest point when left clicking
if(1 != iEvent.button):
return
if (iEvent.inaxes != self.m_Axes):
return
if (0 >= len(self.m_LearnerSamples)):
return
xPoint = iEvent.xdata
yPoint = iEvent.ydata
learnerSamplesXIndexSelf = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier)
learnerSamplesYIndexSelf = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier)
learnerSamplesXIndexOther = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier)
learnerSamplesYIndexOther = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier)
simulatorSamplesXIndexSelf = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_SelfMultiplier)
simulatorSamplesYIndexSelf = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_SelfMultiplier)
simulatorSamplesXIndexOther = self.getSentimentEPAIndex(self.m_XAxisItem, cEPAConstants.m_OtherMultiplier)
simulatorSamplesYIndexOther = self.getSentimentEPAIndex(self.m_YAxisItem, cEPAConstants.m_OtherMultiplier)
# To find the closest point to where the mouse clicked
visibleLearnerSelfData = [self.m_LearnerSamples[learnerSamplesXIndexSelf], self.m_LearnerSamples[learnerSamplesYIndexSelf]]
visibleLearnerOtherData = [self.m_LearnerSamples[learnerSamplesXIndexOther], self.m_LearnerSamples[learnerSamplesYIndexOther]]
visibleSimulatorSelfData = [self.m_SimulatorSamples[simulatorSamplesXIndexSelf], self.m_SimulatorSamples[simulatorSamplesYIndexSelf]]
visibleSimulatorOtherData = [self.m_SimulatorSamples[simulatorSamplesXIndexOther], self.m_SimulatorSamples[simulatorSamplesYIndexOther]]
learnerSelf, learnerOther, simulatorSelf, simulatorOther = range(4)
currentMinIndex, currentMinDist = getMin(visibleLearnerSelfData, xPoint, yPoint)
currentMinData = learnerSelf
allOtherData = [visibleLearnerOtherData, visibleSimulatorSelfData, visibleSimulatorOtherData]
for i in range(len(allOtherData)):
tempMinIndex, tempMinDist = getMin(allOtherData[i], xPoint, yPoint)
if (tempMinDist < currentMinDist):
currentMinIndex, currentMinDist = tempMinIndex, tempMinDist
currentMinData = i+1
if (currentMinData == learnerSelf):
epa = getSampleEPA(self.m_LearnerSamples, currentMinIndex, eEPA.evaluationSelf, eEPA.potencySelf, eEPA.activitySelf)
elif (currentMinData == learnerOther):
epa = getSampleEPA(self.m_LearnerSamples, currentMinIndex, eEPA.evaluationOther, eEPA.potencyOther, eEPA.activityOther)
elif (currentMinData == simulatorSelf):
epa = getSampleEPA(self.m_SimulatorSamples, currentMinIndex, eEPA.evaluationSelf, eEPA.potencySelf, eEPA.activitySelf)
else:
epa = getSampleEPA(self.m_SimulatorSamples, currentMinIndex, eEPA.evaluationOther, eEPA.potencyOther, eEPA.activityOther)
gender = self.m_SimInteractiveTabsPanel.m_OptionsAgentPanel.m_ClientGenderChoice.GetStringSelection()
if ("male" == gender):
estimatedIdentity = bayesact.findNearestEPAVector(epa, self.m_SimInteractiveTabsPanel.m_fidentitiesMale)
else:
estimatedIdentity = bayesact.findNearestEPAVector(epa, self.m_SimInteractiveTabsPanel.m_fidentitiesFemale)
# Those threes mean 3 decimal places
message = "You clicked on point: {}".format((round(xPoint, 3), round(yPoint, 3))) +\
"\nHere is the closest point:" +\
"\nEvaluation: {}\nPotency: {}\nActivity: {}".format(round(epa[eEPA.evaluation], 3), round(epa[eEPA.potency], 3), round(epa[eEPA.activity], 3)) +\
"\nClosest Identity: {}".format(estimatedIdentity) +\
"\nType: {}".format(cEPAConstants.m_PlotDetails[currentMinData])
wx.MessageBox(message, "Closest Point")
def changeXAxisLabel(self, iLabel):
self.m_XAxisItem = iLabel
for plotEPA2D in self.m_TwinPlots:
plotEPA2D.m_XAxisItem = iLabel
def changeYAxisLabel(self, iLabel):
self.m_YAxisItem = iLabel
for plotEPA2D in self.m_TwinPlots:
plotEPA2D.m_YAxisItem = iLabel
def shiftXAxis(self, iShiftAmount):
super(cPlotPanel, self).shiftXAxis(iShiftAmount)
self.updateAxesData()
for plotEPA2D in self.m_TwinPlots:
plotEPA2D.m_Axes.set_xlim(self.m_XAxisMin, self.m_XAxisMax)
plotEPA2D.redrawAxes()
def shiftYAxis(self, iShiftAmount):
super(cPlotPanel, self).shiftYAxis(iShiftAmount)
self.updateAxesData()
for plotEPA2D in self.m_TwinPlots:
plotEPA2D.m_Axes.set_ylim(self.m_YAxisMin, self.m_YAxisMax)
plotEPA2D.redrawAxes()
def zoomAxes(self, iZoomAmount):
super(cPlotPanel, self).zoomAxes(iZoomAmount)
self.updateAxesData()
for plotEPA2D in self.m_TwinPlots:
plotEPA2D.m_Axes.set_xlim(self.m_XAxisMin, self.m_XAxisMax)
plotEPA2D.m_Axes.set_ylim(self.m_YAxisMin, self.m_YAxisMax)
plotEPA2D.redrawAxes()
def resetAxes(self):
super(cPlotPanel, self).resetAxes()
self.updateAxesData()
for plotEPA2D in self.m_TwinPlots:
plotEPA2D.m_Axes.set_xlim(self.m_XAxisMin, self.m_XAxisMax)
plotEPA2D.m_Axes.set_ylim(self.m_YAxisMin, self.m_YAxisMax)
plotEPA2D.redrawAxes()
|
the-stack_0_10163 | from os import error
import threading
from threading import Thread
from multiprocessing import Process
import json
import sys
from put import split
from cat import cat
from remove import remove
from ls import listallfiles
from mapreduce import mapreduce
#change path to this file accordingly
dfs_setup_config = "/users/vinaynaidu/DFS/setup.json"
setupfiledir = "/users/vinaynaidu/DFS/"
f = open(dfs_setup_config)
config = json.load(f)
block_size = config['block_size']
path_to_datanodes = config['path_to_datanodes']
path_to_namenodes = config['path_to_namenodes']
replication_factor = config['replication_factor']
num_datanodes = config['num_datanodes']
datanode_size = config['datanode_size']
sync_period = config['sync_period']
datanode_log_path = config['datanode_log_path']
namenode_log_path = config['namenode_log_path']
namenode_checkpoints = config['namenode_checkpoints']
fs_path = config['fs_path']
dfs_setup_config = config['dfs_setup_config']
setupfiledir = config['dfs_setup_config'][:-10]
sys.path.append(path_to_datanodes)
sys.path.append(path_to_namenodes)
from namenode import namenodereceiveheartbeat1
from secondarynamenode import secnamenodereceiveheartbeat
dsthreads = {}
for i in range(1, num_datanodes + 1):
sys.path.append(path_to_datanodes + 'datanode{}/'.format(i))
exec("from datanode{} import datanode{}HB".format(i, i))
exec("dsthreads['datanodehbthread{}'] = threading.Thread(target = datanode{}HB, name = 'DatanodeHBThread{}')".format(i, i, i))
namenodeHBthread = threading.Thread(target=namenodereceiveheartbeat1, name='namenodeHBthread')
secnamenodeHBthread = threading.Thread(target=secnamenodereceiveheartbeat, name='secnamenodeHBthread')
namenodeHBthread.start()
secnamenodeHBthread.start()
for i in range(1, num_datanodes + 1):
dsthreads['datanodehbthread{}'.format(i)].start()
functionality = '''put, syntax - put <absolute path of the file>
cat, syntax - cat <filename>
ls, syntax - ls
rm, syntax - rm <filename>
runmapreducejob -i <absolute path of input file> -o <absolute path of output> -c <dfs setup file> -m <mapper absolute path> -r <reducer absolute path>'''
print("The default HDFS or the previous session is loaded...")
print("Provide configuration file and run createhdfs.py if you wish to create a new DFS.")
while True:
print()
print("Enter the DFS command...")
print(functionality)
print()
command = input().split()
if command[0] == "put":
if len(command) == 2:
try:
message = split(command[1])
print(message)
print()
except error as e:
print(e)
else:
print("Invalid syntax for put command")
if command[0] == "cat":
if len(command) == 2:
try:
cat(command[1])
except error as e:
print(e)
else:
print("Invalid syntax for cat command")
if command[0] == "rm":
if len(command) == 2:
try:
remove(command[1])
except error as e:
print(e)
else:
print("Invalid syntax for rm command")
if command[0] == "runmapreducejob":
if len(command) == 11:
inputfilepath = command[2]
outputfilepath = command[4]
setupfilepath = command[6]
mapperpath = command[8]
reducerpath = command[10]
mapreduce(inputfilepath, outputfilepath, setupfilepath, mapperpath, reducerpath)
else:
print("Invalid syntax for running Map Reduce job")
if command[0] == "ls":
print()
print("Files present in the DFS -")
listallfiles()
|
the-stack_0_10164 |
#python main.py --env-name "HalfCheetah-v2"
# --algo ppo
# --use-gae
# --log-interval 1
# --num-steps 2048
# --num-processes 1
# --lr 3e-4
# --entropy-coef 0
# --value-loss-coef 0.5
# --ppo-epoch 10
# --num-mini-batch 32
# --gamma 0.99
# --gae-lambda 0.95
# --num-env-steps 10000000
# --use-linear-lr-decay
# --use-proper-time-limits
# --gail
# import argparse
#
# parser = argparse.ArgumentParser()
# parser.add_argument('--sparse', action='store_true', default=True, help='GAT with sparse version or not.')
# parser.add_argument('--seed', type=int, default=72, help='Random seed.')
# parser.add_argument('--epochs', type=int, default=10000, help='Number of epochs to train.')
#
# args = parser.parse_args()
#
# print(args.sparse)
# print(args.seed)
# print(args.epochs)
import torch
import numpy as np
import random
if __name__ == '__main__':
# torch.manual_seed(1)
# #torch.cuda.manual_seed_all(args.seed)
# np.random.seed(1)
# for i in range(2):
# # for j in range(2):
# # a = np.random.rand(3)
# # print(a)
# a = np.random.rand(3)
# print(a)
# print("--------1----------")
# a = np.random.rand(3)
# print(a)
# print("--------2----------")
# a = np.random.rand(3)
# print(a)
# #print("------------------")
# print("------*------------")
# print("[4.17022005e-01 7.20324493e-01 1.14374817e-04]\
# [0.30233257 0.14675589 0.09233859]\
# [0.18626021 0.34556073 0.39676747]\
# [0.53881673 0.41919451 0.6852195 ]")#
# print("[4.17022005e-01 7.20324493e-01 1.14374817e-04 3.02332573e-01\
# 1.46755891e-01]")
# for _ in range(4):
# np.random.seed(1)
# b = np.random.choice(a)
# print(b)
# w = torch.empty(3, 5)
# print(w)
# print(torch.nn.init.orthogonal_(w))
# a = 3 / 1
# b = 3 // 1
# print("a:", a, "b:", b)
# a = [1, 2, 3, 4, 5, 6, 7, 8]
# print(a[:8])
# torch.manual_seed(1)
# a = torch.randint(1, 100, (1, 9, 2))
# #b = torch.rand(1, ).long()
# # print(b)
# # b1 = torch.tensor([1, 1, 0]).long()
# # b2 = torch.tensor([1]).long()
# # c1 = a[b1]
# # c2 = a[b2]
# print("a:", a)
# # print("b1:", b1)random
# # print("b2:", b2)
# # print("c1:", c1)
# # print("c2:", c2)
# # b = a[0, 0::4]
# b = a // 3
# print("b:", b)
# a = torch.tensor([1, 2, 3])
# d = {"A": a}
# e = d["A"]
# f = d["A"].sum()
# g = d["A"].sum().item()
# print("e:", e)
# print("f:", f)
# print("g:", g)
# for i, j in d.items():
# print("i:", i, "j:", j)
# b = 2 > 1
# print("b:", b)
a = np.random.rand(3, 4, 6)
b = np.random.rand(2, 5)
np.savez('/home/johnny/Document/Python/baselines/data/test.npz', a=a, b=b)
print("a:", a, "\n", "b:", b)
data = np.load('/home/johnny/Document/Python/baselines/data/test.npz')
print("data:", data)
print("data['a']:", data['a'], "\n", "data['b']:", data['b'])
print("len(data['a']):", len(data['a']))
c = data['a'][:len(data['a'])]
print("c:", c)
|
the-stack_0_10166 | import collections
import random
import threading
import time
import weakref
import sqlalchemy as tsa
from sqlalchemy import event
from sqlalchemy import pool
from sqlalchemy import select
from sqlalchemy import testing
from sqlalchemy.engine import default
from sqlalchemy.testing import assert_raises
from sqlalchemy.testing import assert_raises_context_ok
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import eq_
from sqlalchemy.testing import expect_raises
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing import is_not
from sqlalchemy.testing import is_true
from sqlalchemy.testing import mock
from sqlalchemy.testing.engines import testing_engine
from sqlalchemy.testing.mock import ANY
from sqlalchemy.testing.mock import call
from sqlalchemy.testing.mock import Mock
from sqlalchemy.testing.mock import patch
from sqlalchemy.testing.util import gc_collect
from sqlalchemy.testing.util import lazy_gc
join_timeout = 10
def MockDBAPI(): # noqa
def cursor():
return Mock()
def connect(*arg, **kw):
def close():
conn.closed = True
# mock seems like it might have an issue logging
# call_count correctly under threading, not sure.
# adding a side_effect for close seems to help.
conn = Mock(
cursor=Mock(side_effect=cursor),
close=Mock(side_effect=close),
closed=False,
)
return conn
def shutdown(value):
if value:
db.connect = Mock(side_effect=Exception("connect failed"))
else:
db.connect = Mock(side_effect=connect)
db.is_shutdown = value
db = Mock(
connect=Mock(side_effect=connect), shutdown=shutdown, is_shutdown=False
)
return db
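# Editor's note: a small illustrative helper (not part of the original test
# module) spelling out the MockDBAPI contract the fixtures below rely on:
# connect() hands out mock connections whose close() flags them closed, and
# shutdown(True) makes subsequent connect() calls raise.
def _demo_mock_dbapi_contract():
    db = MockDBAPI()
    conn = db.connect("foo.db")
    conn.close()
    assert conn.closed
    db.shutdown(True)
    try:
        db.connect("foo.db")
    except Exception:
        pass  # connect fails while the mock database is "shut down"
    db.shutdown(False)  # restore normal connect behavior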
class PoolTestBase(fixtures.TestBase):
def setup(self):
pool.clear_managers()
self._teardown_conns = []
def teardown(self):
for ref in self._teardown_conns:
conn = ref()
if conn:
conn.close()
@classmethod
def teardown_class(cls):
pool.clear_managers()
def _with_teardown(self, connection):
self._teardown_conns.append(weakref.ref(connection))
return connection
def _queuepool_fixture(self, **kw):
dbapi, pool = self._queuepool_dbapi_fixture(**kw)
return pool
def _queuepool_dbapi_fixture(self, **kw):
dbapi = MockDBAPI()
return (
dbapi,
pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
)
class PoolTest(PoolTestBase):
@testing.fails_on(
"+pyodbc", "pyodbc cursor doesn't implement tuple __eq__"
)
@testing.fails_on("+pg8000", "returns [1], not (1,)")
def test_cursor_iterable(self):
conn = testing.db.raw_connection()
cursor = conn.cursor()
cursor.execute(str(select([1], bind=testing.db)))
expected = [(1,)]
for row in cursor:
eq_(row, expected.pop(0))
def test_no_connect_on_recreate(self):
def creator():
raise Exception("no creates allowed")
for cls in (
pool.SingletonThreadPool,
pool.StaticPool,
pool.QueuePool,
pool.NullPool,
pool.AssertionPool,
):
p = cls(creator=creator)
p.dispose()
p2 = p.recreate()
assert p2.__class__ is cls
mock_dbapi = MockDBAPI()
p = cls(creator=mock_dbapi.connect)
conn = p.connect()
conn.close()
mock_dbapi.connect.side_effect = Exception("error!")
p.dispose()
p.recreate()
def test_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.info)
self.assert_(c.info is c._connection_record.info)
c.info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.info)
c.invalidate()
c = p.connect()
self.assert_("foo" not in c.info)
c.info["foo2"] = "bar2"
c.detach()
self.assert_("foo2" in c.info)
c2 = p.connect()
is_not(c.connection, c2.connection)
assert not c2.info
assert "foo2" in c.info
def test_rec_info(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c = p.connect()
self.assert_(not c.record_info)
self.assert_(c.record_info is c._connection_record.record_info)
c.record_info["foo"] = "bar"
c.close()
del c
c = p.connect()
self.assert_("foo" in c.record_info)
c.invalidate()
c = p.connect()
self.assert_("foo" in c.record_info)
c.record_info["foo2"] = "bar2"
c.detach()
is_(c.record_info, None)
is_(c._connection_record, None)
c2 = p.connect()
assert c2.record_info
assert "foo2" in c2.record_info
def test_rec_unconnected(self):
# test production of a _ConnectionRecord with an
# initially unconnected state.
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1, connect=False)
assert not r1.connection
c1 = r1.get_connection()
is_(c1, r1.connection)
def test_rec_close_reopen(self):
# test that _ConnectionRecord.close() allows
# the record to be reusable
dbapi = MockDBAPI()
p1 = pool.Pool(creator=lambda: dbapi.connect("foo.db"))
r1 = pool._ConnectionRecord(p1)
c1 = r1.connection
c2 = r1.get_connection()
is_(c1, c2)
r1.close()
assert not r1.connection
eq_(c1.mock_calls, [call.close()])
c2 = r1.get_connection()
is_not(c1, c2)
is_(c2, r1.connection)
eq_(c2.mock_calls, [])
@testing.combinations(
(
pool.QueuePool,
dict(pool_size=8, max_overflow=10, timeout=25, use_lifo=True),
),
(pool.QueuePool, {}),
(pool.NullPool, {}),
(pool.SingletonThreadPool, {}),
(pool.StaticPool, {}),
(pool.AssertionPool, {}),
)
def test_recreate_state(self, pool_cls, pool_args):
creator = object()
pool_args["pre_ping"] = True
pool_args["reset_on_return"] = "commit"
pool_args["recycle"] = 35
pool_args["logging_name"] = "somepool"
pool_args["dialect"] = default.DefaultDialect()
pool_args["echo"] = "debug"
p1 = pool_cls(creator=creator, **pool_args)
cls_keys = dir(pool_cls)
d1 = dict(p1.__dict__)
p2 = p1.recreate()
d2 = dict(p2.__dict__)
for k in cls_keys:
d1.pop(k, None)
d2.pop(k, None)
for k in (
"_threadconns",
"_invoke_creator",
"_pool",
"_overflow_lock",
"_fairy",
"_conn",
"logger",
):
if k in d2:
d2[k] = mock.ANY
eq_(d1, d2)
eq_(p1.echo, p2.echo)
is_(p1._dialect, p2._dialect)
if "use_lifo" in pool_args:
eq_(p1._pool.use_lifo, p2._pool.use_lifo)
class PoolDialectTest(PoolTestBase):
def _dialect(self):
canary = []
class PoolDialect(object):
def do_rollback(self, dbapi_connection):
canary.append("R")
dbapi_connection.rollback()
def do_commit(self, dbapi_connection):
canary.append("C")
dbapi_connection.commit()
def do_close(self, dbapi_connection):
canary.append("CL")
dbapi_connection.close()
return PoolDialect(), canary
def _do_test(self, pool_cls, assertion):
mock_dbapi = MockDBAPI()
dialect, canary = self._dialect()
p = pool_cls(creator=mock_dbapi.connect)
p._dialect = dialect
conn = p.connect()
conn.close()
p.dispose()
p.recreate()
conn = p.connect()
conn.close()
eq_(canary, assertion)
def test_queue_pool(self):
self._do_test(pool.QueuePool, ["R", "CL", "R"])
def test_assertion_pool(self):
self._do_test(pool.AssertionPool, ["R", "CL", "R"])
def test_singleton_pool(self):
self._do_test(pool.SingletonThreadPool, ["R", "CL", "R"])
def test_null_pool(self):
self._do_test(pool.NullPool, ["R", "CL", "R", "CL"])
def test_static_pool(self):
self._do_test(pool.StaticPool, ["R", "R"])
class PoolEventsTest(PoolTestBase):
def _first_connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def first_connect(*arg, **kw):
canary.append("first_connect")
event.listen(p, "first_connect", first_connect)
return p, canary
def _connect_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def connect(*arg, **kw):
canary.append("connect")
event.listen(p, "connect", connect)
return p, canary
def _checkout_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkout(*arg, **kw):
canary.append("checkout")
event.listen(p, "checkout", checkout)
return p, canary
def _checkin_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def checkin(*arg, **kw):
canary.append("checkin")
event.listen(p, "checkin", checkin)
return p, canary
def _reset_event_fixture(self):
p = self._queuepool_fixture()
canary = []
def reset(*arg, **kw):
canary.append("reset")
event.listen(p, "reset", reset)
return p, canary
def _invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "invalidate", canary)
return p, canary
def _soft_invalidate_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "soft_invalidate", canary)
return p, canary
def _close_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close", canary)
return p, canary
def _detach_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "detach", canary)
return p, canary
def _close_detached_event_fixture(self):
p = self._queuepool_fixture()
canary = Mock()
event.listen(p, "close_detached", canary)
return p, canary
def test_close(self):
p, canary = self._close_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.close()
eq_(canary.mock_calls, [])
p.dispose()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach(self):
p, canary = self._detach_event_fixture()
c1 = p.connect()
connection = c1.connection
rec = c1._connection_record
c1.detach()
eq_(canary.mock_calls, [call(connection, rec)])
def test_detach_close(self):
p, canary = self._close_detached_event_fixture()
c1 = p.connect()
connection = c1.connection
c1.detach()
c1.close()
eq_(canary.mock_calls, [call(connection)])
def test_first_connect_event(self):
p, canary = self._first_connect_event_fixture()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_event_fires_once(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p.connect()
eq_(canary, ["first_connect"])
def test_first_connect_on_previously_recreated(self):
p, canary = self._first_connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_first_connect_on_subsequently_recreated(self):
p, canary = self._first_connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["first_connect", "first_connect"])
def test_connect_event(self):
p, canary = self._connect_event_fixture()
p.connect()
eq_(canary, ["connect"])
def test_connect_insert_event(self):
p = self._queuepool_fixture()
canary = []
def connect_one(*arg, **kw):
canary.append("connect_one")
def connect_two(*arg, **kw):
canary.append("connect_two")
def connect_three(*arg, **kw):
canary.append("connect_three")
event.listen(p, "connect", connect_one)
event.listen(p, "connect", connect_two, insert=True)
event.listen(p, "connect", connect_three)
p.connect()
eq_(canary, ["connect_two", "connect_one", "connect_three"])
def test_connect_event_fires_subsequent(self):
p, canary = self._connect_event_fixture()
c1 = p.connect() # noqa
c2 = p.connect() # noqa
eq_(canary, ["connect", "connect"])
def test_connect_on_previously_recreated(self):
p, canary = self._connect_event_fixture()
p2 = p.recreate()
p.connect()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_connect_on_subsequently_recreated(self):
p, canary = self._connect_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["connect", "connect"])
def test_checkout_event(self):
p, canary = self._checkout_event_fixture()
p.connect()
eq_(canary, ["checkout"])
def test_checkout_event_fires_subsequent(self):
p, canary = self._checkout_event_fixture()
p.connect()
p.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkout_event_on_subsequently_recreated(self):
p, canary = self._checkout_event_fixture()
p.connect()
p2 = p.recreate()
p2.connect()
eq_(canary, ["checkout", "checkout"])
def test_checkin_event(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
def test_reset_event(self):
p, canary = self._reset_event_fixture()
c1 = p.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["reset"])
def test_soft_invalidate_event_no_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate(soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_soft_invalidate_event_exception(self):
p, canary = self._soft_invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc, soft=True)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
def test_invalidate_event_no_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
c1.invalidate()
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is None
def test_invalidate_event_exception(self):
p, canary = self._invalidate_event_fixture()
c1 = p.connect()
c1.close()
assert not canary.called
c1 = p.connect()
dbapi_con = c1.connection
exc = Exception("hi")
c1.invalidate(exc)
assert canary.call_args_list[0][0][0] is dbapi_con
assert canary.call_args_list[0][0][2] is exc
@testing.requires.predictable_gc
def test_checkin_event_gc(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
eq_(canary, [])
del c1
lazy_gc()
eq_(canary, ["checkin"])
def test_checkin_event_on_subsequently_recreated(self):
p, canary = self._checkin_event_fixture()
c1 = p.connect()
p2 = p.recreate()
c2 = p2.connect()
eq_(canary, [])
c1.close()
eq_(canary, ["checkin"])
c2.close()
eq_(canary, ["checkin", "checkin"])
def test_listen_targets_scope(self):
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
def listen_four(*args):
canary.append("listen_four")
engine = testing_engine(testing.db.url)
event.listen(pool.Pool, "connect", listen_one)
event.listen(engine.pool, "connect", listen_two)
event.listen(engine, "connect", listen_three)
event.listen(engine.__class__, "connect", listen_four)
engine.execute(select(1)).close()
eq_(
canary, ["listen_one", "listen_four", "listen_two", "listen_three"]
)
def test_listen_targets_per_subclass(self):
"""test that listen() called on a subclass remains specific to
that subclass."""
canary = []
def listen_one(*args):
canary.append("listen_one")
def listen_two(*args):
canary.append("listen_two")
def listen_three(*args):
canary.append("listen_three")
event.listen(pool.Pool, "connect", listen_one)
event.listen(pool.QueuePool, "connect", listen_two)
event.listen(pool.SingletonThreadPool, "connect", listen_three)
p1 = pool.QueuePool(creator=MockDBAPI().connect)
p2 = pool.SingletonThreadPool(creator=MockDBAPI().connect)
assert listen_one in p1.dispatch.connect
assert listen_two in p1.dispatch.connect
assert listen_three not in p1.dispatch.connect
assert listen_one in p2.dispatch.connect
assert listen_two not in p2.dispatch.connect
assert listen_three in p2.dispatch.connect
p1.connect()
eq_(canary, ["listen_one", "listen_two"])
p2.connect()
eq_(canary, ["listen_one", "listen_two", "listen_one", "listen_three"])
def test_connect_event_fails_invalidates(self):
fail = False
def listen_one(conn, rec):
if fail:
raise Exception("it failed")
def listen_two(conn, rec):
rec.info["important_flag"] = True
p1 = pool.QueuePool(
creator=MockDBAPI().connect, pool_size=1, max_overflow=0
)
event.listen(p1, "connect", listen_one)
event.listen(p1, "connect", listen_two)
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.invalidate()
conn.close()
fail = True
assert_raises(Exception, p1.connect)
fail = False
conn = p1.connect()
eq_(conn.info["important_flag"], True)
conn.close()
def teardown(self):
# TODO: need to get remove() functionality
# going
pool.Pool.dispatch._clear()
class PoolFirstConnectSyncTest(PoolTestBase):
# test [ticket:2964]
@testing.requires.timing_intensive
def test_sync(self):
pool = self._queuepool_fixture(pool_size=3, max_overflow=0)
evt = Mock()
@event.listens_for(pool, "first_connect")
def slow_first_connect(dbapi_con, rec):
time.sleep(1)
evt.first_connect()
@event.listens_for(pool, "connect")
def on_connect(dbapi_con, rec):
evt.connect()
def checkout():
for j in range(2):
c1 = pool.connect()
time.sleep(0.02)
c1.close()
time.sleep(0.02)
threads = []
# what we're trying to do here is have concurrent use of
# all three pooled connections at once, and the thing we want
# to test is that first_connect() finishes completely before
# any of the connections get returned. so first_connect()
# sleeps for one second, then pings the mock. the threads should
        # not have made it to the "checkout()" event for that one second.
for i in range(5):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
# there is a very unlikely condition observed in CI on windows
# where even though we have five threads above all calling upon the
# pool, we didn't get concurrent use of all three connections, two
# connections were enough. so here we purposely just check out
# all three at once just to get a consistent test result.
make_sure_all_three_are_connected = [pool.connect() for i in range(3)]
for conn in make_sure_all_three_are_connected:
conn.close()
eq_(
evt.mock_calls,
[
call.first_connect(),
call.connect(),
call.connect(),
call.connect(),
],
)
class QueuePoolTest(PoolTestBase):
def test_queuepool_del(self):
self._do_testqueuepool(useclose=False)
def test_queuepool_close(self):
self._do_testqueuepool(useclose=True)
def _do_testqueuepool(self, useclose=False):
p = self._queuepool_fixture(pool_size=3, max_overflow=-1)
reaper = testing.engines.ConnectionKiller()
reaper.add_pool(p)
def status(pool):
return (
pool.size(),
pool.checkedin(),
pool.overflow(),
pool.checkedout(),
)
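        # status() reports (size, checked-in, overflow, checked-out); the
        # overflow counter starts at -pool_size and only decreases again when
        # surplus connections are actually closed, not merely checked in.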
c1 = p.connect()
self.assert_(status(p) == (3, 0, -2, 1))
c2 = p.connect()
self.assert_(status(p) == (3, 0, -1, 2))
c3 = p.connect()
self.assert_(status(p) == (3, 0, 0, 3))
c4 = p.connect()
self.assert_(status(p) == (3, 0, 1, 4))
c5 = p.connect()
self.assert_(status(p) == (3, 0, 2, 5))
c6 = p.connect()
self.assert_(status(p) == (3, 0, 3, 6))
if useclose:
c4.close()
c3.close()
c2.close()
else:
c4 = c3 = c2 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 3, 3))
if useclose:
c1.close()
c5.close()
c6.close()
else:
c1 = c5 = c6 = None
lazy_gc()
self.assert_(status(p) == (3, 3, 0, 0))
c1 = p.connect()
c2 = p.connect()
self.assert_(status(p) == (3, 1, 0, 2), status(p))
if useclose:
c2.close()
else:
c2 = None
lazy_gc()
self.assert_(status(p) == (3, 2, 0, 1))
c1.close()
reaper.assert_all_closed()
def test_timeout_accessor(self):
expected_timeout = 123
p = self._queuepool_fixture(timeout=expected_timeout)
eq_(p.timeout(), expected_timeout)
@testing.requires.timing_intensive
def test_timeout(self):
p = self._queuepool_fixture(pool_size=3, max_overflow=0, timeout=2)
c1 = p.connect() # noqa
c2 = p.connect() # noqa
c3 = p.connect() # noqa
now = time.time()
assert_raises(tsa.exc.TimeoutError, p.connect)
assert int(time.time() - now) == 2
@testing.requires.timing_intensive
def test_timeout_subsecond_precision(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0, timeout=0.5)
c1 = p.connect() # noqa
with expect_raises(tsa.exc.TimeoutError):
now = time.time()
c2 = p.connect() # noqa
# Python timing is not very accurate, the time diff should be very
# close to 0.5s but we give 200ms of slack.
assert 0.3 <= time.time() - now <= 0.7, "Pool timeout not respected"
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_timeout_race(self):
# test a race condition where the initial connecting threads all race
# to queue.Empty, then block on the mutex. each thread consumes a
# connection as they go in. when the limit is reached, the remaining
# threads go in, and get TimeoutError; even though they never got to
# wait for the timeout on queue.get(). the fix involves checking the
# timeout again within the mutex, and if so, unlocking and throwing
# them back to the start of do_get()
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=lambda: dbapi.connect(delay=0.05),
pool_size=2,
max_overflow=1,
timeout=3,
)
timeouts = []
def checkout():
for x in range(1):
now = time.time()
try:
c1 = p.connect()
except tsa.exc.TimeoutError:
timeouts.append(time.time() - now)
continue
time.sleep(4)
c1.close()
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
assert len(timeouts) > 0
for t in timeouts:
assert t >= 3, "Not all timeouts were >= 3 seconds %r" % timeouts
            # normally, the timeout should be under 4 seconds,
# but on a loaded down buildbot it can go up.
assert t < 14, "Not all timeouts were < 14 seconds %r" % timeouts
def _test_overflow(self, thread_count, max_overflow):
reaper = testing.engines.ConnectionKiller()
dbapi = MockDBAPI()
mutex = threading.Lock()
def creator():
time.sleep(0.05)
with mutex:
return dbapi.connect()
p = pool.QueuePool(
creator=creator, pool_size=3, timeout=2, max_overflow=max_overflow
)
reaper.add_pool(p)
peaks = []
def whammy():
for i in range(10):
try:
con = p.connect()
time.sleep(0.005)
peaks.append(p.overflow())
con.close()
del con
except tsa.exc.TimeoutError:
pass
threads = []
for i in range(thread_count):
th = threading.Thread(target=whammy)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
self.assert_(max(peaks) <= max_overflow)
reaper.assert_all_closed()
def test_overflow_reset_on_failed_connect(self):
dbapi = Mock()
def failing_dbapi():
raise Exception("connection failed")
creator = dbapi.connect
def create():
return creator()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
c1 = self._with_teardown(p.connect()) # noqa
c2 = self._with_teardown(p.connect()) # noqa
c3 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1)
creator = failing_dbapi
assert_raises(Exception, p.connect)
eq_(p._overflow, 1)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_hanging_connect_within_overflow(self):
"""test that a single connect() call which is hanging
does not block other connections from proceeding."""
dbapi = Mock()
mutex = threading.Lock()
def hanging_dbapi():
time.sleep(2)
with mutex:
return dbapi.connect()
def fast_dbapi():
with mutex:
return dbapi.connect()
creator = threading.local()
def create():
return creator.mock_connector()
def run_test(name, pool, should_hang):
if should_hang:
creator.mock_connector = hanging_dbapi
else:
creator.mock_connector = fast_dbapi
conn = pool.connect()
conn.operation(name)
time.sleep(1)
conn.close()
p = pool.QueuePool(creator=create, pool_size=2, max_overflow=3)
threads = [
threading.Thread(target=run_test, args=("success_one", p, False)),
threading.Thread(target=run_test, args=("success_two", p, False)),
threading.Thread(target=run_test, args=("overflow_one", p, True)),
threading.Thread(target=run_test, args=("overflow_two", p, False)),
threading.Thread(
target=run_test, args=("overflow_three", p, False)
),
]
for t in threads:
t.start()
time.sleep(0.2)
for t in threads:
t.join(timeout=join_timeout)
eq_(
dbapi.connect().operation.mock_calls,
[
call("success_one"),
call("success_two"),
call("overflow_two"),
call("overflow_three"),
call("overflow_one"),
],
)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_waiters_handled(self):
"""test that threads waiting for connections are
handled when the pool is replaced.
"""
mutex = threading.Lock()
dbapi = MockDBAPI()
def creator():
with mutex:
return dbapi.connect()
success = []
for timeout in (None, 30):
for max_overflow in (0, -1, 3):
p = pool.QueuePool(
creator=creator,
pool_size=2,
timeout=timeout,
max_overflow=max_overflow,
)
def waiter(p, timeout, max_overflow):
success_key = (timeout, max_overflow)
conn = p.connect()
success.append(success_key)
time.sleep(0.1)
conn.close()
c1 = p.connect() # noqa
c2 = p.connect()
threads = []
for i in range(2):
t = threading.Thread(
target=waiter, args=(p, timeout, max_overflow)
)
t.daemon = True
t.start()
threads.append(t)
# this sleep makes sure that the
# two waiter threads hit upon wait()
# inside the queue, before we invalidate the other
# two conns
time.sleep(0.2)
p._invalidate(c2)
for t in threads:
t.join(join_timeout)
eq_(len(success), 12, "successes: %s" % success)
def test_connrec_invalidated_within_checkout_no_race(self):
"""Test that a concurrent ConnectionRecord.invalidate() which
occurs after the ConnectionFairy has called
_ConnectionRecord.checkout()
but before the ConnectionFairy tests "fairy.connection is None"
will not result in an InvalidRequestError.
This use case assumes that a listener on the checkout() event
will be raising DisconnectionError so that a reconnect attempt
may occur.
"""
dbapi = MockDBAPI()
def creator():
return dbapi.connect()
p = pool.QueuePool(creator=creator, pool_size=1, max_overflow=0)
conn = p.connect()
conn.close()
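        # simulate a concurrent invalidate() landing between
        # _ConnectionRecord.checkout() and the fairy's "connection is None"
        # check by invalidating inside a patched checkout()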
_existing_checkout = pool._ConnectionRecord.checkout
@classmethod
def _decorate_existing_checkout(cls, *arg, **kw):
fairy = _existing_checkout(*arg, **kw)
connrec = fairy._connection_record
connrec.invalidate()
return fairy
with patch(
"sqlalchemy.pool._ConnectionRecord.checkout",
_decorate_existing_checkout,
):
conn = p.connect()
is_(conn._connection_record.connection, None)
conn.close()
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_notify_waiters(self):
dbapi = MockDBAPI()
canary = []
def creator():
canary.append(1)
return dbapi.connect()
p1 = pool.QueuePool(
creator=creator, pool_size=1, timeout=None, max_overflow=0
)
def waiter(p):
conn = p.connect()
canary.append(2)
time.sleep(0.5)
conn.close()
c1 = p1.connect()
threads = []
for i in range(5):
t = threading.Thread(target=waiter, args=(p1,))
t.start()
threads.append(t)
time.sleep(0.5)
eq_(canary, [1])
# this also calls invalidate()
# on c1
p1._invalidate(c1)
for t in threads:
t.join(join_timeout)
eq_(canary, [1, 1, 2, 2, 2, 2, 2])
def test_dispose_closes_pooled(self):
dbapi = MockDBAPI()
p = pool.QueuePool(
creator=dbapi.connect, pool_size=2, timeout=None, max_overflow=0
)
c1 = p.connect()
c2 = p.connect()
c1_con = c1.connection
c2_con = c2.connection
c1.close()
eq_(c1_con.close.call_count, 0)
eq_(c2_con.close.call_count, 0)
p.dispose()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# currently, if a ConnectionFairy is closed
# after the pool has been disposed, there's no
# flag that states it should be invalidated
# immediately - it just gets returned to the
# pool normally...
c2.close()
eq_(c1_con.close.call_count, 1)
eq_(c2_con.close.call_count, 0)
# ...and that's the one we'll get back next.
c3 = p.connect()
assert c3.connection is c2_con
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_no_overflow(self):
self._test_overflow(40, 0)
@testing.requires.threading_with_mock
@testing.requires.timing_intensive
def test_max_overflow(self):
self._test_overflow(40, 5)
def test_overflow_no_gc(self):
p = self._queuepool_fixture(pool_size=2, max_overflow=2)
# disable weakref collection of the
# underlying connections
strong_refs = set()
def _conn():
c = p.connect()
strong_refs.add(c.connection)
return c
for j in range(5):
# open 4 conns at a time. each time this
# will yield two pooled connections + two
# overflow connections.
conns = [_conn() for i in range(4)]
for c in conns:
c.close()
# doing that for a total of 5 times yields
# ten overflow connections closed plus the
# two pooled connections unclosed.
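        # (both sides collapse to {0, 1}: the ten overflow connections were
        # each closed exactly once, the two pooled ones not at all)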
eq_(
set([c.close.call_count for c in strong_refs]),
set([1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0]),
)
def test_recycle(self):
with patch("sqlalchemy.pool.base.time.time") as mock:
mock.return_value = 10000
p = self._queuepool_fixture(
pool_size=1, max_overflow=0, recycle=30
)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
mock.return_value = 10001
c2 = p.connect()
is_(c2.connection, c_ref())
c2.close()
mock.return_value = 10035
c3 = p.connect()
is_not(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
p._invalidate(c2)
assert c2_rec.connection is None
c2.close()
time.sleep(0.5)
c3 = p.connect()
is_not(c3.connection, c_ref())
@testing.requires.timing_intensive
def test_recycle_on_soft_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_ref = weakref.ref(c1.connection)
c1.close()
c2 = p.connect()
is_(c2.connection, c_ref())
c2_rec = c2._connection_record
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
c2.invalidate(soft=True)
is_(c2_rec.connection, c2.connection)
c2.close()
c3 = p.connect()
is_not(c3.connection, c_ref())
is_(c3._connection_record, c2_rec)
is_(c2_rec.connection, c3.connection)
def _no_wr_finalize(self):
finalize_fairy = pool._finalize_fairy
def assert_no_wr_callback(
connection, connection_record, pool, ref, echo, fairy=None
):
if fairy is None:
raise AssertionError(
"finalize fairy was called as a weakref callback"
)
return finalize_fairy(
connection, connection_record, pool, ref, echo, fairy
)
return patch.object(pool, "_finalize_fairy", assert_no_wr_callback)
def _assert_cleanup_on_pooled_reconnect(self, dbapi, p):
# p is QueuePool with size=1, max_overflow=2,
# and one connection in the pool that will need to
# reconnect when next used (either due to recycle or invalidate)
with self._no_wr_finalize():
eq_(p.checkedout(), 0)
eq_(p._overflow, 0)
dbapi.shutdown(True)
assert_raises_context_ok(Exception, p.connect)
eq_(p._overflow, 0)
eq_(p.checkedout(), 0) # and not 1
dbapi.shutdown(False)
c1 = self._with_teardown(p.connect()) # noqa
assert p._pool.empty() # poolsize is one, so we're empty OK
c2 = self._with_teardown(p.connect()) # noqa
eq_(p._overflow, 1) # and not 2
# this hangs if p._overflow is 2
c3 = self._with_teardown(p.connect())
c3.close()
def test_error_on_pooled_reconnect_cleanup_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.invalidate()
c1.close()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_error_on_pooled_reconnect_cleanup_recycle(self):
dbapi, p = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2, recycle=1
)
c1 = p.connect()
c1.close()
time.sleep(1.5)
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.timing_intensive
def test_connect_handler_not_called_for_recycled(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
canary = Mock()
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
event.listen(p, "connect", canary.connect)
event.listen(p, "checkout", canary.checkout)
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
eq_(
canary.mock_calls,
[call.connect(ANY, ANY), call.checkout(ANY, ANY, ANY)],
)
@testing.requires.timing_intensive
def test_connect_checkout_handler_always_gets_info(self):
"""test [ticket:3497]"""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=2, max_overflow=2)
c1 = p.connect()
c2 = p.connect()
c1.close()
c2.close()
dbapi.shutdown(True)
# ensure pool invalidate time will be later than starttime
# for ConnectionRecord objects above
time.sleep(0.1)
bad = p.connect()
p._invalidate(bad)
bad.close()
assert p._invalidate_time
@event.listens_for(p, "connect")
def connect(conn, conn_rec):
conn_rec.info["x"] = True
@event.listens_for(p, "checkout")
def checkout(conn, conn_rec, conn_f):
assert "x" in conn_rec.info
assert_raises(Exception, p.connect)
p._pool.queue = collections.deque(
[c for c in p._pool.queue if c.connection is not None]
)
dbapi.shutdown(False)
c = p.connect()
c.close()
def test_error_on_pooled_reconnect_cleanup_wcheckout_event(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=2)
c1 = p.connect()
c1.close()
@event.listens_for(p, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if dbapi.is_shutdown:
raise tsa.exc.DisconnectionError()
self._assert_cleanup_on_pooled_reconnect(dbapi, p)
@testing.requires.predictable_gc
def test_userspace_disconnectionerror_weakref_finalizer(self):
dbapi, pool = self._queuepool_dbapi_fixture(
pool_size=1, max_overflow=2
)
@event.listens_for(pool, "checkout")
def handle_checkout_event(dbapi_con, con_record, con_proxy):
if getattr(dbapi_con, "boom") == "yes":
raise tsa.exc.DisconnectionError()
conn = pool.connect()
old_dbapi_conn = conn.connection
conn.close()
eq_(old_dbapi_conn.mock_calls, [call.rollback()])
old_dbapi_conn.boom = "yes"
conn = pool.connect()
dbapi_conn = conn.connection
del conn
gc_collect()
# new connection was reset on return appropriately
eq_(dbapi_conn.mock_calls, [call.rollback()])
# old connection was just closed - did not get an
# erroneous reset on return
eq_(old_dbapi_conn.mock_calls, [call.rollback(), call.close()])
@testing.requires.timing_intensive
def test_recycle_pool_no_race(self):
def slow_close():
slow_closing_connection._slow_close()
time.sleep(0.5)
slow_closing_connection = Mock()
slow_closing_connection.connect.return_value.close = slow_close
class Error(Exception):
pass
dialect = Mock()
dialect.is_disconnect = lambda *arg, **kw: True
dialect.dbapi.Error = Error
pools = []
class TrackQueuePool(pool.QueuePool):
def __init__(self, *arg, **kw):
pools.append(self)
super(TrackQueuePool, self).__init__(*arg, **kw)
def creator():
return slow_closing_connection.connect()
p1 = TrackQueuePool(creator=creator, pool_size=20)
from sqlalchemy import create_engine
eng = create_engine(testing.db.url, pool=p1, _initialize=False)
eng.dialect = dialect
# 15 total connections
conns = [eng.connect() for i in range(15)]
# return 8 back to the pool
for conn in conns[3:10]:
conn.close()
def attempt(conn):
time.sleep(random.random())
try:
conn._handle_dbapi_exception(
Error(), "statement", {}, Mock(), Mock()
)
except tsa.exc.DBAPIError:
pass
# run an error + invalidate operation on the remaining 7 open
# connections
threads = []
for conn in conns:
t = threading.Thread(target=attempt, args=(conn,))
t.start()
threads.append(t)
for t in threads:
t.join()
# return all 15 connections to the pool
for conn in conns:
conn.close()
# re-open 15 total connections
conns = [eng.connect() for i in range(15)]
# 15 connections have been fully closed due to invalidate
assert slow_closing_connection._slow_close.call_count == 15
# 15 initial connections + 15 reconnections
assert slow_closing_connection.connect.call_count == 30
assert len(pools) <= 2, len(pools)
def test_invalidate(self):
p = self._queuepool_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_recreate(self):
p = self._queuepool_fixture(
reset_on_return=None, pool_size=1, max_overflow=0
)
p2 = p.recreate()
assert p2.size() == 1
assert p2._reset_on_return is pool.reset_none
assert p2._max_overflow == 0
def test_reconnect(self):
"""tests reconnect operations at the pool level. SA's
engine/dialect includes another layer of reconnect support for
'database was lost' errors."""
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c_id = c1.connection.id
c1.close()
c1 = None
c1 = p.connect()
assert c1.connection.id == c_id
dbapi.raise_error = True
c1.invalidate()
c1 = None
c1 = p.connect()
assert c1.connection.id != c_id
def test_detach(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1.detach()
c2 = p.connect() # noqa
eq_(dbapi.connect.mock_calls, [call("foo.db"), call("foo.db")])
c1_con = c1.connection
assert c1_con is not None
eq_(c1_con.close.call_count, 0)
c1.close()
eq_(c1_con.close.call_count, 1)
def test_detach_via_invalidate(self):
dbapi, p = self._queuepool_dbapi_fixture(pool_size=1, max_overflow=0)
c1 = p.connect()
c1_con = c1.connection
c1.invalidate()
assert c1.connection is None
eq_(c1_con.close.call_count, 1)
c2 = p.connect()
assert c2.connection is not c1_con
c2_con = c2.connection
c2.close()
eq_(c2_con.close.call_count, 0)
def test_no_double_checkin(self):
p = self._queuepool_fixture(pool_size=1)
c1 = p.connect()
rec = c1._connection_record
c1.close()
assert_raises_message(
Warning, "Double checkin attempted on %s" % rec, rec.checkin
)
def test_lifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator, use_lifo=True)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
for i in range(5):
pc1 = p.connect()
is_(pc1.connection, c3)
pc1.close()
pc1 = p.connect()
is_(pc1.connection, c3)
pc2 = p.connect()
is_(pc2.connection, c2)
pc2.close()
pc3 = p.connect()
is_(pc3.connection, c2)
pc2 = p.connect()
is_(pc2.connection, c1)
pc2.close()
pc3.close()
pc1.close()
def test_fifo(self):
c1, c2, c3 = Mock(), Mock(), Mock()
connections = [c1, c2, c3]
def creator():
return connections.pop(0)
p = pool.QueuePool(creator)
pc1 = p.connect()
pc2 = p.connect()
pc3 = p.connect()
pc1.close()
pc2.close()
pc3.close()
pc1 = p.connect()
is_(pc1.connection, c1)
pc1.close()
pc1 = p.connect()
is_(pc1.connection, c2)
pc2 = p.connect()
is_(pc2.connection, c3)
pc2.close()
pc3 = p.connect()
is_(pc3.connection, c1)
pc2 = p.connect()
is_(pc2.connection, c3)
pc2.close()
pc3.close()
pc1.close()
class ResetOnReturnTest(PoolTestBase):
def _fixture(self, **kw):
dbapi = Mock()
return (
dbapi,
pool.QueuePool(creator=lambda: dbapi.connect("foo.db"), **kw),
)
def test_plain_rollback(self):
dbapi, p = self._fixture(reset_on_return="rollback")
c1 = p.connect()
c1.close()
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_plain_commit(self):
dbapi, p = self._fixture(reset_on_return="commit")
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_plain_none(self):
dbapi, p = self._fixture(reset_on_return=None)
c1 = p.connect()
c1.close()
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_rollback(self):
dbapi, p = self._fixture(reset_on_return="rollback")
class Agent(object):
def __init__(self, conn):
self.conn = conn
is_active = True
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert dbapi.connect().special_rollback.called
assert not dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 1)
eq_(dbapi.connect().special_commit.call_count, 0)
assert dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
def test_agent_commit(self):
dbapi, p = self._fixture(reset_on_return="commit")
class Agent(object):
def __init__(self, conn):
self.conn = conn
is_active = True
def rollback(self):
self.conn.special_rollback()
def commit(self):
self.conn.special_commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
assert not dbapi.connect().special_rollback.called
assert dbapi.connect().special_commit.called
assert not dbapi.connect().rollback.called
assert not dbapi.connect().commit.called
c1 = p.connect()
c1.close()
eq_(dbapi.connect().special_rollback.call_count, 0)
eq_(dbapi.connect().special_commit.call_count, 1)
assert not dbapi.connect().rollback.called
assert dbapi.connect().commit.called
def test_reset_agent_disconnect(self):
dbapi, p = self._fixture(reset_on_return="rollback")
class Agent(object):
def __init__(self, conn):
self.conn = conn
def rollback(self):
p._invalidate(self.conn)
raise Exception("hi")
def commit(self):
self.conn.commit()
c1 = p.connect()
c1._reset_agent = Agent(c1)
c1.close()
# no warning raised. We know it would warn due to
# QueuePoolTest.test_no_double_checkin
class SingletonThreadPoolTest(PoolTestBase):
@testing.requires.threading_with_mock
def test_cleanup(self):
self._test_cleanup(False)
# TODO: the SingletonThreadPool cleanup method
# has an unfixed race condition within the "cleanup" system that
# leads to this test being off by one connection under load; in any
# case, this connection will be closed once it is garbage collected.
# this pool is not a production-level pool and is only used for the
# SQLite "memory" connection, and is not very useful under actual
# multi-threaded conditions
# @testing.requires.threading_with_mock
# def test_cleanup_no_gc(self):
# self._test_cleanup(True)
def _test_cleanup(self, strong_refs):
"""test that the pool's connections are OK after cleanup() has
been called."""
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
if strong_refs:
sr = set()
def _conn():
c = p.connect()
sr.add(c.connection)
return c
else:
def _conn():
return p.connect()
def checkout():
for x in range(10):
c = _conn()
assert c
c.cursor()
c.close()
time.sleep(0.01)
threads = []
for i in range(10):
th = threading.Thread(target=checkout)
th.start()
threads.append(th)
for th in threads:
th.join(join_timeout)
lp = len(p._all_conns)
is_true(3 <= lp <= 4)
if strong_refs:
still_opened = len([c for c in sr if not c.close.call_count])
eq_(still_opened, 3)
def test_no_rollback_from_nested_connections(self):
dbapi = MockDBAPI()
lock = threading.Lock()
def creator():
# the mock iterator isn't threadsafe...
with lock:
return dbapi.connect()
p = pool.SingletonThreadPool(creator=creator, pool_size=3)
c1 = p.connect()
mock_conn = c1.connection
c2 = p.connect()
is_(c1, c2)
c2.close()
eq_(mock_conn.mock_calls, [])
c1.close()
eq_(mock_conn.mock_calls, [call.rollback()])
class AssertionPoolTest(PoolTestBase):
def test_connect_error(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
def test_connect_multiple(self):
dbapi = MockDBAPI()
p = pool.AssertionPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c2 = p.connect()
c2.close()
c3 = p.connect() # noqa
assert_raises(AssertionError, p.connect)
class NullPoolTest(PoolTestBase):
def test_reconnect(self):
dbapi = MockDBAPI()
p = pool.NullPool(creator=lambda: dbapi.connect("foo.db"))
c1 = p.connect()
c1.close()
c1 = None
c1 = p.connect()
c1.invalidate()
c1 = None
c1 = p.connect()
dbapi.connect.assert_has_calls(
[call("foo.db"), call("foo.db")], any_order=True
)
class StaticPoolTest(PoolTestBase):
def test_recreate(self):
dbapi = MockDBAPI()
def creator():
return dbapi.connect("foo.db")
p = pool.StaticPool(creator)
p2 = p.recreate()
assert p._creator is p2._creator
class CreatorCompatibilityTest(PoolTestBase):
def test_creator_callable_outside_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator()
finally:
conn.close()
def test_creator_callable_outside_witharg(self):
e = testing_engine()
creator = e.pool._creator
try:
conn = creator(Mock())
finally:
conn.close()
def test_creator_patching_arg_to_noarg(self):
e = testing_engine()
creator = e.pool._creator
try:
# the creator is the two-arg form
conn = creator(Mock())
finally:
conn.close()
def mock_create():
return creator()
conn = e.connect()
conn.invalidate()
conn.close()
# test that the 'should_wrap_creator' status
# will dynamically switch if the _creator is monkeypatched.
# patch it with a zero-arg form
with patch.object(e.pool, "_creator", mock_create):
conn = e.connect()
conn.invalidate()
conn.close()
conn = e.connect()
conn.close()
the-stack_0_10168 | # -*- coding: utf-8 -*-
# Authors: Mark Wronkiewicz <[email protected]>
# Yousra Bekhti <[email protected]>
# Eric Larson <[email protected]>
#
# License: BSD-3-Clause
from collections.abc import Iterable
import numpy as np
from ..event import _get_stim_channel
from .._ola import _Interp2
from ..io.pick import (pick_types, pick_info, pick_channels,
pick_channels_forward)
from ..cov import make_ad_hoc_cov, read_cov, Covariance
from ..bem import fit_sphere_to_headshape, make_sphere_model, read_bem_solution
from ..io import RawArray, BaseRaw, Info
from ..chpi import (read_head_pos, head_pos_to_trans_rot_t, get_chpi_info,
_get_hpi_initial_fit)
from ..io.constants import FIFF
from ..forward import (_magnetic_dipole_field_vec, _merge_meg_eeg_fwds,
_stc_src_sel, convert_forward_solution,
_prepare_for_forward, _transform_orig_meg_coils,
_compute_forwards, _to_forward_dict,
restrict_forward_to_stc, _prep_meg_channels)
from ..transforms import _get_trans, transform_surface_to
from ..source_space import (_ensure_src, _set_source_space_vertices,
setup_volume_source_space)
from ..source_estimate import _BaseSourceEstimate
from ..surface import _CheckInside
from ..utils import (logger, verbose, check_random_state, _pl, _validate_type,
_check_preload)
from ..parallel import check_n_jobs
from .source import SourceSimulator
def _check_cov(info, cov):
"""Check that the user provided a valid covariance matrix for the noise."""
if isinstance(cov, Covariance) or cov is None:
pass
elif isinstance(cov, dict):
cov = make_ad_hoc_cov(info, cov, verbose=False)
elif isinstance(cov, str):
if cov == 'simple':
cov = make_ad_hoc_cov(info, None, verbose=False)
else:
cov = read_cov(cov, verbose=False)
else:
        raise TypeError('Covariance matrix type not recognized. Valid input '
                        'types are: instance of Covariance, dict, str, None; '
                        'got %s' % (cov,))
return cov
def _check_stc_iterable(stc, info):
# 1. Check that our STC is iterable (or convert it to one using cycle)
# 2. Do first iter so we can get the vertex subselection
# 3. Get the list of verts, which must stay the same across iterations
if isinstance(stc, _BaseSourceEstimate):
stc = [stc]
_validate_type(stc, Iterable, 'SourceEstimate, tuple, or iterable')
stc_enum = enumerate(stc)
del stc
try:
stc_counted = next(stc_enum)
except StopIteration:
raise RuntimeError('Iterable did not provide stc[0]')
_, _, verts = _stc_data_event(stc_counted, 1, info['sfreq'])
return stc_enum, stc_counted, verts
def _log_ch(start, info, ch):
"""Log channel information."""
if ch is not None:
extra, just, ch = ' stored on channel:', 50, info['ch_names'][ch]
else:
extra, just, ch = ' not stored', 0, ''
logger.info((start + extra).ljust(just) + ch)
def _check_head_pos(head_pos, info, first_samp, times=None):
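    """Normalize head position input into (dev_head_ts, offsets).

    ``head_pos`` may be None, a MaxFilter-style position file name, a
    quaternion array, a (trans, rot, t) tuple, or a dict mapping times to
    4x4 device->head transforms; the result is a list of dev_head_t dicts
    plus the sample offsets at which each transform takes effect.
    """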
if head_pos is None: # use pos from info['dev_head_t']
head_pos = dict()
if isinstance(head_pos, str): # can be a head pos file
head_pos = read_head_pos(head_pos)
if isinstance(head_pos, np.ndarray): # can be head_pos quats
head_pos = head_pos_to_trans_rot_t(head_pos)
if isinstance(head_pos, tuple): # can be quats converted to trans, rot, t
transs, rots, ts = head_pos
first_time = first_samp / info['sfreq']
ts = ts - first_time # MF files need reref
dev_head_ts = [np.r_[np.c_[r, t[:, np.newaxis]], [[0, 0, 0, 1]]]
for r, t in zip(rots, transs)]
del transs, rots
elif isinstance(head_pos, dict):
ts = np.array(list(head_pos.keys()), float)
ts.sort()
dev_head_ts = [head_pos[float(tt)] for tt in ts]
else:
raise TypeError('unknown head_pos type %s' % type(head_pos))
bad = ts < 0
if bad.any():
        raise RuntimeError('All position times must be >= 0, found %s/%s '
                           '< 0' % (bad.sum(), len(bad)))
if times is not None:
bad = ts > times[-1]
if bad.any():
raise RuntimeError('All position times must be <= t_end (%0.1f '
'sec), found %s/%s bad values (is this a split '
'file?)' % (times[-1], bad.sum(), len(bad)))
# If it starts close to zero, make it zero (else unique(offset) fails)
if len(ts) > 0 and ts[0] < (0.5 / info['sfreq']):
ts[0] = 0.
# If it doesn't start at zero, insert one at t=0
elif len(ts) == 0 or ts[0] > 0:
ts = np.r_[[0.], ts]
dev_head_ts.insert(0, info['dev_head_t']['trans'])
dev_head_ts = [{'trans': d, 'to': info['dev_head_t']['to'],
'from': info['dev_head_t']['from']}
for d in dev_head_ts]
offsets = np.round(ts * info['sfreq']).astype(int)
assert np.array_equal(offsets, np.unique(offsets))
assert len(offsets) == len(dev_head_ts)
offsets = list(offsets)
return dev_head_ts, offsets
@verbose
def simulate_raw(info, stc=None, trans=None, src=None, bem=None, head_pos=None,
mindist=1.0, interp='cos2', n_jobs=1, use_cps=True,
forward=None, first_samp=0, max_iter=10000, verbose=None):
u"""Simulate raw data.
Head movements can optionally be simulated using the ``head_pos``
parameter.
Parameters
----------
%(info_not_none)s Used for simulation.
.. versionchanged:: 0.18
Support for :class:`mne.Info`.
stc : iterable | SourceEstimate | SourceSimulator
The source estimates to use to simulate data. Each must have the same
sample rate as the raw data, and the vertices of all stcs in the
iterable must match. Each entry in the iterable can also be a tuple of
``(SourceEstimate, ndarray)`` to allow specifying the stim channel
        (e.g., STI001) data to accompany the source estimate.
See Notes for details.
.. versionchanged:: 0.18
Support for tuple, iterable of tuple or `~mne.SourceEstimate`,
or `~mne.simulation.SourceSimulator`.
trans : dict | str | None
Either a transformation filename (usually made using mne_analyze)
or an info dict (usually opened using read_trans()).
If string, an ending of ``.fif`` or ``.fif.gz`` will be assumed to
be in FIF format, any other ending will be assumed to be a text
file with a 4x4 transformation matrix (like the ``--trans`` MNE-C
option). If trans is None, an identity transform will be used.
src : str | instance of SourceSpaces | None
Source space corresponding to the stc. If string, should be a source
space filename. Can also be an instance of loaded or generated
SourceSpaces. Can be None if ``forward`` is provided.
bem : str | dict | None
BEM solution corresponding to the stc. If string, should be a BEM
solution filename (e.g., "sample-5120-5120-5120-bem-sol.fif").
Can be None if ``forward`` is provided.
%(head_pos)s
See for example :footcite:`LarsonTaulu2017`.
mindist : float
Minimum distance between sources and the inner skull boundary
to use during forward calculation.
%(interp)s
%(n_jobs)s
%(use_cps)s
forward : instance of Forward | None
The forward operator to use. If None (default) it will be computed
using ``bem``, ``trans``, and ``src``. If not None,
``bem``, ``trans``, and ``src`` are ignored.
.. versionadded:: 0.17
first_samp : int
The first_samp property in the output Raw instance.
.. versionadded:: 0.18
max_iter : int
The maximum number of STC iterations to allow.
This is a sanity parameter to prevent accidental blowups.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
raw : instance of Raw
The simulated raw file.
See Also
--------
mne.chpi.read_head_pos
add_chpi
add_noise
add_ecg
add_eog
simulate_evoked
simulate_stc
simulate_sparse_stc
Notes
-----
**Stim channel encoding**
By default, the stimulus channel will have the head position number
(starting at 1) stored in the trigger channel (if available) at the
t=0 point in each repetition of the ``stc``. If ``stc`` is a tuple of
``(SourceEstimate, ndarray)`` the array values will be placed in the
stim channel aligned with the :class:`mne.SourceEstimate`.
**Data simulation**
In the most advanced case where ``stc`` is an iterable of tuples the output
will be concatenated in time as:
.. table:: Data alignment and stim channel encoding
+---------+--------------------------+--------------------------+---------+
| Channel | Data |
+=========+==========================+==========================+=========+
| M/EEG | ``fwd @ stc[0][0].data`` | ``fwd @ stc[1][0].data`` | ``...`` |
+---------+--------------------------+--------------------------+---------+
| STIM | ``stc[0][1]`` | ``stc[1][1]`` | ``...`` |
+---------+--------------------------+--------------------------+---------+
| | *time →* |
+---------+--------------------------+--------------------------+---------+
.. versionadded:: 0.10.0
References
----------
.. footbibliography::
""" # noqa: E501
_validate_type(info, Info, 'info')
raw_verbose = verbose
if len(pick_types(info, meg=False, stim=True)) == 0:
event_ch = None
else:
event_ch = pick_channels(info['ch_names'],
_get_stim_channel(None, info))[0]
n_jobs = check_n_jobs(n_jobs)
if forward is not None:
if any(x is not None for x in (trans, src, bem, head_pos)):
raise ValueError('If forward is not None then trans, src, bem, '
'and head_pos must all be None')
if not np.allclose(forward['info']['dev_head_t']['trans'],
info['dev_head_t']['trans'], atol=1e-6):
raise ValueError('The forward meg<->head transform '
'forward["info"]["dev_head_t"] does not match '
'the one in raw.info["dev_head_t"]')
src = forward['src']
dev_head_ts, offsets = _check_head_pos(head_pos, info, first_samp, None)
src = _ensure_src(src, verbose=False)
if isinstance(bem, str):
bem = read_bem_solution(bem, verbose=False)
# Extract necessary info
meeg_picks = pick_types(info, meg=True, eeg=True, exclude=[])
logger.info('Setting up raw simulation: %s position%s, "%s" interpolation'
% (len(dev_head_ts), _pl(dev_head_ts), interp))
if isinstance(stc, SourceSimulator) and stc.first_samp != first_samp:
logger.info('SourceSimulator first_samp does not match argument.')
stc_enum, stc_counted, verts = _check_stc_iterable(stc, info)
if forward is not None:
forward = restrict_forward_to_stc(forward, verts)
src = forward['src']
else:
_stc_src_sel(src, verts, on_missing='warn', extra='')
src = _set_source_space_vertices(src.copy(), verts)
# array used to store result
raw_datas = list()
_log_ch('Event information', info, event_ch)
# don't process these any more if no MEG present
n = 1
get_fwd = _SimForwards(
dev_head_ts, offsets, info, trans, src, bem, mindist, n_jobs,
meeg_picks, forward, use_cps)
interper = _Interp2(offsets, get_fwd, interp)
this_start = 0
for n in range(max_iter):
if isinstance(stc_counted[1], (list, tuple)):
this_n = stc_counted[1][0].data.shape[1]
else:
this_n = stc_counted[1].data.shape[1]
this_stop = this_start + this_n
logger.info(' Interval %0.3f-%0.3f sec'
% (this_start / info['sfreq'],
this_stop / info['sfreq']))
n_doing = this_stop - this_start
assert n_doing > 0
this_data = np.zeros((len(info['ch_names']), n_doing))
raw_datas.append(this_data)
# Stim channel
fwd, fi = interper.feed(this_stop - this_start)
fi = fi[0]
stc_data, stim_data, _ = _stc_data_event(
stc_counted, fi, info['sfreq'], get_fwd.src,
None if n == 0 else verts)
if event_ch is not None:
this_data[event_ch, :] = stim_data[:n_doing]
this_data[meeg_picks] = np.einsum('svt,vt->st', fwd, stc_data)
try:
stc_counted = next(stc_enum)
except StopIteration:
logger.info(' %d STC iteration%s provided'
% (n + 1, _pl(n + 1)))
break
del fwd
else:
raise RuntimeError('Maximum number of STC iterations (%d) '
'exceeded' % (n,))
raw_data = np.concatenate(raw_datas, axis=-1)
raw = RawArray(raw_data, info, first_samp=first_samp, verbose=False)
raw.set_annotations(raw.annotations)
raw.verbose = raw_verbose
logger.info('Done')
return raw
@verbose
def add_eog(raw, head_pos=None, interp='cos2', n_jobs=1, random_state=None,
verbose=None):
"""Add blink noise to raw data.
Parameters
----------
raw : instance of Raw
The raw instance to modify.
%(head_pos)s
%(interp)s
%(n_jobs)s
%(random_state)s
The random generator state used for blink, ECG, and sensor noise
randomization.
%(verbose)s
Returns
-------
raw : instance of Raw
The instance, modified in place.
See Also
--------
add_chpi
add_ecg
add_noise
simulate_raw
Notes
-----
The blink artifacts are generated by:
1. Random activation times are drawn from an inhomogeneous poisson
process whose blink rate oscillates between 4.5 blinks/minute
and 17 blinks/minute based on the low (reading) and high (resting)
blink rates from :footcite:`BentivoglioEtAl1997`.
2. The activation kernel is a 250 ms Hanning window.
3. Two activated dipoles are located in the z=0 plane (in head
coordinates) at ±30 degrees away from the y axis (nasion).
4. Activations affect MEG and EEG channels.
The scale-factor of the activation function was chosen based on
visual inspection to yield amplitudes generally consistent with those
seen in experimental data. Noisy versions of the activation will be
stored in the first EOG channel in the raw instance, if it exists.
References
----------
.. footbibliography::
"""
return _add_exg(raw, 'blink', head_pos, interp, n_jobs, random_state)
@verbose
def add_ecg(raw, head_pos=None, interp='cos2', n_jobs=1, random_state=None,
verbose=None):
"""Add ECG noise to raw data.
Parameters
----------
raw : instance of Raw
The raw instance to modify.
%(head_pos)s
%(interp)s
%(n_jobs)s
%(random_state)s
The random generator state used for blink, ECG, and sensor noise
randomization.
%(verbose)s
Returns
-------
raw : instance of Raw
The instance, modified in place.
See Also
--------
add_chpi
add_eog
add_noise
simulate_raw
Notes
-----
The ECG artifacts are generated by:
1. Random inter-beat intervals are drawn from a uniform distribution
of times corresponding to 40 and 80 beats per minute.
2. The activation function is the sum of three Hanning windows with
varying durations and scales to make a more complex waveform.
3. The activated dipole is located one (estimated) head radius to
the left (-x) of head center and three head radii below (+z)
head center; this dipole is oriented in the +x direction.
4. Activations only affect MEG channels.
The scale-factor of the activation function was chosen based on
visual inspection to yield amplitudes generally consistent with those
    seen in experimental data. Noisy versions of the activation will be
    stored in the first ECG channel in the raw instance, if it exists.
.. versionadded:: 0.18
"""
return _add_exg(raw, 'ecg', head_pos, interp, n_jobs, random_state)
def _add_exg(raw, kind, head_pos, interp, n_jobs, random_state):
assert isinstance(kind, str) and kind in ('ecg', 'blink')
_validate_type(raw, BaseRaw, 'raw')
_check_preload(raw, 'Adding %s noise ' % (kind,))
rng = check_random_state(random_state)
info, times, first_samp = raw.info, raw.times, raw.first_samp
data = raw._data
meg_picks = pick_types(info, meg=True, eeg=False, exclude=())
meeg_picks = pick_types(info, meg=True, eeg=True, exclude=())
R, r0 = fit_sphere_to_headshape(info, units='m', verbose=False)[:2]
bem = make_sphere_model(r0, head_radius=R,
relative_radii=(0.97, 0.98, 0.99, 1.),
sigmas=(0.33, 1.0, 0.004, 0.33), verbose=False)
trans = None
dev_head_ts, offsets = _check_head_pos(head_pos, info, first_samp, times)
if kind == 'blink':
        # place dipoles in the z=0 plane, +/- 30 degrees from the y (nasion)
        # axis (i.e. 60 degrees from the x axis, matching the docstring)
exg_rr = np.array([[np.cos(np.pi / 3.), np.sin(np.pi / 3.), 0.],
[-np.cos(np.pi / 3.), np.sin(np.pi / 3), 0.]])
exg_rr /= np.sqrt(np.sum(exg_rr * exg_rr, axis=1, keepdims=True))
exg_rr *= 0.96 * R
exg_rr += r0
# oriented upward
nn = np.array([[0., 0., 1.], [0., 0., 1.]])
# Blink times drawn from an inhomogeneous poisson process
# by 1) creating the rate and 2) pulling random numbers
blink_rate = (1 + np.cos(2 * np.pi * 1. / 60. * times)) / 2.
blink_rate *= 12.5 / 60.
blink_rate += 4.5 / 60.
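        # i.e. the rate oscillates between 4.5 and 17 blinks per minute
        # (expressed here in blinks per second)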
blink_data = rng.uniform(size=len(times)) < blink_rate / info['sfreq']
blink_data = blink_data * (rng.uniform(size=len(times)) + 0.5) # amps
# Activation kernel is a simple hanning window
blink_kernel = np.hanning(int(0.25 * info['sfreq']))
exg_data = np.convolve(blink_data, blink_kernel,
'same')[np.newaxis, :] * 1e-7
# Add rescaled noisy data to EOG ch
ch = pick_types(info, meg=False, eeg=False, eog=True)
picks = meeg_picks
del blink_kernel, blink_rate, blink_data
else:
if len(meg_picks) == 0:
raise RuntimeError('Can only add ECG artifacts if MEG data '
'channels are present')
exg_rr = np.array([[-R, 0, -3 * R]])
max_beats = int(np.ceil(times[-1] * 80. / 60.))
# activation times with intervals drawn from a uniform distribution
# based on activation rates between 40 and 80 beats per minute
cardiac_idx = np.cumsum(rng.uniform(60. / 80., 60. / 40., max_beats) *
info['sfreq']).astype(int)
cardiac_idx = cardiac_idx[cardiac_idx < len(times)]
cardiac_data = np.zeros(len(times))
cardiac_data[cardiac_idx] = 1
# kernel is the sum of three hanning windows
cardiac_kernel = np.concatenate([
2 * np.hanning(int(0.04 * info['sfreq'])),
-0.3 * np.hanning(int(0.05 * info['sfreq'])),
0.2 * np.hanning(int(0.26 * info['sfreq']))], axis=-1)
exg_data = np.convolve(cardiac_data, cardiac_kernel,
'same')[np.newaxis, :] * 15e-8
# Add rescaled noisy data to ECG ch
ch = pick_types(info, meg=False, eeg=False, ecg=True)
picks = meg_picks
del cardiac_data, cardiac_kernel, max_beats, cardiac_idx
nn = np.zeros_like(exg_rr)
nn[:, 0] = 1 # arbitrarily rightward
del meg_picks, meeg_picks
noise = rng.standard_normal(exg_data.shape[1]) * 5e-6
if len(ch) >= 1:
ch = ch[-1]
data[ch, :] = exg_data * 1e3 + noise
else:
ch = None
src = setup_volume_source_space(pos=dict(rr=exg_rr, nn=nn),
sphere_units='mm')
_log_ch('%s simulated and trace' % kind, info, ch)
del ch, nn, noise
used = np.zeros(len(raw.times), bool)
get_fwd = _SimForwards(
dev_head_ts, offsets, info, trans, src, bem, 0.005, n_jobs, picks)
interper = _Interp2(offsets, get_fwd, interp)
proc_lims = np.concatenate([np.arange(0, len(used), 10000), [len(used)]])
for start, stop in zip(proc_lims[:-1], proc_lims[1:]):
fwd, _ = interper.feed(stop - start)
data[picks, start:stop] += np.einsum(
'svt,vt->st', fwd, exg_data[:, start:stop])
assert not used[start:stop].any()
used[start:stop] = True
assert used.all()
@verbose
def add_chpi(raw, head_pos=None, interp='cos2', n_jobs=1, verbose=None):
"""Add cHPI activations to raw data.
Parameters
----------
raw : instance of Raw
The raw instance to be modified.
%(head_pos)s
%(interp)s
%(n_jobs)s
%(verbose)s
Returns
-------
raw : instance of Raw
The instance, modified in place.
Notes
-----
.. versionadded:: 0.18
"""
_validate_type(raw, BaseRaw, 'raw')
_check_preload(raw, 'Adding cHPI signals ')
info, first_samp, times = raw.info, raw.first_samp, raw.times
meg_picks = pick_types(info, meg=True, eeg=False, exclude=[]) # for CHPI
if len(meg_picks) == 0:
raise RuntimeError('Cannot add cHPI if no MEG picks are present')
dev_head_ts, offsets = _check_head_pos(head_pos, info, first_samp, times)
hpi_freqs, hpi_pick, hpi_ons = get_chpi_info(info, on_missing='raise')
hpi_rrs = _get_hpi_initial_fit(info, verbose='error')
hpi_nns = hpi_rrs / np.sqrt(np.sum(hpi_rrs * hpi_rrs,
axis=1))[:, np.newaxis]
# turn on cHPI in file
data = raw._data
data[hpi_pick, :] = hpi_ons.sum()
    _log_ch('cHPI status bits enabled and', info, hpi_pick)
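    # drive each cHPI coil with a fixed-amplitude sinusoid at its nominal
    # frequency; multiplying by the per-position coil forwards below yields
    # the sensor-level cHPI signal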
sinusoids = 70e-9 * np.sin(2 * np.pi * hpi_freqs[:, np.newaxis] *
(np.arange(len(times)) / info['sfreq']))
info = pick_info(info, meg_picks)
with info._unlock():
info.update(projs=[], bads=[]) # Ensure no 'projs' or 'bads'
megcoils, _, _, _ = _prep_meg_channels(info, ignore_ref=False)
used = np.zeros(len(raw.times), bool)
dev_head_ts.append(dev_head_ts[-1]) # ZOH after time ends
get_fwd = _HPIForwards(offsets, dev_head_ts, megcoils, hpi_rrs, hpi_nns)
interper = _Interp2(offsets, get_fwd, interp)
lims = np.concatenate([offsets, [len(raw.times)]])
for start, stop in zip(lims[:-1], lims[1:]):
fwd, = interper.feed(stop - start)
data[meg_picks, start:stop] += np.einsum(
'svt,vt->st', fwd, sinusoids[:, start:stop])
assert not used[start:stop].any()
used[start:stop] = True
assert used.all()
return raw
class _HPIForwards(object):
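    """Compute cHPI coil forward fields for successive head positions."""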
def __init__(self, offsets, dev_head_ts, megcoils, hpi_rrs, hpi_nns):
self.offsets = offsets
self.dev_head_ts = dev_head_ts
self.hpi_rrs = hpi_rrs
self.hpi_nns = hpi_nns
self.megcoils = megcoils
self.idx = 0
def __call__(self, offset):
assert offset == self.offsets[self.idx]
_transform_orig_meg_coils(self.megcoils, self.dev_head_ts[self.idx])
fwd = _magnetic_dipole_field_vec(self.hpi_rrs, self.megcoils).T
# align cHPI magnetic dipoles in approx. radial direction
fwd = np.array([np.dot(fwd[:, 3 * ii:3 * (ii + 1)], self.hpi_nns[ii])
for ii in range(len(self.hpi_rrs))]).T
self.idx += 1
return (fwd,)
def _stc_data_event(stc_counted, head_idx, sfreq, src=None, verts=None):
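    """Return (stc_data, stim_data, vertices) for one STC iteration.

    Accepts a SourceEstimate or a (SourceEstimate, stim_array) tuple; when
    no stim array is given, the head-position index is encoded at the sample
    closest to t=0. Also checks that the sample rate and vertices match
    those of the first iteration.
    """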
stc_idx, stc = stc_counted
if isinstance(stc, (list, tuple)):
if len(stc) != 2:
raise ValueError('stc, if tuple, must be length 2, got %s'
% (len(stc),))
stc, stim_data = stc
else:
stim_data = None
_validate_type(stc, _BaseSourceEstimate, 'stc',
'SourceEstimate or tuple with first entry SourceEstimate')
# Convert event data
if stim_data is None:
stim_data = np.zeros(len(stc.times), int)
stim_data[np.argmin(np.abs(stc.times))] = head_idx
del head_idx
_validate_type(stim_data, np.ndarray, 'stim_data')
if stim_data.dtype.kind != 'i':
raise ValueError('stim_data in a stc tuple must be an integer ndarray,'
' got dtype %s' % (stim_data.dtype,))
if stim_data.shape != (len(stc.times),):
        raise ValueError('event data had shape %s but needed to be (%s,) to '
                         'match stc' % (stim_data.shape, len(stc.times)))
# Validate STC
if not np.allclose(sfreq, 1. / stc.tstep):
raise ValueError('stc and info must have same sample rate, '
'got %s and %s' % (1. / stc.tstep, sfreq))
if len(stc.times) <= 2: # to ensure event encoding works
raise ValueError('stc must have at least three time points, got %s'
% (len(stc.times),))
verts_ = stc.vertices
if verts is None:
assert stc_idx == 0
else:
if len(verts) != len(verts_) or not all(
np.array_equal(a, b) for a, b in zip(verts, verts_)):
raise RuntimeError('Vertex mismatch for stc[%d], '
'all stc.vertices must match' % (stc_idx,))
stc_data = stc.data
if src is None:
assert stc_idx == 0
else:
# on_missing depends on whether or not this is the first iteration
on_missing = 'warn' if verts is None else 'ignore'
_, stc_sel, _ = _stc_src_sel(src, stc, on_missing=on_missing)
stc_data = stc_data[stc_sel]
return stc_data, stim_data, verts_
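# Usage sketch (hedged, illustrative only): `_stc_data_event` is a private helper
# that the simulation loop feeds with enumerated (index, stc) pairs. Assuming MNE
# is importable, a minimal call could look roughly like this (values made up):
#
#     stc = SourceEstimate(np.zeros((2, 10)),
#                          vertices=[np.array([0]), np.array([1])],
#                          tmin=0., tstep=0.001)
#     stc_data, stim_data, verts = _stc_data_event((0, stc), head_idx=1, sfreq=1000.)
#     # stim_data marks head-position index 1 at the sample closest to t=0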
class _SimForwards(object):
def __init__(self, dev_head_ts, offsets, info, trans, src, bem, mindist,
n_jobs, meeg_picks, forward=None, use_cps=True):
self.idx = 0
self.offsets = offsets
self.use_cps = use_cps
self.iter = iter(_iter_forward_solutions(
info, trans, src, bem, dev_head_ts, mindist, n_jobs, forward,
meeg_picks))
def __call__(self, offset):
assert self.offsets[self.idx] == offset
self.idx += 1
fwd = next(self.iter)
self.src = fwd['src']
# XXX eventually we could speed this up by allowing the forward
# solution code to only compute the normal direction
convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=self.use_cps, copy=False,
verbose=False)
return fwd['sol']['data'], np.array(self.idx, float)
def _iter_forward_solutions(info, trans, src, bem, dev_head_ts, mindist,
n_jobs, forward, picks):
"""Calculate a forward solution for a subject."""
logger.info('Setting up forward solutions')
info = pick_info(info, picks)
with info._unlock():
info.update(projs=[], bads=[]) # Ensure no 'projs' or 'bads'
mri_head_t, trans = _get_trans(trans)
megcoils, meg_info, compcoils, megnames, eegels, eegnames, rr, info, \
update_kwargs, bem = _prepare_for_forward(
src, mri_head_t, info, bem, mindist, n_jobs, allow_bem_none=True,
verbose=False)
del (src, mindist)
if forward is None:
eegfwd = _compute_forwards(rr, bem, [eegels], [None],
[None], ['eeg'], n_jobs, verbose=False)[0]
eegfwd = _to_forward_dict(eegfwd, eegnames)
else:
if len(eegnames) > 0:
eegfwd = pick_channels_forward(forward, eegnames, verbose=False)
else:
eegfwd = None
# short circuit here if there are no MEG channels (don't need to iterate)
if len(pick_types(info, meg=True)) == 0:
eegfwd.update(**update_kwargs)
for _ in dev_head_ts:
yield eegfwd
yield eegfwd
return
coord_frame = FIFF.FIFFV_COORD_HEAD
if bem is not None and not bem['is_sphere']:
idx = np.where(np.array([s['id'] for s in bem['surfs']]) ==
FIFF.FIFFV_BEM_SURF_ID_BRAIN)[0]
assert len(idx) == 1
# make a copy so it isn't mangled in use
bem_surf = transform_surface_to(bem['surfs'][idx[0]], coord_frame,
mri_head_t, copy=True)
for ti, dev_head_t in enumerate(dev_head_ts):
# Could be *slightly* more efficient not to do this N times,
# but the cost here is tiny compared to actual fwd calculation
logger.info('Computing gain matrix for transform #%s/%s'
% (ti + 1, len(dev_head_ts)))
_transform_orig_meg_coils(megcoils, dev_head_t)
_transform_orig_meg_coils(compcoils, dev_head_t)
# Make sure our sensors are all outside our BEM
coil_rr = np.array([coil['r0'] for coil in megcoils])
# Compute forward
if forward is None:
if not bem['is_sphere']:
outside = ~_CheckInside(bem_surf)(coil_rr, n_jobs,
verbose=False)
elif bem.radius is not None:
d = coil_rr - bem['r0']
outside = np.sqrt(np.sum(d * d, axis=1)) > bem.radius
else: # only r0 provided
outside = np.ones(len(coil_rr), bool)
if not outside.all():
raise RuntimeError('%s MEG sensors collided with inner skull '
'surface for transform %s'
% (np.sum(~outside), ti))
megfwd = _compute_forwards(rr, bem, [megcoils], [compcoils],
[meg_info], ['meg'], n_jobs,
verbose=False)[0]
megfwd = _to_forward_dict(megfwd, megnames)
else:
megfwd = pick_channels_forward(forward, megnames, verbose=False)
fwd = _merge_meg_eeg_fwds(megfwd, eegfwd, verbose=False)
fwd.update(**update_kwargs)
yield fwd
# need an extra one to fill last buffer
yield fwd
|
the-stack_0_10169 | import copy
import json
import logging
import os
import torch
from torch.utils.data import TensorDataset
from utils import get_intent_labels, get_slot_labels
logger = logging.getLogger(__name__)
class InputExample(object):
"""
A single training/test example for simple sequence classification.
Args:
guid: Unique id for the example.
words: list. The words of the sequence.
intent_label: (Optional) string. The intent label of the example.
slot_labels: (Optional) list. The slot labels of the example.
"""
def __init__(self, guid, words, intent_label=None, slot_labels=None):
self.guid = guid
self.words = words
self.intent_label = intent_label
self.slot_labels = slot_labels
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class InputFeatures(object):
"""A single set of features of data."""
def __init__(self, input_ids, attention_mask, token_type_ids, intent_label_id, slot_labels_ids):
self.input_ids = input_ids
self.attention_mask = attention_mask
self.token_type_ids = token_type_ids
self.intent_label_id = intent_label_id
self.slot_labels_ids = slot_labels_ids
def __repr__(self):
return str(self.to_json_string())
def to_dict(self):
"""Serializes this instance to a Python dictionary."""
output = copy.deepcopy(self.__dict__)
return output
def to_json_string(self):
"""Serializes this instance to a JSON string."""
return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class JointProcessor(object):
"""Processor for the JointBERT data set """
def __init__(self, args):
self.args = args
self.intent_labels = get_intent_labels(args)
self.slot_labels = get_slot_labels(args)
self.input_text_file = "seq.in"
self.intent_label_file = "label"
self.slot_labels_file = "seq.out"
@classmethod
def _read_file(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with open(input_file, "r", encoding="utf-8") as f:
lines = []
for line in f:
lines.append(line.strip())
return lines
def _create_examples(self, texts, intents, slots, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for i, (text, intent, slot) in enumerate(zip(texts, intents, slots)):
guid = "%s-%s" % (set_type, i)
# 1. input_text
words = text.split() # Some are spaced twice
# 2. intent
intent_label = (
self.intent_labels.index(intent) if intent in self.intent_labels else self.intent_labels.index("UNK")
)
# 3. slot
slot_labels = []
for s in slot.split():
slot_labels.append(
self.slot_labels.index(s) if s in self.slot_labels else self.slot_labels.index("UNK")
)
assert len(words) == len(slot_labels)
examples.append(InputExample(guid=guid, words=words, intent_label=intent_label, slot_labels=slot_labels))
return examples
def get_examples(self, mode):
"""
Args:
mode: train, dev, test
"""
data_path = os.path.join(self.args.data_dir, self.args.token_level, mode)
logger.info("LOOKING AT {}".format(data_path))
return self._create_examples(
texts=self._read_file(os.path.join(data_path, self.input_text_file)),
intents=self._read_file(os.path.join(data_path, self.intent_label_file)),
slots=self._read_file(os.path.join(data_path, self.slot_labels_file)),
set_type=mode,
)
processors = {"syllable-level": JointProcessor, "word-level": JointProcessor}
def convert_examples_to_features(
examples,
max_seq_len,
tokenizer,
pad_token_label_id=-100,
cls_token_segment_id=0,
pad_token_segment_id=0,
sequence_a_segment_id=0,
mask_padding_with_zero=True,
):
# Setting based on the current model type
cls_token = tokenizer.cls_token
sep_token = tokenizer.sep_token
unk_token = tokenizer.unk_token
pad_token_id = tokenizer.pad_token_id
features = []
for (ex_index, example) in enumerate(examples):
if ex_index % 5000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(examples)))
# Tokenize word by word (for NER)
tokens = []
slot_labels_ids = []
for word, slot_label in zip(example.words, example.slot_labels):
word_tokens = tokenizer.tokenize(word)
if not word_tokens:
word_tokens = [unk_token] # For handling the bad-encoded word
tokens.extend(word_tokens)
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
slot_labels_ids.extend([int(slot_label)] + [pad_token_label_id] * (len(word_tokens) - 1))
# Account for [CLS] and [SEP]
special_tokens_count = 2
if len(tokens) > max_seq_len - special_tokens_count:
tokens = tokens[: (max_seq_len - special_tokens_count)]
slot_labels_ids = slot_labels_ids[: (max_seq_len - special_tokens_count)]
# Add [SEP] token
tokens += [sep_token]
slot_labels_ids += [pad_token_label_id]
token_type_ids = [sequence_a_segment_id] * len(tokens)
# Add [CLS] token
tokens = [cls_token] + tokens
slot_labels_ids = [pad_token_label_id] + slot_labels_ids
token_type_ids = [cls_token_segment_id] + token_type_ids
input_ids = tokenizer.convert_tokens_to_ids(tokens)
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
attention_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
# Zero-pad up to the sequence length.
padding_length = max_seq_len - len(input_ids)
input_ids = input_ids + ([pad_token_id] * padding_length)
attention_mask = attention_mask + ([0 if mask_padding_with_zero else 1] * padding_length)
token_type_ids = token_type_ids + ([pad_token_segment_id] * padding_length)
slot_labels_ids = slot_labels_ids + ([pad_token_label_id] * padding_length)
assert len(input_ids) == max_seq_len, "Error with input length {} vs {}".format(len(input_ids), max_seq_len)
assert len(attention_mask) == max_seq_len, "Error with attention mask length {} vs {}".format(
len(attention_mask), max_seq_len
)
assert len(token_type_ids) == max_seq_len, "Error with token type length {} vs {}".format(
len(token_type_ids), max_seq_len
)
assert len(slot_labels_ids) == max_seq_len, "Error with slot labels length {} vs {}".format(
len(slot_labels_ids), max_seq_len
)
intent_label_id = int(example.intent_label)
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % example.guid)
logger.info("tokens: %s" % " ".join([str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("attention_mask: %s" % " ".join([str(x) for x in attention_mask]))
logger.info("token_type_ids: %s" % " ".join([str(x) for x in token_type_ids]))
logger.info("intent_label: %s (id = %d)" % (example.intent_label, intent_label_id))
logger.info("slot_labels: %s" % " ".join([str(x) for x in slot_labels_ids]))
features.append(
InputFeatures(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
intent_label_id=intent_label_id,
slot_labels_ids=slot_labels_ids,
)
)
return features
def load_and_cache_examples(args, tokenizer, mode):
processor = processors[args.token_level](args)
# Load data features from cache or dataset file
cached_features_file = os.path.join(
args.data_dir,
"cached_{}_{}_{}_{}".format(
mode, args.token_level, list(filter(None, args.model_name_or_path.split("/"))).pop(), args.max_seq_len
),
)
if os.path.exists(cached_features_file):
logger.info("Loading features from cached file %s", cached_features_file)
features = torch.load(cached_features_file)
else:
# Load data features from dataset file
logger.info("Creating features from dataset file at %s", args.data_dir)
if mode == "train":
examples = processor.get_examples("train")
elif mode == "dev":
examples = processor.get_examples("dev")
elif mode == "test":
examples = processor.get_examples("test")
else:
raise Exception("For mode, Only train, dev, test is available")
# Use cross entropy ignore index as padding label id so that only real label ids contribute to the loss later
pad_token_label_id = args.ignore_index
features = convert_examples_to_features(
examples, args.max_seq_len, tokenizer, pad_token_label_id=pad_token_label_id
)
logger.info("Saving features into cached file %s", cached_features_file)
torch.save(features, cached_features_file)
# Convert to Tensors and build dataset
all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
all_intent_label_ids = torch.tensor([f.intent_label_id for f in features], dtype=torch.long)
all_slot_labels_ids = torch.tensor([f.slot_labels_ids for f in features], dtype=torch.long)
dataset = TensorDataset(
all_input_ids, all_attention_mask, all_token_type_ids, all_intent_label_ids, all_slot_labels_ids
)
return dataset
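# Minimal, hedged usage sketch of `convert_examples_to_features`. `_ToyTokenizer`
# below is a hypothetical stand-in exposing only the attributes and methods this
# module actually uses; real runs would pass a HuggingFace tokenizer instead, and
# running this file directly assumes the repository's own `utils` module is importable.
if __name__ == "__main__":
    class _ToyTokenizer(object):
        cls_token = "[CLS]"
        sep_token = "[SEP]"
        unk_token = "[UNK]"
        pad_token_id = 0

        def tokenize(self, word):
            # Trivial scheme: one lower-cased token per word
            return [word.lower()]

        def convert_tokens_to_ids(self, tokens):
            # Deterministic toy ids; a real tokenizer maps into its vocabulary
            return [abs(hash(token)) % 1000 + 1 for token in tokens]

    demo_example = InputExample(
        guid="demo-0", words=["book", "a", "flight"], intent_label=0, slot_labels=[1, 0, 2]
    )
    demo_features = convert_examples_to_features(
        [demo_example], max_seq_len=8, tokenizer=_ToyTokenizer()
    )
    print(demo_features[0].input_ids)
    print(demo_features[0].slot_labels_ids)  # -100 marks [CLS]/[SEP]/padding positions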
|
the-stack_0_10170 | from __future__ import division
import numpy as np
import chainer
from chainer.functions import dropout
from chainer.functions import max_pooling_2d
from chainer.functions import relu
from chainer.functions import softmax
from chainer.initializers import constant
from chainer.initializers import normal
from chainer.links import Linear
from chainercv.links.connection.conv_2d_activ import Conv2DActiv
from chainercv.links.model.pickable_sequential_chain import \
PickableSequentialChain
from chainercv import utils
# RGB order
_imagenet_mean = np.array(
[123.68, 116.779, 103.939], dtype=np.float32)[:, np.newaxis, np.newaxis]
class VGG16(PickableSequentialChain):
"""VGG-16 Network.
This is a pickable sequential link.
    The network can choose output layers from the set of all
intermediate layers.
The attribute :obj:`pick` is the names of the layers that are going
to be picked by :meth:`forward`.
The attribute :obj:`layer_names` is the names of all layers
that can be picked.
Examples:
>>> model = VGG16()
# By default, forward returns a probability score (after Softmax).
>>> prob = model(imgs)
>>> model.pick = 'conv5_3'
# This is layer conv5_3 (after ReLU).
>>> conv5_3 = model(imgs)
>>> model.pick = ['conv5_3', 'fc6']
>>> # These are layers conv5_3 (after ReLU) and fc6 (before ReLU).
>>> conv5_3, fc6 = model(imgs)
.. seealso::
:class:`chainercv.links.model.PickableSequentialChain`
When :obj:`pretrained_model` is the path of a pre-trained chainer model
serialized as a :obj:`.npz` file in the constructor, this chain model
automatically initializes all the parameters with it.
When a string in the prespecified set is provided, a pretrained model is
loaded from weights distributed on the Internet.
The list of pretrained models supported are as follows:
* :obj:`imagenet`: Loads weights trained with ImageNet and distributed \
at `Model Zoo \
<https://github.com/BVLC/caffe/wiki/Model-Zoo>`_.
Args:
n_class (int): The number of classes. If :obj:`None`,
the default values are used.
If a supported pretrained model is used,
the number of classes used to train the pretrained model
is used. Otherwise, the number of classes in ILSVRC 2012 dataset
is used.
pretrained_model (string): The destination of the pre-trained
chainer model serialized as a :obj:`.npz` file.
If this is one of the strings described
above, it automatically loads weights stored under a directory
:obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/models/`,
where :obj:`$CHAINER_DATASET_ROOT` is set as
:obj:`$HOME/.chainer/dataset` unless you specify another value
by modifying the environment variable.
mean (numpy.ndarray): A mean value. If :obj:`None`,
the default values are used.
If a supported pretrained model is used,
the mean value used to train the pretrained model is used.
Otherwise, the mean value calculated from ILSVRC 2012 dataset
is used.
initialW (callable): Initializer for the weights.
initial_bias (callable): Initializer for the biases.
"""
_models = {
'imagenet': {
'param': {'n_class': 1000, 'mean': _imagenet_mean},
'overwritable': ('mean',),
'url': 'https://chainercv-models.preferred.jp/'
'vgg16_imagenet_converted_2017_07_18.npz'
}
}
def __init__(self,
n_class=None, pretrained_model=None, mean=None,
initialW=None, initial_bias=None):
param, path = utils.prepare_pretrained_model(
{'n_class': n_class, 'mean': mean},
pretrained_model, self._models,
{'n_class': 1000, 'mean': _imagenet_mean})
self.mean = param['mean']
if initialW is None:
# Employ default initializers used in the original paper.
initialW = normal.Normal(0.01)
if pretrained_model:
# As a sampling process is time-consuming,
# we employ a zero initializer for faster computation.
initialW = constant.Zero()
kwargs = {'initialW': initialW, 'initial_bias': initial_bias}
super(VGG16, self).__init__()
with self.init_scope():
self.conv1_1 = Conv2DActiv(None, 64, 3, 1, 1, **kwargs)
self.conv1_2 = Conv2DActiv(None, 64, 3, 1, 1, **kwargs)
self.pool1 = _max_pooling_2d
self.conv2_1 = Conv2DActiv(None, 128, 3, 1, 1, **kwargs)
self.conv2_2 = Conv2DActiv(None, 128, 3, 1, 1, **kwargs)
self.pool2 = _max_pooling_2d
self.conv3_1 = Conv2DActiv(None, 256, 3, 1, 1, **kwargs)
self.conv3_2 = Conv2DActiv(None, 256, 3, 1, 1, **kwargs)
self.conv3_3 = Conv2DActiv(None, 256, 3, 1, 1, **kwargs)
self.pool3 = _max_pooling_2d
self.conv4_1 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
self.conv4_2 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
self.conv4_3 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
self.pool4 = _max_pooling_2d
self.conv5_1 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
self.conv5_2 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
self.conv5_3 = Conv2DActiv(None, 512, 3, 1, 1, **kwargs)
self.pool5 = _max_pooling_2d
self.fc6 = Linear(None, 4096, **kwargs)
self.fc6_relu = relu
self.fc6_dropout = dropout
self.fc7 = Linear(None, 4096, **kwargs)
self.fc7_relu = relu
self.fc7_dropout = dropout
self.fc8 = Linear(None, param['n_class'], **kwargs)
self.prob = softmax
if path:
chainer.serializers.load_npz(path, self)
def _max_pooling_2d(x):
return max_pooling_2d(x, ksize=2)
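# Hedged usage sketch: extract fc7 features with randomly initialized weights so
# nothing is downloaded. Assumes Chainer/ChainerCV are installed; passing
# pretrained_model='imagenet' instead would load the converted Caffe weights.
if __name__ == '__main__':
    model = VGG16(n_class=10)
    model.pick = 'fc7'
    imgs = np.random.uniform(0, 255, size=(1, 3, 224, 224)).astype(np.float32)
    imgs -= _imagenet_mean  # subtract the RGB channel means used by this network
    with chainer.using_config('train', False):
        feats = model(imgs)
    print(feats.shape)  # expected: (1, 4096)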
|
the-stack_0_10171 | from shapely.geometry import Polygon
from rtree import index
import copy
import uuid
from collections import Counter
class Box:
def __init__(self):
self.box= {}
self.box['boundingBox'] = {'vertices':[{'x':0,'y':0} ,{'x':0,'y':0},{'x':0,'y':0},{'x':0,'y':0}]}
self.box['identifier'] = str(uuid.uuid4())
self.box['text'] = ''
self.box['class'] ='TEXT'
self.box['font'] = {'family':'Arial Unicode MS', 'size':0, 'style':'REGULAR'}
def get_box(self):
return self.box
class MapKeys:
def __init__(self,box):
self.box = box
self.left = None
self.right = None
self.top = None
self.height = None
self.width = None
self.bottom = None
def get_left(self):
if self.left != None:
return self.left
else :
self.left = int(self.box['boundingBox']['vertices'][0]['x'])
return self.left
def get_right(self):
if self.right != None:
return self.right
else :
self.right = int(self.box['boundingBox']['vertices'][1]['x'])
return self.right
def get_top(self):
if self.top != None:
return self.top
else :
self.top = int(self.box['boundingBox']['vertices'][0]['y'])
return self.top
def get_bottom(self):
if self.bottom != None:
return self.bottom
else :
self.bottom = int(self.box['boundingBox']['vertices'][3]['y'])
return self.bottom
def get_height(self):
if self.height != None:
return self.height
else :
self.height = int(abs(self.get_top() - self.get_bottom()))
return self.height
def get_width(self):
if self.width != None:
return self.width
else :
self.width = int(abs(self.get_left() - self.get_right()))
return self.width
def index_tree(poly_index, poly, idx):
idx.insert(poly_index, poly.bounds)
def get_polygon(region):
points = []
vertices = region['vertices']
for point in vertices:
points.append((point['x'], point['y']))
if not (max(points)==(0,0) and min(points)==(0,0)):
poly = Polygon(points)
if not poly.is_valid:
poly = poly.buffer(0.01)
return poly
else:
return False
def sort_regions(region_lines, sorted_lines=None):
    # Use None instead of a shared mutable default list across calls
    if sorted_lines is None:
        sorted_lines = []
check_y =region_lines[0]['boundingBox']['vertices'][0]['y']
spacing_threshold = abs(check_y - region_lines[0]['boundingBox']['vertices'][3]['y'])* 0.6 #0.8 # *2 #*0.5
same_line = list(filter(lambda x: (abs(x['boundingBox']['vertices'][0]['y'] - check_y) <= spacing_threshold), region_lines))
next_line = list(filter(lambda x: (abs(x['boundingBox']['vertices'][0]['y'] - check_y) > spacing_threshold), region_lines))
if len(same_line) >1 :
same_line.sort(key=lambda x: x['boundingBox']['vertices'][0]['x'],reverse=False)
sorted_lines += same_line
if len(next_line) > 0:
sort_regions(next_line, sorted_lines)
return sorted_lines
def add_font(regions):
for idx,region in enumerate(regions):
if not 'font' in region.keys():
height = abs(region['boundingBox']['vertices'][0]['y'] - region['boundingBox']['vertices'][2]['y'])
regions[idx]['font']={'family':'Arial Unicode MS', 'size':height, 'style':'REGULAR'}
return regions
def collate_regions(regions, lines, child_class=None, grand_children=False,region_flag = True,skip_enpty_children=False,add_font=False ):
child_key='regions'
idx = index.Index()
lines_intersected = []
if regions !=None and len(regions) > 0:
lines_intersected =[]
for line_idx, line in enumerate(lines):
if child_class == 'LINE':
if 'text' in line.keys():
del lines[line_idx]['text']
if add_font and 'font' not in line.keys():
height = abs(line['boundingBox']['vertices'][0]['y'] - line['boundingBox']['vertices'][2]['y'])
lines[line_idx]['font']={'family':'Arial Unicode MS', 'size':height, 'style':'REGULAR'}
if child_class is not None:
lines[line_idx]['class'] = child_class
poly = get_polygon(line['boundingBox'])
if poly:
idx.insert(line_idx, poly.bounds)
for region_index, region in enumerate(regions):
region_poly = get_polygon(region['boundingBox'])
children_lines =[]
if region_poly:
children_lines = list(idx.intersection(region_poly.bounds))
if len(children_lines) > 0:
region_lines = []
for intr_index in children_lines:
if intr_index not in lines_intersected:
if grand_children :
if child_key not in lines[intr_index].keys():
grand_child = copy.deepcopy(lines[intr_index])
grand_child['class'] = 'WORD'
lines[intr_index][child_key] = [grand_child]
line_poly = get_polygon(lines[intr_index]['boundingBox'])
if line_poly:
area = region_poly.intersection(line_poly).area
reg_area = region_poly.area
line_area = line_poly.area
if reg_area>0 and line_area>0 and area/min(line_area,reg_area) >0.5 :
region_lines.append(lines[intr_index])
lines_intersected.append(intr_index)
region_lines.sort(key=lambda x:x['boundingBox']['vertices'][0]['y'])
if len(region_lines) > 0:
regions[region_index][child_key] = sort_regions(region_lines,[])
regions[region_index]['avg_size'] = get_avrage_size(region_lines)
else:
regions[region_index][child_key] = []
else:
regions[region_index][child_key] = []
if region_flag:
for line_index, line in enumerate(lines):
if line_index not in lines_intersected:
line[child_key] = [ copy.deepcopy(line)]
if child_class is not None:
                    if child_class == 'LINE':
                        line['class'] = 'PARA'
                    if child_class == 'WORD':
                        line['class'] = 'LINE'
regions.append(line)
return regions
def collate_cell_regions(regions, lines, child_class=None, grand_children=False,region_flag = True,skip_enpty_children=False,add_font=False ):
child_key='regions'
idx = index.Index()
lines_intersected = []
if regions !=None and len(regions) > 0:
lines_intersected =[]
for line_idx, line in enumerate(lines):
if child_class == 'LINE':
if 'text' in line.keys():
del lines[line_idx]['text']
if add_font:
height = abs(line['boundingBox']['vertices'][0]['y'] - line['boundingBox']['vertices'][2]['y'])
lines[line_idx]['font']={'family':'Arial Unicode MS', 'size':height, 'style':'REGULAR'}
if child_class is not None:
lines[line_idx]['class'] = child_class
poly = get_polygon(line['boundingBox'])
if poly:
idx.insert(line_idx, poly.bounds)
for region_index, region in enumerate(regions):
children_lines =[]
region_poly = get_polygon(region['boundingBox'])
if region_poly:
children_lines = list(idx.intersection(region_poly.bounds))
if len(children_lines) > 0:
region_lines = []
for intr_index in children_lines:
if intr_index not in lines_intersected:
if grand_children :
if child_key not in lines[intr_index].keys():
grand_child = copy.deepcopy(lines[intr_index])
grand_child['class'] = 'WORD'
lines[intr_index][child_key] = [grand_child]
line_poly = get_polygon(lines[intr_index]['boundingBox'])
if line_poly:
area = region_poly.intersection(line_poly).area
reg_area = region_poly.area
line_area = line_poly.area
if reg_area>0 and line_area>0 and area/min(line_area,reg_area) >0.5 :
region_lines.append(lines[intr_index])
lines_intersected.append(intr_index)
if child_key in region.keys() and type(region[child_key]) is list:
pass
else:
region[child_key] = []
region_lines.sort(key=lambda x:x['boundingBox']['vertices'][0]['y'])
if len(region_lines) > 1:
regions[region_index][child_key].extend(sort_regions(region_lines,[]))
regions[region_index]['avg_size'] = get_avrage_size(region_lines)
else :
regions[region_index][child_key].extend(region_lines)
regions[region_index]['avg_size'] = get_avrage_size(region_lines)
return regions
def collate_text(file,craft_words, google_words):
idx = index.Index()
words_intersected = []
if craft_words !=None and len(craft_words) > 0:
words_intersected =[]
for word_idx, g_word in enumerate(google_words):
poly = get_polygon(g_word['boundingBox'])
if poly:
idx.insert(word_idx, poly.bounds)
for region_index, region in enumerate(craft_words):
region_poly = get_polygon(region['boundingBox'])
if region_poly:
child_words = list(idx.intersection(region_poly.bounds))
text= ''; avg_conf = 0; conf_counter = 0; lang = []
if len(child_words) > 0:
region_words = []
for intr_index in child_words:
if intr_index not in words_intersected:
line_poly = get_polygon(google_words[intr_index]['boundingBox'])
if line_poly:
area = region_poly.intersection(line_poly).area
reg_area = region_poly.area
line_area = line_poly.area
if reg_area>0 and line_area>0 and area/min(line_area,reg_area) >0.3 :
region_words.append(google_words[intr_index])
words_intersected.append(intr_index)
region_words.sort(key=lambda x:x['boundingBox']['vertices'][0]['x'])
for region_word in region_words:
try:
text = text + str(region_word['text'])
if 'conf' in region_word.keys() and region_word['conf'] is not None:
avg_conf += region_word['conf']
conf_counter += 1
if 'language' in region_word.keys() and region_word['language'] is not None:
lang.append(region_word['language'])
except Exception as e:
print('error in collating text' + str(e))
if "craft_word" in file['config']["OCR"].keys() and file['config']["OCR"]["craft_word"]=="False" and len(region_words)>0:
craft_words[region_index]['boundingBox'] = merge_corrds(region_words)
if "craft_word" not in file['config']["OCR"].keys() and len(region_words)>0:
craft_words[region_index]['boundingBox'] = merge_corrds(region_words)
craft_words[region_index]['text'] = text
if conf_counter> 0:
craft_words[region_index]['conf'] = avg_conf/conf_counter
else :
craft_words[region_index]['conf'] = avg_conf
craft_words[region_index]['language'] = frequent_element(lang)
for g_word_index, g_word in enumerate(google_words):
if g_word_index not in words_intersected:
craft_words.append(g_word)
return craft_words
def remvoe_regions(regions, lines):
idx = index.Index()
lines_intersected = []
not_intersecting = []
if regions !=None and len(regions) > 0:
lines_intersected =[]
for line_idx, line in enumerate(lines):
poly = get_polygon(line['boundingBox'])
if poly:
idx.insert(line_idx, poly.bounds)
for region_index, region in enumerate(regions):
region_poly = get_polygon(region['boundingBox'])
if region_poly:
children_lines = list(idx.intersection(region_poly.bounds))
if len(children_lines) > 0:
region_lines = []
for intr_index in children_lines:
region_lines.append(lines[intr_index])
lines_intersected.append(intr_index)
for line_index, line in enumerate(lines):
if line_index not in lines_intersected:
not_intersecting.append(line)
return not_intersecting
def filterd_regions(regions):
f_regions = []
if regions != None :
for region in regions :
height = abs(region['boundingBox']['vertices'][0]['y'] - region['boundingBox']['vertices'][2]['y'])
if height > 0 :
f_regions.append(region)
return f_regions
def frequent_element(l_ist):
if len(l_ist) > 0 :
occurence_count = Counter(l_ist)
return occurence_count.most_common(1)[0][0]
else :
return None
def get_ngram(indices, window_size = 2):
ngrams = []
count = 0
for token in indices[:len(indices)-window_size+1]:
ngrams.append(indices[count:count+window_size])
count = count+1
return ngrams
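# For example (illustrative): get_ngram([1, 2, 3, 4]) returns [[1, 2], [2, 3], [3, 4]],
# and window_size=3 would return [[1, 2, 3], [2, 3, 4]].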
def are_hlines(region1,region2,avg_ver_ratio):
space = abs( region1['boundingBox']['vertices'][0]['y'] - region2['boundingBox']['vertices'][0]['y'])
sepration = region2['boundingBox']['vertices'][0]['x'] - region1['boundingBox']['vertices'][1]['x']
h1 = abs(region1['boundingBox']['vertices'][3]['y'] - region1['boundingBox']['vertices'][0]['y'])
h2 = abs(region2['boundingBox']['vertices'][3]['y'] - region2['boundingBox']['vertices'][0]['y'])
max_height = max( h1 , h2 ) #*0.5
if avg_ver_ratio < 1.8 :
diff_threshold = max_height * 0.8
if avg_ver_ratio >= 1.8 :
diff_threshold = max_height * 0.9
#return ((space <= diff_threshold ) or(sepration <= 3 *avg_height)) and (sepration < 6 * avg_height) and (space <= diff_threshold *2.5 )
return sepration < 5 * max_height and space <= diff_threshold
def merge_text(v_blocks):
for block_index, v_block in enumerate(v_blocks):
try:
v_blocks[block_index]['font'] ={'family':'Arial Unicode MS', 'size':0, 'style':'REGULAR'}
            v_blocks[block_index]['font']['size'] = max(v_block['regions'], key=lambda x: x['font']['size'])['font']['size']
if len(v_block['regions']) > 0 :
v_blocks[block_index]['text'] = v_block['regions'][0]['text']
if len(v_block['regions']) > 1:
for child in range(1, len(v_block['regions'])):
v_blocks[block_index]['text'] += ' ' + str(v_block['regions'][child]['text'])
except Exception as e:
print('Error in merging text {}'.format(e))
return v_blocks
def get_avrage_size(regions):
size = 0
if regions != None:
len_regions = len(regions)
count=0
if len_regions> 0 :
for region in regions :
if 'font' in region.keys():
size += region['font']['size']
count=count+1
if count==0:
count=1
return int(size/ count)
else:
return size
else:
return size
def set_font_info(page_words,font_info):
idx = index.Index()
if font_info != None and len(font_info) > 0:
for word_idx, word in enumerate(page_words):
height = abs(word['boundingBox']['vertices'][0]['y'] - word['boundingBox']['vertices'][2]['y'])
page_words[word_idx]['font'] = {'family': 'Arial Unicode MS', 'size': height, 'style': 'REGULAR'}
poly = get_polygon(word['boundingBox'])
if poly:
idx.insert(word_idx, poly.bounds)
for region_index, region in enumerate(font_info):
region_poly = get_polygon(region['boundingBox'])
if region_poly:
children_lines = list(idx.intersection(region_poly.bounds))
if len(children_lines) > 0:
for intr_index in children_lines:
#if intr_index not in words_intersected:
line_poly = get_polygon(page_words[intr_index]['boundingBox'])
if line_poly:
area = region_poly.intersection(line_poly).area
reg_area = region_poly.area
line_area = line_poly.area
if reg_area > 0 and line_area > 0 :
if (region['class'] == 'BOLD') and (area / min(line_area, reg_area) > 0.2):
page_words[intr_index]['font']['style']= update_style(page_words[intr_index]['font']['style'], 'BOLD')
if (region['class'] == 'SUPERSCRIPT') and (region_poly.union(line_poly).area != 0):
iou = area / region_poly.union(line_poly).area
if iou > 0.33:
page_words[intr_index]['font']['style']= update_style(page_words[intr_index]['font']['style'], 'SUPERSCRIPT')
return page_words
def update_style(prior_cls, cls):
if prior_cls == 'REGULAR':
return cls
else :
if prior_cls == cls:
return cls
else :
return '{},{}'.format(prior_cls,cls)
def merge_children(siblings,children_none=False):
box = Box().get_box()
if not children_none:
box['regions'] = copy.deepcopy(siblings)
box['boundingBox']['vertices'][0]['x'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][0]['x'])['boundingBox']['vertices'][0]['x']
box['boundingBox']['vertices'][0]['y'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][0]['y'])['boundingBox']['vertices'][0]['y']
box['boundingBox']['vertices'][1]['x'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][1]['x'])['boundingBox']['vertices'][1]['x']
box['boundingBox']['vertices'][1]['y'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][1]['y'])['boundingBox']['vertices'][1]['y']
box['boundingBox']['vertices'][2]['x'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][2]['x'])['boundingBox']['vertices'][2]['x']
box['boundingBox']['vertices'][2]['y'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][2]['y'])['boundingBox']['vertices'][2]['y']
box['boundingBox']['vertices'][3]['x'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][3]['x'])['boundingBox']['vertices'][3]['x']
box['boundingBox']['vertices'][3]['y'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][3]['y'])['boundingBox']['vertices'][3]['y']
return box
def merge_corrds(siblings,children_none=False):
box = Box().get_box()
box['boundingBox']['vertices'][0]['x'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][0]['x'])['boundingBox']['vertices'][0]['x']
box['boundingBox']['vertices'][0]['y'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][0]['y'])['boundingBox']['vertices'][0]['y']
box['boundingBox']['vertices'][1]['x'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][1]['x'])['boundingBox']['vertices'][1]['x']
box['boundingBox']['vertices'][1]['y'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][1]['y'])['boundingBox']['vertices'][1]['y']
box['boundingBox']['vertices'][2]['x'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][2]['x'])['boundingBox']['vertices'][2]['x']
box['boundingBox']['vertices'][2]['y'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][2]['y'])['boundingBox']['vertices'][2]['y']
box['boundingBox']['vertices'][3]['x'] = min(siblings, key=lambda x: x['boundingBox']['vertices'][3]['x'])['boundingBox']['vertices'][3]['x']
box['boundingBox']['vertices'][3]['y'] = max(siblings, key=lambda x: x['boundingBox']['vertices'][3]['y'])['boundingBox']['vertices'][3]['y']
return box['boundingBox']
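# Hedged, self-contained sketch of `collate_regions`: two toy word boxes are grouped
# under a single line-level region. The coordinates are made up for illustration and
# the run assumes shapely and rtree (imported above) are installed.
if __name__ == '__main__':
    def _demo_bbox(x1, y1, x2, y2):
        return {'vertices': [{'x': x1, 'y': y1}, {'x': x2, 'y': y1},
                             {'x': x2, 'y': y2}, {'x': x1, 'y': y2}]}

    demo_words = [{'boundingBox': _demo_bbox(10, 10, 60, 30), 'text': 'Hello'},
                  {'boundingBox': _demo_bbox(70, 10, 130, 30), 'text': 'world'}]
    demo_lines = [{'boundingBox': _demo_bbox(5, 5, 140, 35)}]
    collated = collate_regions(copy.deepcopy(demo_lines), copy.deepcopy(demo_words),
                               child_class='WORD', add_font=True)
    print(len(collated[0]['regions']))  # expected: 2 word children, sorted left to right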
|
the-stack_0_10172 | load(":known_shas.bzl", "FILE_KEY_TO_SHA")
load("//rust/platform:triple_mappings.bzl", "system_to_binary_ext", "system_to_dylib_ext", "system_to_staticlib_ext", "triple_to_constraint_set", "triple_to_system")
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
DEFAULT_TOOLCHAIN_NAME_PREFIX = "toolchain_for"
def rust_repositories():
"""Emits a default set of toolchains for Linux, OSX, and Freebsd
Skip this macro and call the `rust_repository_set` macros directly if you need a compiler for
other hosts or for additional target triples.
"""
RUST_VERSION = "1.35.0"
maybe(
http_archive,
name = "rules_cc",
url = "https://github.com/bazelbuild/rules_cc/archive/624b5d59dfb45672d4239422fa1e3de1822ee110.zip",
sha256 = "8c7e8bf24a2bf515713445199a677ee2336e1c487fa1da41037c6026de04bbc3",
strip_prefix = "rules_cc-624b5d59dfb45672d4239422fa1e3de1822ee110",
type = "zip",
)
rust_repository_set(
name = "rust_linux_x86_64",
exec_triple = "x86_64-unknown-linux-gnu",
extra_target_triples = ["wasm32-unknown-unknown"],
version = RUST_VERSION,
)
rust_repository_set(
name = "rust_darwin_x86_64",
exec_triple = "x86_64-apple-darwin",
extra_target_triples = ["wasm32-unknown-unknown"],
version = RUST_VERSION,
)
rust_repository_set(
name = "rust_freebsd_x86_64",
exec_triple = "x86_64-unknown-freebsd",
extra_target_triples = ["wasm32-unknown-unknown"],
version = RUST_VERSION,
)
def _check_version_valid(version, iso_date, param_prefix = ""):
"""Verifies that the provided rust version and iso_date make sense."""
if not version and iso_date:
fail("{param_prefix}iso_date must be paired with a {param_prefix}version".format(param_prefix = param_prefix))
if version in ("beta", "nightly") and not iso_date:
fail("{param_prefix}iso_date must be specified if version is 'beta' or 'nightly'".format(param_prefix = param_prefix))
if version not in ("beta", "nightly") and iso_date:
print("{param_prefix}iso_date is ineffective if an exact version is specified".format(param_prefix = param_prefix))
def serialized_constraint_set_from_triple(target_triple):
constraint_set = triple_to_constraint_set(target_triple)
constraint_set_strs = []
for constraint in constraint_set:
constraint_set_strs.append("\"{}\"".format(constraint))
return "[{}]".format(", ".join(constraint_set_strs))
def BUILD_for_compiler(target_triple):
"""Emits a BUILD file the compiler .tar.gz."""
system = triple_to_system(target_triple)
return """
load("@io_bazel_rules_rust//rust:toolchain.bzl", "rust_toolchain")
filegroup(
name = "rustc",
srcs = ["bin/rustc{binary_ext}"],
visibility = ["//visibility:public"],
)
filegroup(
name = "rustc_lib",
srcs = glob([
"lib/*{dylib_ext}",
"lib/rustlib/{target_triple}/codegen-backends/*{dylib_ext}",
]),
visibility = ["//visibility:public"],
)
filegroup(
name = "rustdoc",
srcs = ["bin/rustdoc{binary_ext}"],
visibility = ["//visibility:public"],
)
""".format(
binary_ext = system_to_binary_ext(system),
staticlib_ext = system_to_staticlib_ext(system),
dylib_ext = system_to_dylib_ext(system),
target_triple = target_triple,
)
def BUILD_for_stdlib(target_triple):
"""Emits a BUILD file the stdlib .tar.gz."""
system = triple_to_system(target_triple)
return """
filegroup(
name = "rust_lib-{target_triple}",
srcs = glob(
[
"lib/rustlib/{target_triple}/lib/*.rlib",
"lib/rustlib/{target_triple}/lib/*{dylib_ext}",
"lib/rustlib/{target_triple}/lib/*{staticlib_ext}",
],
# Some patterns (e.g. `lib/*.a`) don't match anything, see https://github.com/bazelbuild/rules_rust/pull/245
allow_empty = True,
),
visibility = ["//visibility:public"],
)
""".format(
binary_ext = system_to_binary_ext(system),
staticlib_ext = system_to_staticlib_ext(system),
dylib_ext = system_to_dylib_ext(system),
target_triple = target_triple,
)
def BUILD_for_rust_toolchain(workspace_name, name, exec_triple, target_triple, default_edition = "2015"):
"""Emits a toolchain declaration to match an existing compiler and stdlib.
Args:
workspace_name: The name of the workspace that this toolchain resides in
name: The name of the toolchain declaration
exec_triple: The rust-style target that this compiler runs on
target_triple: The rust-style target triple of the tool
"""
system = triple_to_system(target_triple)
return """
rust_toolchain(
name = "{toolchain_name}_impl",
rust_doc = "@{workspace_name}//:rustdoc",
rust_lib = "@{workspace_name}//:rust_lib-{target_triple}",
rustc = "@{workspace_name}//:rustc",
rustc_lib = "@{workspace_name}//:rustc_lib",
staticlib_ext = "{staticlib_ext}",
dylib_ext = "{dylib_ext}",
os = "{system}",
default_edition = "{default_edition}",
exec_triple = "{exec_triple}",
target_triple = "{target_triple}",
visibility = ["//visibility:public"],
)
""".format(
toolchain_name = name,
workspace_name = workspace_name,
staticlib_ext = system_to_staticlib_ext(system),
dylib_ext = system_to_dylib_ext(system),
system = system,
default_edition = default_edition,
exec_triple = exec_triple,
target_triple = target_triple,
)
def BUILD_for_toolchain(name, parent_workspace_name, exec_triple, target_triple):
return """
toolchain(
name = "{name}",
exec_compatible_with = {exec_constraint_sets_serialized},
target_compatible_with = {target_constraint_sets_serialized},
toolchain = "@{parent_workspace_name}//:{name}_impl",
toolchain_type = "@io_bazel_rules_rust//rust:toolchain",
)
""".format(
name = name,
exec_constraint_sets_serialized = serialized_constraint_set_from_triple(exec_triple),
target_constraint_sets_serialized = serialized_constraint_set_from_triple(target_triple),
parent_workspace_name = parent_workspace_name,
)
def produce_tool_suburl(tool_name, target_triple, version, iso_date = None):
"""Produces a fully qualified Rust tool name for URL
Args:
tool_name: The name of the tool per static.rust-lang.org
target_triple: The rust-style target triple of the tool
version: The version of the tool among "nightly", "beta', or an exact version.
iso_date: The date of the tool (or None, if the version is a specific version).
"""
if iso_date:
return "{}/{}-{}-{}".format(iso_date, tool_name, version, target_triple)
else:
return "{}-{}-{}".format(tool_name, version, target_triple)
def produce_tool_path(tool_name, target_triple, version):
"""Produces a qualified Rust tool name
Args:
tool_name: The name of the tool per static.rust-lang.org
target_triple: The rust-style target triple of the tool
version: The version of the tool among "nightly", "beta', or an exact version.
"""
return "{}-{}-{}".format(tool_name, version, target_triple)
def load_arbitrary_tool(ctx, tool_name, param_prefix, tool_subdirectory, version, iso_date, target_triple):
"""Loads a Rust tool, downloads, and extracts into the common workspace.
This function sources the tool from the Rust-lang static file server. The index is available
at: https://static.rust-lang.org/dist/index.html (or the path specified by
"${RUST_STATIC_URL}/dist/index.html" if the RUST_STATIC_URL envinronment variable is set).
Args:
ctx: A repository_ctx (no attrs required).
tool_name: The name of the given tool per the archive naming.
param_prefix: The name of the versioning param if the repository rule supports multiple tools.
tool_subdirectory: The subdirectory of the tool files (wo level below the root directory of
the archive. The root directory of the archive is expected to match
$TOOL_NAME-$VERSION-$TARGET_TRIPLE.
version: The version of the tool among "nightly", "beta', or an exact version.
iso_date: The date of the tool (or None, if the version is a specific version).
target_triple: The rust-style target triple of the tool
"""
_check_version_valid(version, iso_date, param_prefix)
# N.B. See https://static.rust-lang.org/dist/index.html to find the tool_suburl for a given
# tool.
tool_suburl = produce_tool_suburl(tool_name, target_triple, version, iso_date)
static_rust = ctx.os.environ["STATIC_RUST_URL"] if "STATIC_RUST_URL" in ctx.os.environ else "https://static.rust-lang.org"
url = "{}/dist/{}.tar.gz".format(static_rust, tool_suburl)
tool_path = produce_tool_path(tool_name, target_triple, version)
ctx.download_and_extract(
url,
output = "",
sha256 = FILE_KEY_TO_SHA.get(tool_suburl) or "",
stripPrefix = "{}/{}".format(tool_path, tool_subdirectory),
)
def _load_rust_compiler(ctx):
"""Loads a rust compiler and yields corresponding BUILD for it
Args:
ctx: A repository_ctx.
Returns:
The BUILD file contents for this compiler and compiler library
"""
target_triple = ctx.attr.exec_triple
load_arbitrary_tool(
ctx,
iso_date = ctx.attr.iso_date,
param_prefix = "rustc_",
target_triple = target_triple,
tool_name = "rustc",
tool_subdirectory = "rustc",
version = ctx.attr.version,
)
compiler_BUILD = BUILD_for_compiler(target_triple)
return compiler_BUILD
def _load_rust_stdlib(ctx, target_triple):
"""Loads a rust standard library and yields corresponding BUILD for it
Args:
ctx: A repository_ctx.
target_triple: The rust-style target triple of the tool
Returns:
The BUILD file contents for this stdlib, and a toolchain decl to match
"""
load_arbitrary_tool(
ctx,
iso_date = ctx.attr.iso_date,
param_prefix = "rust-std_",
target_triple = target_triple,
tool_name = "rust-std",
tool_subdirectory = "rust-std-{}".format(target_triple),
version = ctx.attr.version,
)
toolchain_prefix = ctx.attr.toolchain_name_prefix or DEFAULT_TOOLCHAIN_NAME_PREFIX
stdlib_BUILD = BUILD_for_stdlib(target_triple)
toolchain_BUILD = BUILD_for_rust_toolchain(
name = "{toolchain_prefix}_{target_triple}".format(
toolchain_prefix = toolchain_prefix,
target_triple = target_triple,
),
exec_triple = ctx.attr.exec_triple,
target_triple = target_triple,
workspace_name = ctx.attr.name,
)
return stdlib_BUILD + toolchain_BUILD
def _rust_toolchain_repository_impl(ctx):
"""The implementation of the rust toolchain repository rule."""
_check_version_valid(ctx.attr.version, ctx.attr.iso_date)
BUILD_components = [_load_rust_compiler(ctx)]
for target_triple in [ctx.attr.exec_triple] + ctx.attr.extra_target_triples:
BUILD_components.append(_load_rust_stdlib(ctx, target_triple))
ctx.file("WORKSPACE", "")
ctx.file("BUILD", "\n".join(BUILD_components))
def _rust_toolchain_repository_proxy_impl(ctx):
BUILD_components = []
for target_triple in [ctx.attr.exec_triple] + ctx.attr.extra_target_triples:
BUILD_components.append(BUILD_for_toolchain(
name = "{toolchain_prefix}_{target_triple}".format(
toolchain_prefix = ctx.attr.toolchain_name_prefix,
target_triple = target_triple,
),
exec_triple = ctx.attr.exec_triple,
parent_workspace_name = ctx.attr.parent_workspace_name,
target_triple = target_triple,
))
ctx.file("WORKSPACE", "")
ctx.file("BUILD", "\n".join(BUILD_components))
"""Composes a single workspace containing the toolchain components for compiling on a given
platform to a series of target platforms.
A given instance of this rule should be accompanied by a rust_toolchain_repository_proxy
invocation to declare its toolchains to Bazel; the indirection allows separating toolchain
selection from toolchain fetching
Args:
name: A unique name for this rule
exec_triple: The Rust-style target triple for the compilation platform
extra_target_triples: The Rust-style triples for extra compilation targets
toolchain_name_prefix: The per-target prefix expected for the rust_toolchain declarations
version: The version of the tool among "nightly", "beta', or an exact version.
iso_date: The date of the tool (or None, if the version is a specific version).
"""
rust_toolchain_repository = repository_rule(
attrs = {
"version": attr.string(mandatory = True),
"iso_date": attr.string(),
"exec_triple": attr.string(mandatory = True),
"extra_target_triples": attr.string_list(),
"toolchain_name_prefix": attr.string(),
},
implementation = _rust_toolchain_repository_impl,
)
"""Generates a toolchain-bearing repository that declares the toolchains from some other
rust_toolchain_repository.
Args:
name: A unique name for this rule
parent_workspace_name: The name of the other rust_toolchain_repository
exec_triple: The Rust-style target triple for the compilation platform
extra_target_triples: The Rust-style triples for extra compilation targets
toolchain_name_prefix: The per-target prefix expected for the rust_toolchain declarations in the
parent workspace.
"""
rust_toolchain_repository_proxy = repository_rule(
attrs = {
"parent_workspace_name": attr.string(mandatory = True),
"exec_triple": attr.string(mandatory = True),
"extra_target_triples": attr.string_list(),
"toolchain_name_prefix": attr.string(),
},
implementation = _rust_toolchain_repository_proxy_impl,
)
def rust_repository_set(name, version, exec_triple, extra_target_triples, iso_date = None):
"""Assembles a remote repository for the given toolchain params, produces a proxy repository
to contain the toolchain declaration, and registers the toolchains.
N.B. A "proxy repository" is needed to allow for registering the toolchain (with constraints)
without actually downloading the toolchain.
Args:
name: The name of the generated repository
version: The version of the tool among "nightly", "beta', or an exact version.
iso_date: The date of the tool (or None, if the version is a specific version).
exec_triple: The Rust-style target that this compiler runs on
extra_target_triples: Additional rust-style targets that this set of toolchains
should support.
"""
rust_toolchain_repository(
name = name,
exec_triple = exec_triple,
extra_target_triples = extra_target_triples,
iso_date = iso_date,
toolchain_name_prefix = DEFAULT_TOOLCHAIN_NAME_PREFIX,
version = version,
)
rust_toolchain_repository_proxy(
name = name + "_toolchains",
exec_triple = exec_triple,
extra_target_triples = extra_target_triples,
parent_workspace_name = name,
toolchain_name_prefix = DEFAULT_TOOLCHAIN_NAME_PREFIX,
)
all_toolchain_names = []
for target_triple in [exec_triple] + extra_target_triples:
all_toolchain_names.append("@{name}_toolchains//:{toolchain_name_prefix}_{triple}".format(
name = name,
toolchain_name_prefix = DEFAULT_TOOLCHAIN_NAME_PREFIX,
triple = target_triple,
))
# Register toolchains
native.register_toolchains(*all_toolchain_names)
native.register_toolchains("@io_bazel_rules_rust//rust/private/dummy_cc_toolchain:dummy_cc_wasm32_toolchain")
|
the-stack_0_10173 | """
lml.plugin
~~~~~~~~~~~~~~~~~~~
lml divides the plugins into two category: load-me-later plugins and
load-me-now ones. load-me-later plugins refer to the plugins were
loaded when needed due its bulky and/or memory hungry dependencies.
Those plugins has to use lml and respect lml's design principle.
load-me-now plugins refer to the plugins are immediately imported. All
conventional Python classes are by default immediately imported.
:class:`~lml.plugin.PluginManager` should be inherited to form new
plugin manager class. If you have more than one plugins in your
architecture, it is advisable to have one class per plugin type.
:class:`~lml.plugin.PluginInfoChain` helps the plugin module to
declare the available plugins in the module.
:class:`~lml.plugin.PluginInfo` can be subclassed to describe
your plugin. Its method :meth:`~lml.plugin.PluginInfo.tags`
can be overridden to help its matching :class:`~lml.plugin.PluginManager`
to look itself up.
:copyright: (c) 2017-2020 by Onni Software Ltd.
:license: New BSD License, see LICENSE for more details
"""
import logging
from collections import defaultdict
from lml.utils import json_dumps, do_import_class
PLUG_IN_MANAGERS = {}
CACHED_PLUGIN_INFO = defaultdict(list)
log = logging.getLogger(__name__)
class PluginInfo(object):
"""
Information about the plugin.
It is used together with PluginInfoChain to describe the plugins.
Meanwhile, it is a class decorator and can be used to register a plugin
immediately for use, in other words, the PluginInfo decorated plugin
class is not loaded later.
Parameters
-------------
name:
plugin name
absolute_import_path:
absolute import path from your plugin name space for your plugin class
tags:
a list of keywords help the plugin manager to retrieve your plugin
keywords:
Another custom properties.
Examples
-------------
For load-me-later plugins:
>>> info = PluginInfo("sample",
... abs_class_path='lml.plugin.PluginInfo', # demonstration only.
... tags=['load-me-later'],
... custom_property = 'I am a custom property')
>>> print(info.module_name)
lml
>>> print(info.custom_property)
I am a custom property
For load-me-now plugins:
>>> @PluginInfo("sample", tags=['load-me-now'])
... class TestPlugin:
... def echo(self, words):
... print("echoing %s" % words)
    Now let's retrieve the second plugin back:
>>> class SamplePluginManager(PluginManager):
... def __init__(self):
... PluginManager.__init__(self, "sample")
>>> sample_manager = SamplePluginManager()
>>> test_plugin=sample_manager.get_a_plugin("load-me-now")
>>> test_plugin.echo("hey..")
echoing hey..
"""
def __init__(
self, plugin_type, abs_class_path=None, tags=None, **keywords
):
self.plugin_type = plugin_type
self.absolute_import_path = abs_class_path
self.cls = None
self.properties = keywords
self.__tags = tags
def __getattr__(self, name):
if name == "module_name":
if self.absolute_import_path:
module_name = self.absolute_import_path.split(".")[0]
else:
module_name = self.cls.__module__
return module_name
return self.properties.get(name)
def tags(self):
"""
A list of tags for identifying the plugin class
The plugin class is described at the absolute_import_path
"""
if self.__tags is None:
yield self.plugin_type
else:
for tag in self.__tags:
yield tag
def __repr__(self):
rep = {
"plugin_type": self.plugin_type,
"path": self.absolute_import_path,
}
rep.update(self.properties)
return json_dumps(rep)
def __call__(self, cls):
self.cls = cls
_register_a_plugin(self, cls)
return cls
class PluginInfoChain(object):
"""
Pandas style, chained list declaration
It is used in the plugin packages to list all plugin classes
"""
def __init__(self, path):
self._logger = logging.getLogger(
self.__class__.__module__ + "." + self.__class__.__name__
)
self.module_name = path
def add_a_plugin(self, plugin_type, submodule=None, **keywords):
"""
Add a plain plugin
Parameters
-------------
plugin_type:
plugin manager name
submodule:
the relative import path to your plugin class
"""
a_plugin_info = PluginInfo(
plugin_type, self._get_abs_path(submodule), **keywords
)
self.add_a_plugin_instance(a_plugin_info)
return self
def add_a_plugin_instance(self, plugin_info_instance):
"""
Add a plain plugin
Parameters
-------------
plugin_info_instance:
an instance of PluginInfo
The developer has to specify the absolute import path
"""
self._logger.debug(
"add %s as '%s' plugin",
plugin_info_instance.absolute_import_path,
plugin_info_instance.plugin_type,
)
_load_me_later(plugin_info_instance)
return self
def _get_abs_path(self, submodule):
return "%s.%s" % (self.module_name, submodule)
class PluginManager(object):
"""
Load plugin info into in-memory dictionary for later import
Parameters
--------------
plugin_type:
the plugin type. All plugins of this plugin type will be
registered to it.
"""
def __init__(self, plugin_type):
self.plugin_name = plugin_type
self.registry = defaultdict(list)
self.tag_groups = dict()
self._logger = logging.getLogger(
self.__class__.__module__ + "." + self.__class__.__name__
)
_register_class(self)
def get_a_plugin(self, key, **keywords):
""" Get a plugin
Parameters
---------------
key:
the key to find the plugins
keywords:
additional parameters for help the retrieval of the plugins
"""
self._logger.debug("get a plugin called")
plugin = self.load_me_now(key)
return plugin()
def raise_exception(self, key):
"""Raise plugin not found exception
Override this method to raise custom exception
Parameters
-----------------
key:
the key to find the plugin
"""
self._logger.debug(self.registry.keys())
raise Exception("No %s is found for %s" % (self.plugin_name, key))
def load_me_later(self, plugin_info):
"""
Register a plugin info for later loading
Parameters
--------------
plugin_info:
a instance of plugin info
"""
self._logger.debug("load %s later", plugin_info.absolute_import_path)
self._update_registry_and_expand_tag_groups(plugin_info)
def load_me_now(self, key, library=None, **keywords):
"""
Import a plugin from plugin registry
Parameters
-----------------
key:
the key to find the plugin
library:
to use a specific plugin module
"""
if keywords:
self._logger.debug(keywords)
__key = key.lower()
if __key in self.registry:
for plugin_info in self.registry[__key]:
cls = self.dynamic_load_library(plugin_info)
module_name = _get_me_pypi_package_name(cls)
if library and module_name != library:
continue
else:
break
else:
# only library condition could raise an exception
self._logger.debug("%s is not installed" % library)
self.raise_exception(key)
self._logger.debug("load %s now for '%s'", cls, key)
return cls
else:
self.raise_exception(key)
def dynamic_load_library(self, a_plugin_info):
"""Dynamically load the plugin info if not loaded
Parameters
--------------
a_plugin_info:
a instance of plugin info
"""
if a_plugin_info.cls is None:
self._logger.debug("import " + a_plugin_info.absolute_import_path)
cls = do_import_class(a_plugin_info.absolute_import_path)
a_plugin_info.cls = cls
return a_plugin_info.cls
def register_a_plugin(self, plugin_cls, plugin_info):
""" for dynamically loaded plugin during runtime
Parameters
--------------
plugin_cls:
the actual plugin class refered to by the second parameter
plugin_info:
a instance of plugin info
"""
self._logger.debug("register %s", _show_me_your_name(plugin_cls))
plugin_info.cls = plugin_cls
self._update_registry_and_expand_tag_groups(plugin_info)
def get_primary_key(self, key):
__key = key.lower()
return self.tag_groups.get(__key, None)
def _update_registry_and_expand_tag_groups(self, plugin_info):
primary_tag = None
for index, key in enumerate(plugin_info.tags()):
self.registry[key.lower()].append(plugin_info)
if index == 0:
primary_tag = key.lower()
self.tag_groups[key.lower()] = primary_tag
def _register_class(cls):
"""Reigister a newly created plugin manager"""
log.debug("declare '%s' plugin manager", cls.plugin_name)
PLUG_IN_MANAGERS[cls.plugin_name] = cls
if cls.plugin_name in CACHED_PLUGIN_INFO:
# check if there is early registrations or not
for plugin_info in CACHED_PLUGIN_INFO[cls.plugin_name]:
if plugin_info.absolute_import_path:
log.debug(
"load cached plugin info: %s",
plugin_info.absolute_import_path,
)
else:
log.debug(
"load cached plugin info: %s",
_show_me_your_name(plugin_info.cls),
)
cls.load_me_later(plugin_info)
del CACHED_PLUGIN_INFO[cls.plugin_name]
def _register_a_plugin(plugin_info, plugin_cls):
"""module level function to register a plugin"""
manager = PLUG_IN_MANAGERS.get(plugin_info.plugin_type)
if manager:
manager.register_a_plugin(plugin_cls, plugin_info)
else:
# let's cache it and wait the manager to be registered
try:
log.debug("caching %s", _show_me_your_name(plugin_cls.__name__))
except AttributeError:
log.debug("caching %s", _show_me_your_name(plugin_cls))
CACHED_PLUGIN_INFO[plugin_info.plugin_type].append(plugin_info)
def _load_me_later(plugin_info):
""" module level function to load a plugin later"""
manager = PLUG_IN_MANAGERS.get(plugin_info.plugin_type)
if manager:
manager.load_me_later(plugin_info)
else:
# let's cache it and wait the manager to be registered
log.debug(
"caching %s for %s",
plugin_info.absolute_import_path,
plugin_info.plugin_type,
)
CACHED_PLUGIN_INFO[plugin_info.plugin_type].append(plugin_info)
def _get_me_pypi_package_name(module):
try:
module_name = module.__module__
root_module_name = module_name.split(".")[0]
return root_module_name.replace("_", "-")
except AttributeError:
return None
def _show_me_your_name(cls_func_or_data_type):
try:
return cls_func_or_data_type.__name__
except AttributeError:
return str(type(cls_func_or_data_type))
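# Hedged demo (appended sketch, not part of the original module): the two naming
# helpers above rely only on builtins, so they can be exercised directly; the
# _FakePlugin class below is purely illustrative.
if __name__ == "__main__":
    class _FakePlugin(object):
        pass
    print(_get_me_pypi_package_name(_FakePlugin))  # '__main__' has no dot -> '--main--'
    print(_show_me_your_name(_FakePlugin))         # -> '_FakePlugin'
    print(_show_me_your_name(42))                  # -> "<class 'int'>"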
|
the-stack_0_10176 | """
Imports the various compute backends
"""
from typing import Set
from ..exceptions import InputError, ResourceError
from .cfour import CFOURHarness
from .dftd3 import DFTD3Harness
from .entos import EntosHarness
from .gamess import GAMESSHarness
from .molpro import MolproHarness
from .mopac import MopacHarness
from .mp2d import MP2DHarness
from .nwchem import NWChemHarness
from .openmm import OpenMMHarness
from .psi4 import Psi4Harness
from .qchem import QChemHarness
from .rdkit import RDKitHarness
from .terachem import TeraChemHarness
from .torchani import TorchANIHarness
from .turbomole import TurbomoleHarness
__all__ = ["register_program", "get_program", "list_all_programs", "list_available_programs"]
programs = {}
def register_program(entry_point: "ProgramHarness") -> None:
"""
Register a new ProgramHarness with QCEngine.
"""
name = entry_point.name
if name.lower() in programs.keys():
raise ValueError("{} is already a registered program.".format(name))
programs[name.lower()] = entry_point
def unregister_program(name: str) -> None:
"""
Unregisters a given program.
"""
ret = programs.pop(name.lower(), None)
if ret is None:
raise KeyError(f"Program {name} is not registered with QCEngine")
def get_program(name: str, check: bool = True) -> "ProgramHarness":
"""
Returns a program's executor class
Parameters
----------
check
        If ``True``, raise an error if the program is not found. ``False`` is handy for
the specialized case of calling non-execution methods (like parsing for testing)
on the returned ``Harness``.
"""
name = name.lower()
if name not in programs:
raise InputError(f"Program {name} is not registered to QCEngine.")
ret = programs[name]
if check:
try:
ret.found(raise_error=True)
except ModuleNotFoundError as err:
raise ResourceError(f"Program {name} is registered with QCEngine, but cannot be found.") from err
return ret
def list_all_programs() -> Set[str]:
"""
List all programs registered by QCEngine.
"""
return set(programs.keys())
def list_available_programs() -> Set[str]:
"""
    List all programs that can be executed (found) by QCEngine.
"""
ret = set()
for k, p in programs.items():
if p.found():
ret.add(k)
return ret
# Quantum
register_program(CFOURHarness())
register_program(EntosHarness())
register_program(GAMESSHarness())
register_program(MolproHarness())
register_program(NWChemHarness())
register_program(Psi4Harness())
register_program(QChemHarness())
register_program(TeraChemHarness())
register_program(TurbomoleHarness())
# Semi-empirical
register_program(MopacHarness())
# AI
register_program(TorchANIHarness())
# Molecular Mechanics
register_program(RDKitHarness())
register_program(OpenMMHarness())
# Analytical Corrections
register_program(DFTD3Harness())
register_program(MP2DHarness())
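# Hedged usage sketch (appended, not part of the original module): the registry
# helpers above can be exercised directly; check=False skips the installation
# probe, so this works even when the underlying quantum-chemistry codes are absent.
if __name__ == "__main__":
    print(sorted(list_all_programs()))
    print(sorted(list_available_programs()))
    harness = get_program("psi4", check=False)
    print(type(harness).__name__)  # -> "Psi4Harness"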
|
the-stack_0_10177 | # Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: image_data
:platform: Unix
:synopsis: A module for loading any of the FabIO python module supported \
image formats (e.g. tiffs)
.. moduleauthor:: Nicola Wadeson <[email protected]>
"""
import os
import fabio
import numpy as np
from savu.data.data_structures.data_types.base_type import BaseType
class ImageData(BaseType):
""" This class loads any of the FabIO python module
supported image formats. """
def __init__(self, folder, Data, dim, shape=None, data_prefix=None):
self.folder = folder
self._data_obj = Data
self.frame_dim = dim
self.shape = shape
self.prefix = data_prefix
super(ImageData, self).__init__()
self.nFrames = None
self.file_names = self.__get_file_names(folder, data_prefix)
self.start_file = fabio.open(self.file_names[0])
self.dtype = self.start_file.data[0, 0].dtype
self.image_shape = (self.start_file.dim2, self.start_file.dim1)
if shape is None:
self.shape = (self.nFrames,)
else:
self.shape = shape
self.full_shape = self.image_shape + self.shape
self.image_dims = set(np.arange(len(self.full_shape)))\
.difference(set(self.frame_dim))
def clone_data_args(self, args, kwargs, extras):
args = ['folder', 'self', 'frame_dim']
kwargs['shape'] = 'shape'
kwargs['prefix'] = 'prefix'
return args, kwargs, extras
def __getitem__(self, index):
index = [index[i] if index[i].start is not None else
slice(0, self.shape[i]) for i in range(len(index))]
size = [len(np.arange(i.start, i.stop, i.step)) for i in index]
data = np.empty(size, dtype=self.dtype)
tiff_slices = [index[i] for i in self.image_dims]
# shift tiff dims to start from 0
index = list(index)
for i in self.image_dims:
end = \
len(np.arange(0, index[i].stop-index[i].start, index[i].step))
index[i] = slice(0, end, 1)
index, frameidx = self.__get_indices(index, size)
for i in range(len(frameidx)):
image = fabio.open(self.file_names[frameidx[i]]).data[tuple(tiff_slices)]
for d in self.frame_dim:
image = np.expand_dims(image, axis=d)
data[tuple(index[i])] = image
return data
def __get_file_names(self, folder, prefix):
import re
import glob
# files = os.listdir(folder)
fullpath = str.strip(folder)
if prefix is not None:
fullpath = os.path.join(folder, prefix + '*')
else:
fullpath = os.path.join(fullpath, '*')
files = glob.glob(fullpath)
self.nFrames = len(files)
file_nos = [int(re.findall(r'\d+', f)[-1]) for f in files]
sort_idx = np.argsort(file_nos)
self.start_no = file_nos[sort_idx[0]]
return list(np.array(files)[sort_idx])
def get_shape(self):
dims = list(self.image_dims) + self.frame_dim
shape = [x for _, x in sorted(zip(dims, self.full_shape))]
return tuple(shape)
def __get_idx(self, dim, sl, shape):
c = int(np.prod(shape[0:dim]))
r = int(np.prod(shape[dim+1:]))
vals = np.arange(sl.start, sl.stop, sl.step)
vals_shift = np.arange(0, len(vals), 1)
return [np.ravel(np.kron(v, np.ones((r, c)))) for v in \
[vals, vals_shift]]
def __get_indices(self, index, size):
""" Get the indices for the new data array and the file numbers. """
# indices for non-image dims only
sub_idx = np.array(index)[np.array(self.frame_dim)]
sub_size = [size[i] for i in self.frame_dim]
idx_list = []
frame_list = []
for dim in range(len(sub_idx)):
frame, idx = self.__get_idx(dim, sub_idx[dim], sub_size)
frame_list.append(frame.astype(int))
idx_list.append(idx.astype(int))
lshape = idx_list[0].shape[0] # this is just size of first frame dim? sub_size[0]
index = np.tile(index, (lshape, 1))
frameidx = np.zeros(lshape)
for dim in range(len(sub_idx)):
index[:, self.frame_dim[dim]] = \
[slice(i, i+1, 1) for i in idx_list[dim]]
frameidx[:] += frame_list[dim]*np.prod(self.shape[dim+1:])
return index.tolist(), frameidx.astype(int)
|
the-stack_0_10178 | import boto3
import botocore
import os
import io
import json
import time
import sys
from google.protobuf import text_format
from tensorflow.python.training.checkpoint_state_pb2 import CheckpointState
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("SageS3Client")
class SageS3Client():
def __init__(self, bucket=None, s3_prefix=None, aws_region=None):
self.aws_region = aws_region
self.bucket = bucket
self.s3_prefix = s3_prefix
self.config_key = os.path.normpath(s3_prefix + "/ip/ip.json")
self.markov_prefix = os.path.normpath(s3_prefix + "/markov")
self.hyperparameters_key = os.path.normpath(s3_prefix + "/ip/hyperparameters.json")
self.done_file_key = os.path.normpath(s3_prefix + "/ip/done")
self.model_checkpoints_prefix = os.path.normpath(s3_prefix + "/model/") + "/"
self.lock_file = ".lock"
logger.info("Initializing SageS3Client...")
def get_client(self):
session = boto3.session.Session()
return session.client('s3', region_name=self.aws_region)
def _get_s3_key(self, key):
return os.path.normpath(self.model_checkpoints_prefix + "/" + key)
def download_markov(self):
s3_client = self.get_client()
response = s3_client.list_objects_v2(Bucket=self.bucket,
Prefix=self.markov_prefix)
if "Contents" in response:
for i in response["Contents"]:
if ".ipynb_checkpoints" in i["Key"]:
continue
s3_client.download_file(Bucket=self.bucket,
Key=i["Key"],
Filename=i["Key"].replace(self.markov_prefix,"./custom_files/markov"))
logger.info("Downloaded %s" % i["Key"])
def write_ip_config(self, ip):
s3_client = self.get_client()
data = {"IP": ip}
json_blob = json.dumps(data)
file_handle = io.BytesIO(json_blob.encode())
file_handle_done = io.BytesIO(b'done')
s3_client.upload_fileobj(file_handle, self.bucket, self.config_key)
s3_client.upload_fileobj(file_handle_done, self.bucket, self.done_file_key)
def upload_hyperparameters(self, hyperparams_json):
s3_client = self.get_client()
file_handle = io.BytesIO(hyperparams_json.encode())
s3_client.upload_fileobj(file_handle, self.bucket, self.hyperparameters_key)
def upload_model(self, checkpoint_dir):
s3_client = self.get_client()
num_files = 0
for root, dirs, files in os.walk("./" + checkpoint_dir):
for filename in files:
abs_name = os.path.abspath(os.path.join(root, filename))
s3_client.upload_file(abs_name,
self.bucket,
"%s/%s/%s" % (self.s3_prefix, checkpoint_dir, filename))
num_files += 1
def download_model(self, checkpoint_dir):
s3_client = self.get_client()
filename = "None"
try:
filename = os.path.abspath(os.path.join(checkpoint_dir, "checkpoint"))
if not os.path.exists(checkpoint_dir):
os.makedirs(checkpoint_dir)
while True:
response = s3_client.list_objects_v2(Bucket=self.bucket,
Prefix=self._get_s3_key(self.lock_file))
if "Contents" not in response:
# If no lock is found, try getting the checkpoint
try:
s3_client.download_file(Bucket=self.bucket,
Key=self._get_s3_key("checkpoint"),
Filename=filename)
except Exception as e:
time.sleep(2)
continue
else:
time.sleep(2)
continue
ckpt = CheckpointState()
if os.path.exists(filename):
contents = open(filename, 'r').read()
text_format.Merge(contents, ckpt)
rel_path = ckpt.model_checkpoint_path
checkpoint = int(rel_path.split('_Step')[0])
response = s3_client.list_objects_v2(Bucket=self.bucket,
Prefix=self._get_s3_key(rel_path))
if "Contents" in response:
num_files = 0
for obj in response["Contents"]:
filename = os.path.abspath(os.path.join(checkpoint_dir,
obj["Key"].replace(self.model_checkpoints_prefix,
"")))
s3_client.download_file(Bucket=self.bucket,
Key=obj["Key"],
Filename=filename)
num_files += 1
return True
except Exception as e:
logger.error("{} while downloading the model {} from S3".format(e, filename))
return False
def get_ip(self):
s3_client = self.get_client()
self._wait_for_ip_upload()
try:
s3_client.download_file(self.bucket, self.config_key, 'ip.json')
with open("ip.json") as f:
ip = json.load(f)["IP"]
return ip
except Exception as e:
logger.error("Exception [{}] occured, Cannot fetch IP of redis server running in SageMaker. Job failed!".format(e))
sys.exit(1)
def _wait_for_ip_upload(self, timeout=600):
s3_client = self.get_client()
time_elapsed = 0
while True:
response = s3_client.list_objects(Bucket=self.bucket, Prefix=self.done_file_key)
if "Contents" not in response:
time.sleep(1)
time_elapsed += 1
if time_elapsed % 5 == 0:
logger.info ("Waiting for SageMaker Redis server IP... Time elapsed: %s seconds" % time_elapsed)
if time_elapsed >= timeout:
logger.error("Cannot retrieve IP of redis server running in SageMaker. Job failed!")
sys.exit(1)
else:
return
def download_file(self, s3_key, local_path):
s3_client = self.get_client()
try:
s3_client.download_file(self.bucket, s3_key, local_path)
return True
except botocore.exceptions.ClientError as e:
if e.response['Error']['Code'] == "404":
logger.info("Exception [{}] occured on download file-{} from s3 bucket-{} key-{}".format(e.response['Error'], local_path, self.bucket, s3_key))
return False
else:
logger.error("boto client exception error [{}] occured on download file-{} from s3 bucket-{} key-{}"
.format(e.response['Error'], local_path, self.bucket, s3_key))
return False
except Exception as e:
logger.error("Exception [{}] occcured on download file-{} from s3 bucket-{} key-{}".format(e, local_path, self.bucket, s3_key))
return False
def upload_file(self, s3_key, local_path):
s3_client = self.get_client()
try:
s3_client.upload_file(Filename=local_path,
Bucket=self.bucket,
Key=s3_key)
return True
except Exception as e:
logger.error("{} on upload file-{} to s3 bucket-{} key-{}".format(e, local_path, self.bucket, s3_key))
return False
if __name__ == '__main__':
CUSTOM_FILES_PATH = "./custom_files"
dirs_to_create = ["./custom_files",
"./custom_files/markov",
"./custom_files/markov/actions",
"./custom_files/markov/presets",
"./custom_files/markov/environments",
"./custom_files/markov/rewards"
]
for path in dirs_to_create:
if not os.path.exists(path):
os.makedirs(path)
s3_bucket = os.environ.get("SAGEMAKER_SHARED_S3_BUCKET", "gsaur-test")
s3_prefix = os.environ.get("SAGEMAKER_SHARED_S3_PREFIX", "sagemaker")
aws_region = os.environ.get("APP_REGION", "us-east-1")
s3_client = SageS3Client(bucket=s3_bucket, s3_prefix=s3_prefix, aws_region=aws_region)
s3_client.download_markov()
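    # Hedged illustration (added comment, not in the original script): a training
    # job would typically also publish the Redis IP for rollout workers, e.g.
    #   s3_client.write_ip_config("127.0.0.1")
    # while a worker would later call s3_client.get_ip() to read it back.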
|
the-stack_0_10181 | from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.grupo import Grupo
from ..models.trecho import Trecho
from ..models.viagem import Viagem
from ..types import UNSET, Unset
T = TypeVar("T", bound="PurchaseEventIn")
@attr.s(auto_attribs=True)
class PurchaseEventIn:
"""
Attributes:
trecho (Trecho):
grupo (Grupo):
viagem (Viagem):
token (Union[Unset, str]):
"""
trecho: Trecho
grupo: Grupo
viagem: Viagem
token: Union[Unset, str] = UNSET
additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
def to_dict(self) -> Dict[str, Any]:
trecho = self.trecho.to_dict()
grupo = self.grupo.to_dict()
viagem = self.viagem.to_dict()
token = self.token
field_dict: Dict[str, Any] = {}
field_dict.update(self.additional_properties)
field_dict.update(
{
"trecho": trecho,
"grupo": grupo,
"viagem": viagem,
}
)
if token is not UNSET:
field_dict["token"] = token
return field_dict
@classmethod
def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
d = src_dict.copy()
trecho = Trecho.from_dict(d.pop("trecho"))
grupo = Grupo.from_dict(d.pop("grupo"))
viagem = Viagem.from_dict(d.pop("viagem"))
token = d.pop("token", UNSET)
purchase_event_in = cls(
trecho=trecho,
grupo=grupo,
viagem=viagem,
token=token,
)
purchase_event_in.additional_properties = d
return purchase_event_in
@property
def additional_keys(self) -> List[str]:
return list(self.additional_properties.keys())
def __getitem__(self, key: str) -> Any:
return self.additional_properties[key]
def __setitem__(self, key: str, value: Any) -> None:
self.additional_properties[key] = value
def __delitem__(self, key: str) -> None:
del self.additional_properties[key]
def __contains__(self, key: str) -> bool:
return key in self.additional_properties
|
the-stack_0_10184 | '''
This module has utilities1 for the arithmetic functions.
The parameters are of variable length.
'''
__author__ = 'vinay'
__version__ = "alpha_1"
def myvsum(*args):
'''
function which takes in variable count of numbers
and returns their sum
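    Hedged doctest (added example):
    >>> myvsum(1, 2, 3)
    6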
'''
s = 0
for n in args:
s = s + n
return s
def myvproduct(*args):
'''
function which takes in variable count of numbers
and returns their product
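    Hedged doctest (added example):
    >>> myvproduct(2, 3, 4)
    24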
'''
p = 1
for n in args:
p = p * n
return p
if __name__ == '__main__':
s = myvsum(1,2,3,4,5,6,7)
print(s)
p = myvproduct(1,2,3,4,5,6,7)
print(p) |
the-stack_0_10186 | import logging
from typing import List, Optional, Sequence
import telebot
from quiz_bot.entity import (
AnswerEvaluation,
AnyChallengeInfo,
ChallengeSettings,
CheckedResult,
ContextChallenge,
ContextParticipant,
ContextUser,
EvaluationStatus,
PictureModel,
QuizState,
RegularChallengeInfo,
UnexpectedChallengeAmountError,
)
from quiz_bot.quiz.errors import ChallengeNotFoundError, NullableParticipantError
from quiz_bot.quiz.keeper import ChallengeKeeper
from quiz_bot.quiz.registrar import Registrar
from quiz_bot.storage import IChallengeStorage
logger = logging.getLogger(__name__)
class ChallengeMaster:
def __init__(
self, storage: IChallengeStorage, settings: ChallengeSettings, registrar: Registrar, keeper: ChallengeKeeper,
) -> None:
self._storage = storage
self._settings = settings
self._registrar = registrar
self._keeper = keeper
self._sync_challenge()
if all((self._settings.autostart, not self._keeper.has_data or self._keeper.finished,)):
self.start_next_challenge()
@property
def keeper(self) -> ChallengeKeeper:
return self._keeper
@property
def _is_last_challenge(self) -> bool:
return self._keeper.has_data and self._keeper.number == self._settings.challenge_amount
def _save_challenge_data(self, challenge: ContextChallenge) -> None:
self._keeper.set(data=challenge, info=self._settings.get_challenge_model(challenge.id))
def _sync_challenge(self) -> None:
actual_challenge = self._storage.get_actual_challenge()
if actual_challenge is not None:
logger.info("Actual challenge with ID %s", actual_challenge.id)
self._save_challenge_data(actual_challenge)
return
finished_challenge_ids = self._storage.get_finished_challenge_ids()
if not finished_challenge_ids:
logger.info("Quiz has not been running yet.")
return
if len(finished_challenge_ids) > self._settings.challenge_amount:
raise UnexpectedChallengeAmountError(
f"Not equal challenge amount: expected {self._settings.challenge_amount}, "
f"got {len(finished_challenge_ids)} finished challenges!"
)
logger.info("Quiz is not running now. Finished challenges: %s", finished_challenge_ids)
challenge = self._storage.get_challenge(finished_challenge_ids[-1])
if challenge is None:
raise ChallengeNotFoundError("Could not found finished challenge - WTF?")
self._save_challenge_data(challenge)
def resolve_quiz_state(self) -> QuizState:
self._sync_challenge()
if not self._keeper.has_data:
return QuizState.NEW
if self._keeper.finished:
if self._is_last_challenge:
return QuizState.FINISHED
return QuizState.WAIT_NEXT
return QuizState.IN_PROGRESS
def _get_next_challenge_info(self) -> AnyChallengeInfo:
if not self._keeper.has_data:
return self._settings.get_challenge_model(1)
return self._settings.get_challenge_model(self._keeper.number + 1)
def start_next_challenge(self) -> None:
next_challenge_info = self._get_next_challenge_info()
next_challenge = self._storage.create_challenge(
name=next_challenge_info.name,
phase_amount=next_challenge_info.phase_amount,
winner_amount=next_challenge_info.max_winners,
duration=next_challenge_info.duration,
)
logger.info("Next challenge: %s", next_challenge)
self._sync_challenge()
def _get_evaluation(
self, status: EvaluationStatus, replies: Optional[Sequence[str]] = (), picture: Optional[PictureModel] = None
) -> AnswerEvaluation:
return AnswerEvaluation(status=status, replies=replies, quiz_state=self.resolve_quiz_state(), picture=picture)
def start_challenge_for_user(
self,
user: ContextUser,
status: EvaluationStatus = EvaluationStatus.NOT_CHECKED,
additional_replies: Sequence[str] = (),
) -> AnswerEvaluation:
participant = self._registrar.get_participation_for_user(user=user, challenge=self._keeper.data)
if participant is None:
participant = self._registrar.create_participation_for_user(user=user, challenge=self._keeper.data)
result = self._keeper.checker.create_initial_phase(participant=participant)
logger.warning("Started challenge ID %s for user @%s", self._keeper.number, user.nick_name)
replies = list(additional_replies) + [
self._settings.get_start_notification(
challenge_num=self._keeper.number,
challenge_name=self._keeper.info.name,
description=f"{self._keeper.info.description}",
),
]
if isinstance(self._keeper.info, RegularChallengeInfo):
replies.append(
self._settings.get_next_answer_notification(
question=self._keeper.info.get_question(result.phase), question_num=result.phase,
)
)
return self._get_evaluation(status=status, replies=replies, picture=self._keeper.info.picture,)
return self._get_evaluation(
status=status,
replies=[self._settings.get_already_started_notification(challenge_name=self._keeper.info.name)],
)
def _resolve_next_event(self, participant: ContextParticipant, result: CheckedResult) -> AnswerEvaluation:
status = EvaluationStatus.CORRECT
if result.next_phase is not None:
replies: List[str] = []
if isinstance(self._keeper.info, RegularChallengeInfo):
replies.append(
self._settings.get_next_answer_notification(
question=self._keeper.info.get_question(result.next_phase), question_num=result.next_phase,
)
)
return self._get_evaluation(status=status, replies=replies)
self._registrar.finish_participation(participant)
pretender_replies = [
self._settings.get_pretender_notification(
challenge_name=self._keeper.info.name, scores=participant.scores, finished_at=participant.finished_at,
)
]
has_all_winners = self._registrar.all_winners_exist(challenge=self._keeper.data)
if not has_all_winners:
return self._get_evaluation(status=status, replies=pretender_replies)
self._storage.finish_actual_challenge()
logger.info(
"Challenge #%s '%s' finished with all winners resolution!", self._keeper.number, self._keeper.info.name,
)
if not self._is_last_challenge and self._settings.autostart:
self.start_next_challenge()
return self.start_challenge_for_user(
user=participant.user, status=status, additional_replies=pretender_replies
)
return self._get_evaluation(status=status, replies=pretender_replies)
def evaluate(self, user: ContextUser, message: telebot.types.Message) -> AnswerEvaluation: # noqa: C901
if self._keeper.out_of_date:
self._storage.finish_actual_challenge()
participant = self._registrar.get_participation_for_user(user=user, challenge=self._keeper.data)
if participant is None:
logger.info(
"User @%s is not a Participant for challenge with ID %s!", user.nick_name, self._keeper.data.id,
)
return self._get_evaluation(status=EvaluationStatus.NOT_CHECKED)
if participant.completed_challenge:
logger.info(
"User @%s has already completed challenge with ID %s!", user.nick_name, self._keeper.data.id,
)
return self._get_evaluation(status=EvaluationStatus.ALREADY_COMPLETED)
if self._keeper.finished:
return self._get_evaluation(status=EvaluationStatus.INCORRECT)
checked_result = self._keeper.checker.check_answer(
participant=participant, data=self._keeper.data, info=self._keeper.info, message=message # type: ignore
)
if not checked_result.correct:
return self._get_evaluation(status=EvaluationStatus.INCORRECT)
self._registrar.add_correct_answer(participant)
logger.info("Added +1 score for user '%s'!", user.nick_name)
return self._resolve_next_event(participant=participant, result=checked_result)
def skip_evaluation(self, user: ContextUser) -> AnswerEvaluation:
participant = self._registrar.get_participation_for_user(user=user, challenge=self._keeper.data)
if participant is None:
raise NullableParticipantError
unchecked_result = self._keeper.checker.skip_question(participant=participant, data=self._keeper.data)
return self._resolve_next_event(participant=participant, result=unchecked_result)
def get_challenge_info(self, challenge_id: Optional[int] = None) -> str:
if not isinstance(challenge_id, int):
logger.info("Challenge ID was not specified, so use current challenge information.")
challenge_id = self._keeper.number
context_challenge = self._storage.get_challenge(challenge_id)
if context_challenge is None:
raise ChallengeNotFoundError(f"Challenge with ID {challenge_id} was not found!")
self._save_challenge_data(context_challenge)
winner_results = self._registrar.get_winners(self._keeper.data)
if not self._keeper.finished:
results = self._settings.get_time_left_info(self._keeper.finish_after)
else:
results = (
"\n".join(self._settings.get_results_info(winner_results))
+ "\n\n"
+ self._settings.get_time_over_info(self._keeper.data)
)
return self._settings.get_challenge_info(number=self._keeper.number, info=self._keeper.info, results=results)
|
the-stack_0_10188 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Resource Constants
RESOURCE_ADMIN_MENU = "Admin"
RESOURCE_AIRFLOW = "Airflow"
RESOURCE_AUDIT_LOG = "Audit Logs"
RESOURCE_BROWSE_MENU = "Browse"
RESOURCE_DAG = "DAGs"
RESOURCE_DAG_PREFIX = "DAG:"
RESOURCE_DOCS_MENU = "Docs"
RESOURCE_DOCS = "Documentation"
RESOURCE_CONFIG = "Configurations"
RESOURCE_CONNECTION = "Connections"
RESOURCE_DAG_CODE = "DAG Code"
RESOURCE_DAG_RUN = "DAG Runs"
RESOURCE_IMPORT_ERROR = "ImportError"
RESOURCE_JOB = "Jobs"
RESOURCE_POOL = "Pools"
RESOURCE_PLUGIN = "Plugins"
RESOURCE_SLA_MISS = "SLA Misses"
RESOURCE_TASK_INSTANCE = "Task Instances"
RESOURCE_TASK_LOG = "Task Logs"
RESOURCE_TASK_RESCHEDULE = "Task Reschedules"
RESOURCE_VARIABLE = "Variables"
RESOURCE_WEBSITE = "Website"
RESOURCE_XCOM = "XComs"
RESOURCE_USERINFO_EDIT_VIEW = "UserInfoEditView"
RESOURCE_RESET_MY_PASSWORD_VIEW = "ResetMyPasswordView"
RESOURCE_USER_DB_MODELVIEW = "UserDBModelView"
RESOURCE_USER_OID_MODELVIEW = "UserOIDModelView"
RESOURCE_USER_LDAP_MODELVIEW = "UserLDAPModelView"
RESOURCE_USER_OAUTH_MODELVIEW = "UserOAuthModelView"
RESOURCE_USER_REMOTEUSER_MODELVIEW = "UserRemoteUserModelView"
RESOURCE_ROLE_MODEL_VIEW = "RoleModelView"
RESOURCE_PERMISSION_MODEL_VIEW = "PermissionModelView"
# Action Constants
ACTION_CAN_LIST = "can_list"
ACTION_CAN_SHOW = "can_show"
ACTION_CAN_CREATE = "can_create"
ACTION_CAN_READ = "can_read"
ACTION_CAN_EDIT = "can_edit"
ACTION_CAN_DELETE = "can_delete"
ACTION_CAN_ACCESS_MENU = "menu_access"
ACTION_CAN_THIS_FORM_GET = "can_this_form_get"
ACTION_CAN_THIS_FORM_POST = "can_this_form_post"
ACTION_RESETMYPASSWORD = "resetmypassword"
ACTION_CAN_USERINFO = "can_userinfo"
ACTION_USERINFOEDIT = "userinfoedit"
DEPRECATED_ACTION_CAN_DAG_READ = "can_dag_read"
DEPRECATED_ACTION_CAN_DAG_EDIT = "can_dag_edit"
|
the-stack_0_10191 | # Copyright The Linux Foundation and each contributor to CommunityBridge.
# SPDX-License-Identifier: MIT
import pytest
import cla
import pynamodb
from unittest.mock import Mock, patch, MagicMock
from cla.models.dynamo_models import GitHubOrg, GitHubOrgModel
from cla.utils import get_github_organization_instance
from cla.tests.unit.data import GH_TABLE_DESCRIPTION
PATCH_METHOD = "pynamodb.connection.Connection._make_api_call"
@pytest.fixture()
def gh_instance():
""" GitHubOrg instance """
with patch(PATCH_METHOD) as req:
req.return_value = GH_TABLE_DESCRIPTION
gh_org = cla.utils.get_github_organization_instance()
gh_name = "FOO"
gh_org.set_organization_name(gh_name)
gh_org.set_organization_sfid("foo_sf_id")
gh_org.set_project_sfid("foo_sf_id")
gh_org.save()
yield gh_org
def test_set_organization_name(gh_instance):
""" Test setting GitHub org name #1126 """
assert gh_instance.get_organization_name_lower() == "foo"
def test_get_org_by_name_lower(gh_instance):
""" Test getting GitHub org with case insensitive search """
gh_org = cla.utils.get_github_organization_instance()
gh_org.model.scan = Mock(return_value=[gh_instance.model])
found_gh_org = gh_org.get_organization_by_lower_name(gh_instance.get_organization_name())
assert found_gh_org.get_organization_name_lower() == gh_instance.get_organization_name_lower()
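def test_organization_name_preserved(gh_instance):
    """ Hedged extra check (added sketch): the original-cased name set in the
    fixture should be returned unchanged by get_organization_name() """
    assert gh_instance.get_organization_name() == "FOO"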
|
the-stack_0_10195 | # messageBox.py
import ctypes
user_handle = ctypes.WinDLL("User32.dll") # Handle to User32.dll
kernel_handle = ctypes.WinDLL("kernel32.dll") # Handle to Kernel32.dll
# WinAPI: MessageBoxW
hWnd = None
lpText = "Message Box"
lpCaption = "Pop Up"
uType = 0x00000001
response = user_handle.MessageBoxW(hWnd, lpText, lpCaption, uType)
# Error Handling
error = kernel_handle.GetLastError()
if error != 0:
print("[-] Error Code: {0}".format(error))
if response == 1:
print("[+] User Clicked OK")
elif response == 2:
print("[+] User Clicked CANCEL")
|
the-stack_0_10198 | # -*- coding: utf-8 -*-
# Copyright (c) 2019 - 2020 Simon Kern
# Copyright (c) 2015 - 2020 Holger Nahrstaedt
# Copyright (c) 2011, 2015, Chris Lee-Messer
# Copyright (c) 2016-2017 The pyedflib Developers
# <https://github.com/holgern/pyedflib>
# See LICENSE for license details.
import numpy as np
import sys
from datetime import datetime, date
from ._extensions._pyedflib import FILETYPE_EDFPLUS, FILETYPE_BDFPLUS, FILETYPE_BDF, FILETYPE_EDF
from ._extensions._pyedflib import open_file_writeonly, set_physical_maximum, set_patient_additional, set_digital_maximum
from ._extensions._pyedflib import set_birthdate, set_digital_minimum, set_technician, set_recording_additional, set_patientname
from ._extensions._pyedflib import set_patientcode, set_equipment, set_admincode, set_gender, set_datarecord_duration, set_number_of_annotation_signals
from ._extensions._pyedflib import set_startdatetime, set_starttime_subsecond, set_samplefrequency, set_physical_minimum, set_label, set_physical_dimension
from ._extensions._pyedflib import set_transducer, set_prefilter, write_physical_samples, close_file, write_annotation_latin1, write_annotation_utf8
from ._extensions._pyedflib import blockwrite_physical_samples, write_errors, blockwrite_digital_samples, write_digital_short_samples, write_digital_samples, blockwrite_digital_short_samples
__all__ = ['EdfWriter']
def u(x):
return x.decode("utf-8", "strict")
def du(x):
if isbytestr(x):
return x
else:
return x.encode("utf-8")
def isstr(s):
try:
return isinstance(s, basestring)
except NameError:
return isinstance(s, str)
def isbytestr(s):
return isinstance(s, bytes)
def gender2int(gender):
if isinstance(gender, int) or gender is None:
return gender
elif gender.lower() in ['', 'x', 'xx', 'xxx', 'unknown', '?', '??']:
return None
elif gender.lower() in ["female", "woman", "f", "w"]:
return 0
elif gender.lower() in ["male", "man", "m"]:
return 1
else:
raise ValueError("Unknown gender: '{}'".format(gender))
class ChannelDoesNotExist(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
class WrongInputSize(Exception):
def __init__(self, value):
self.parameter = value
def __str__(self):
return repr(self.parameter)
class EdfWriter(object):
def __exit__(self, exc_type, exc_val, ex_tb):
self.close()
def __enter__(self):
return self
def __del__(self):
self.close()
def __init__(self, file_name, n_channels,
file_type=FILETYPE_EDFPLUS):
"""Initialises an EDF file at file_name.
file_type is one of
edflib.FILETYPE_EDFPLUS
edflib.FILETYPE_BDFPLUS
n_channels is the number of channels without the annotation channel
channel_info should be a
list of dicts, one for each channel in the data. Each dict needs
these values:
'label' : channel label (string, <= 16 characters, must be unique)
'dimension' : physical dimension (e.g., mV) (string, <= 8 characters)
'sample_rate' : sample frequency in hertz (int)
'physical_max' : maximum physical value (float)
'physical_min' : minimum physical value (float)
'digital_max' : maximum digital value (int, -2**15 <= x < 2**15)
'digital_min' : minimum digital value (int, -2**15 <= x < 2**15)
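        Examples
        --------
        A minimal hedged sketch (added example; channel values are illustrative only):
        >>> import pyedflib
        >>> f = pyedflib.EdfWriter('test.edf', 1, file_type=pyedflib.FILETYPE_EDFPLUS)
        >>> f.setSignalHeader(0, {'label': 'ch0', 'dimension': 'uV', 'sample_rate': 100,
        ...                       'physical_max': 100.0, 'physical_min': -100.0,
        ...                       'digital_max': 32767, 'digital_min': -32768,
        ...                       'prefilter': '', 'transducer': ''})
        >>> f.close()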
"""
self.path = file_name
self.file_type = file_type
self.patient_name = ''
self.patient_code = ''
self.technician = ''
self.equipment = ''
self.recording_additional = ''
self.patient_additional = ''
self.admincode = ''
self.gender = None
self.recording_start_time = datetime.now().replace(microsecond=0)
self.birthdate = ''
self.duration = 1
self.number_of_annotations = 1 if file_type in [FILETYPE_EDFPLUS, FILETYPE_BDFPLUS] else 0
self.n_channels = n_channels
self.channels = []
self.sample_buffer = []
for i in np.arange(self.n_channels):
if self.file_type == FILETYPE_BDFPLUS or self.file_type == FILETYPE_BDF:
self.channels.append({'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 8388607,'digital_min': -8388608,
'prefilter': 'pre1', 'transducer': 'trans1'})
elif self.file_type == FILETYPE_EDFPLUS or self.file_type == FILETYPE_EDF:
self.channels.append({'label': 'test_label', 'dimension': 'mV', 'sample_rate': 100,
'physical_max': 1.0, 'physical_min': -1.0,
'digital_max': 32767, 'digital_min': -32768,
'prefilter': 'pre1', 'transducer': 'trans1'})
self.sample_buffer.append([])
self.handle = open_file_writeonly(self.path, self.file_type, self.n_channels)
if (self.handle < 0):
raise IOError(write_errors[self.handle])
def update_header(self):
"""
Updates header to edffile struct
"""
set_technician(self.handle, du(self.technician))
set_recording_additional(self.handle, du(self.recording_additional))
set_patientname(self.handle, du(self.patient_name))
set_patientcode(self.handle, du(self.patient_code))
set_patient_additional(self.handle, du(self.patient_additional))
set_equipment(self.handle, du(self.equipment))
set_admincode(self.handle, du(self.admincode))
set_gender(self.handle, gender2int(self.gender))
set_datarecord_duration(self.handle, self.duration)
set_number_of_annotation_signals(self.handle, self.number_of_annotations)
set_startdatetime(self.handle, self.recording_start_time.year, self.recording_start_time.month,
self.recording_start_time.day, self.recording_start_time.hour,
self.recording_start_time.minute, self.recording_start_time.second)
# subseconds are noted in nanoseconds, so we multiply by 100
if self.recording_start_time.microsecond>0:
set_starttime_subsecond(self.handle, self.recording_start_time.microsecond*100)
if isstr(self.birthdate):
if self.birthdate != '':
birthday = datetime.strptime(self.birthdate, '%d %b %Y').date()
set_birthdate(self.handle, birthday.year, birthday.month, birthday.day)
else:
set_birthdate(self.handle, self.birthdate.year, self.birthdate.month, self.birthdate.day)
for i in np.arange(self.n_channels):
set_samplefrequency(self.handle, i, self.channels[i]['sample_rate'])
set_physical_maximum(self.handle, i, self.channels[i]['physical_max'])
set_physical_minimum(self.handle, i, self.channels[i]['physical_min'])
set_digital_maximum(self.handle, i, self.channels[i]['digital_max'])
set_digital_minimum(self.handle, i, self.channels[i]['digital_min'])
set_label(self.handle, i, du(self.channels[i]['label']))
set_physical_dimension(self.handle, i, du(self.channels[i]['dimension']))
set_transducer(self.handle, i, du(self.channels[i]['transducer']))
set_prefilter(self.handle, i, du(self.channels[i]['prefilter']))
def setHeader(self, fileHeader):
"""
Sets the file header
"""
self.technician = fileHeader["technician"]
self.recording_additional = fileHeader["recording_additional"]
self.patient_name = fileHeader["patientname"]
self.patient_additional = fileHeader["patient_additional"]
self.patient_code = fileHeader["patientcode"]
self.equipment = fileHeader["equipment"]
self.admincode = fileHeader["admincode"]
self.gender = fileHeader["gender"]
self.recording_start_time = fileHeader["startdate"]
self.birthdate = fileHeader["birthdate"]
self.update_header()
def setSignalHeader(self, edfsignal, channel_info):
"""
Sets the parameter for signal edfsignal.
channel_info should be a dict with
these values:
'label' : channel label (string, <= 16 characters, must be unique)
'dimension' : physical dimension (e.g., mV) (string, <= 8 characters)
'sample_rate' : sample frequency in hertz (int)
'physical_max' : maximum physical value (float)
'physical_min' : minimum physical value (float)
'digital_max' : maximum digital value (int, -2**15 <= x < 2**15)
'digital_min' : minimum digital value (int, -2**15 <= x < 2**15)
"""
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal] = channel_info
self.update_header()
def setSignalHeaders(self, signalHeaders):
"""
Sets the parameter for all signals
Parameters
----------
signalHeaders : array_like
containing dict with
'label' : str
channel label (string, <= 16 characters, must be unique)
'dimension' : str
physical dimension (e.g., mV) (string, <= 8 characters)
'sample_rate' : int
sample frequency in hertz
'physical_max' : float
maximum physical value
'physical_min' : float
minimum physical value
'digital_max' : int
maximum digital value (-2**15 <= x < 2**15)
'digital_min' : int
minimum digital value (-2**15 <= x < 2**15)
"""
for edfsignal in np.arange(self.n_channels):
self.channels[edfsignal] = signalHeaders[edfsignal]
self.update_header()
def setTechnician(self, technician):
"""
Sets the technicians name to `technician`.
Notes
-----
This function is optional and can be called only after opening a file in writemode and before the first sample write action.
"""
self.technician = technician
self.update_header()
def setRecordingAdditional(self, recording_additional):
"""
Sets the additional recordinginfo
Notes
-----
This function is optional and can be called only after opening a file in writemode and before the first sample write action.
"""
self.recording_additional = recording_additional
self.update_header()
def setPatientName(self, patient_name):
"""
Sets the patientname to `patient_name`.
Notes
-----
This function is optional and can be called only after opening a file in writemode and before the first sample write action.
"""
self.patient_name = patient_name
self.update_header()
def setPatientCode(self, patient_code):
"""
Sets the patientcode to `patient_code`.
Notes
-----
This function is optional and can be called only after opening a file in writemode and before the first sample write action.
"""
self.patient_code = patient_code
self.update_header()
def setPatientAdditional(self, patient_additional):
"""
Sets the additional patientinfo to `patient_additional`.
Notes
-----
This function is optional and can be called only after opening a file in writemode and before the first sample write action.
"""
self.patient_additional = patient_additional
self.update_header()
def setEquipment(self, equipment):
"""
        Sets the name of the equipment used during the acquisition.
This function is optional and can be called only after opening a file in writemode and before the first sample write action.
Parameters
----------
equipment : str
Describes the measurement equpipment
"""
self.equipment = equipment
self.update_header()
def setAdmincode(self, admincode):
"""
Sets the admincode.
This function is optional and can be called only after opening a file in writemode and before the first sample write action.
Parameters
----------
admincode : str
admincode which is written into the header
"""
self.admincode = admincode
self.update_header()
def setGender(self, gender):
"""
Sets the gender.
This function is optional and can be called only after opening a file in writemode and before the first sample write action.
Parameters
----------
gender : int
1 is male, 0 is female
"""
self.gender = gender2int(gender)
self.update_header()
def setDatarecordDuration(self, duration):
"""
Sets the datarecord duration. The default value is 100000 which is 1 second.
ATTENTION: the argument "duration" is expressed in units of 10 microSeconds!
So, if you want to set the datarecord duration to 0.1 second, you must give
the argument "duration" a value of "10000".
This function is optional, normally you don't need to change
the default value. The datarecord duration must be in the range 0.001 to 60 seconds.
Returns 0 on success, otherwise -1.
Parameters
----------
duration : integer
Sets the datarecord duration in units of 10 microSeconds
Notes
-----
This function is NOT REQUIRED but can be called after opening a file in writemode and
before the first sample write action. This function can be used when you want
to use a samplerate which is not an integer. For example, if you want to use
a samplerate of 0.5 Hz, set the samplefrequency to 5 Hz and
the datarecord duration to 10 seconds. Do not use this function,
except when absolutely necessary!
"""
self.duration = duration
self.update_header()
def set_number_of_annotation_signals(self, number_of_annotations):
"""
Sets the number of annotation signals. The default value is 1
This function is optional and can be called only after opening a file in writemode
and before the first sample write action
Normally you don't need to change the default value. Only when the number of annotations
you want to write is more than the number of seconds of the duration of the recording, you can use
this function to increase the storage space for annotations
Minimum is 1, maximum is 64
Parameters
----------
number_of_annotations : integer
Sets the number of annotation signals
"""
number_of_annotations = max((min((int(number_of_annotations), 64)), 1))
self.number_of_annotations = number_of_annotations
self.update_header()
def setStartdatetime(self, recording_start_time):
"""
Sets the recording start Time
Parameters
----------
recording_start_time: datetime object
Sets the recording start Time
"""
if not isinstance(recording_start_time, datetime):
recording_start_time = datetime.strptime(recording_start_time,"%d %b %Y %H:%M:%S")
self.recording_start_time = recording_start_time
self.update_header()
def setBirthdate(self, birthdate):
"""
Sets the birthdate.
Parameters
----------
birthdate: date object from datetime
Examples
--------
>>> import pyedflib
>>> from datetime import datetime, date
>>> f = pyedflib.EdfWriter('test.bdf', 1, file_type=pyedflib.FILETYPE_BDFPLUS)
>>> f.setBirthdate(date(1951, 8, 2))
>>> f.close()
Notes
-----
This function is optional and can be called only after opening a file in writemode and before the first sample write action.
"""
if isinstance(birthdate, str):
birthdate = datetime.strptime(birthdate, "%d.%m.%Y")
self.birthdate = birthdate
self.update_header()
def setSamplefrequency(self, edfsignal, samplefrequency):
"""
Sets the samplefrequency of signal edfsignal.
Notes
-----
This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['sample_rate'] = samplefrequency
self.update_header()
def setPhysicalMaximum(self, edfsignal, physical_maximum):
"""
Sets the physical_maximum of signal edfsignal.
Parameters
----------
edfsignal: int
signal number
physical_maximum: float
Sets the physical maximum
Notes
-----
This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['physical_max'] = physical_maximum
self.update_header()
def setPhysicalMinimum(self, edfsignal, physical_minimum):
"""
Sets the physical_minimum of signal edfsignal.
Parameters
----------
edfsignal: int
signal number
physical_minimum: float
Sets the physical minimum
Notes
-----
This function is required for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['physical_min'] = physical_minimum
self.update_header()
def setDigitalMaximum(self, edfsignal, digital_maximum):
"""
Sets the maximum digital value of signal edfsignal.
Usually, the value 32767 is used for EDF+ and 8388607 for BDF+.
Parameters
----------
edfsignal : int
signal number
digital_maximum : int
Sets the maximum digital value
Notes
-----
This function is optional and can be called only after opening a file in writemode and before the first sample write action.
"""
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['digital_max'] = digital_maximum
self.update_header()
def setDigitalMinimum(self, edfsignal, digital_minimum):
"""
Sets the minimum digital value of signal edfsignal.
Usually, the value -32768 is used for EDF+ and -8388608 for BDF+. Usually this will be (-(digital_maximum + 1)).
Parameters
----------
edfsignal : int
signal number
digital_minimum : int
Sets the minimum digital value
Notes
-----
This function is optional and can be called only after opening a file in writemode and before the first sample write action.
"""
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['digital_min'] = digital_minimum
self.update_header()
def setLabel(self, edfsignal, label):
"""
Sets the label (name) of signal edfsignal ("FP1", "SaO2", etc.).
Parameters
----------
edfsignal : int
signal number on which the label should be changed
label : str
signal label
Notes
-----
This function is recommended for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['label'] = label
self.update_header()
def setPhysicalDimension(self, edfsignal, physical_dimension):
"""
Sets the physical dimension of signal edfsignal ("uV", "BPM", "mA", "Degr.", etc.)
:param edfsignal: int
:param physical_dimension: str
Notes
-----
This function is recommended for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['dimension'] = physical_dimension
self.update_header()
def setTransducer(self, edfsignal, transducer):
"""
Sets the transducer of signal edfsignal
:param edfsignal: int
:param transducer: str
Notes
-----
This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if (edfsignal < 0 or edfsignal > self.n_channels):
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['transducer'] = transducer
self.update_header()
def setPrefilter(self, edfsignal, prefilter):
"""
Sets the prefilter of signal edfsignal ("HP:0.1Hz", "LP:75Hz N:50Hz", etc.)
:param edfsignal: int
:param prefilter: str
Notes
-----
This function is optional for every signal and can be called only after opening a file in writemode and before the first sample write action.
"""
if edfsignal < 0 or edfsignal > self.n_channels:
raise ChannelDoesNotExist(edfsignal)
self.channels[edfsignal]['prefilter'] = prefilter
self.update_header()
def writePhysicalSamples(self, data):
"""
Writes n physical samples (uV, mA, Ohm) belonging to one signal where n
is the samplefrequency of the signal.
data_vec belonging to one signal. The size must be the samplefrequency of the signal.
Notes
-----
Writes n physical samples (uV, mA, Ohm) from data_vec belonging to one signal where n
is the samplefrequency of the signal. The physical samples will be converted to digital
samples using the values of physical maximum, physical minimum, digital maximum and digital
minimum. The number of samples written is equal to the samplefrequency of the signal.
Call this function for every signal in the file. The order is important! When there are 4
signals in the file, the order of calling this function must be: signal 0, signal 1, signal 2,
signal 3, signal 0, signal 1, signal 2, etc.
All parameters must be already written into the bdf/edf-file.
"""
return write_physical_samples(self.handle, data)
def writeDigitalSamples(self, data):
return write_digital_samples(self.handle, data)
def writeDigitalShortSamples(self, data):
return write_digital_short_samples(self.handle, data)
def blockWritePhysicalSamples(self, data):
"""
Writes physical samples (uV, mA, Ohm)
must be filled with samples from all signals
where each signal has n samples which is the samplefrequency of the signal.
data_vec belonging to one signal. The size must be the samplefrequency of the signal.
Notes
-----
buf must be filled with samples from all signals, starting with signal 0, 1, 2, etc.
one block equals one second
The physical samples will be converted to digital samples using the
values of physical maximum, physical minimum, digital maximum and digital minimum
The number of samples written is equal to the sum of the samplefrequencies of all signals
Size of buf should be equal to or bigger than sizeof(double) multiplied by the sum of the samplefrequencies of all signals
Returns 0 on success, otherwise -1
All parameters must be already written into the bdf/edf-file.
"""
return blockwrite_physical_samples(self.handle, data)
def blockWriteDigitalSamples(self, data):
return blockwrite_digital_samples(self.handle, data)
def blockWriteDigitalShortSamples(self, data):
return blockwrite_digital_short_samples(self.handle, data)
def writeSamples(self, data_list, digital = False):
"""
Writes physical samples (uV, mA, Ohm) from data belonging to all signals
The physical samples will be converted to digital samples using the values
of physical maximum, physical minimum, digital maximum and digital minimum.
if the samplefrequency of all signals are equal, then the data could be
saved into a matrix with the size (N,signals) If the samplefrequency
is different, then sample_freq is a vector containing all the different
samplefrequencys. The data is saved as list. Each list entry contains
a vector with the data of one signal.
If digital is True, digital signals (as directly from the ADC) will be expected.
(e.g. int16 from 0 to 2048)
All parameters must be already written into the bdf/edf-file.
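        A hedged sketch (added example), assuming two channels both configured with a
        sample rate of 100 and a physical range of -1.0 to 1.0 (values illustrative):
        >>> import numpy as np
        >>> data = [np.random.rand(100) * 2 - 1, np.random.rand(100) * 2 - 1]
        >>> f.writeSamples(data)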
"""
if (len(data_list) != len(self.channels)):
raise WrongInputSize(len(data_list))
if digital:
if any([not np.issubdtype(a.dtype, np.integer) for a in data_list]):
raise TypeError('Digital = True requires all signals in int')
# Check that all channels have different physical_minimum and physical_maximum
for chan in self.channels:
assert chan['physical_min'] != chan['physical_max'], \
'In chan {} physical_min {} should be different from '\
'physical_max {}'.format(chan['label'], chan['physical_min'], chan['physical_max'])
ind = []
notAtEnd = True
for i in np.arange(len(data_list)):
ind.append(0)
sampleLength = 0
sampleRates = np.zeros(len(data_list), dtype=np.int32)
for i in np.arange(len(data_list)):
sampleRates[i] = self.channels[i]['sample_rate']
if (np.size(data_list[i]) < ind[i] + self.channels[i]['sample_rate']):
notAtEnd = False
sampleLength += self.channels[i]['sample_rate']
dataOfOneSecond = np.array([], dtype=np.int32 if digital else None)
while notAtEnd:
# dataOfOneSecondInd = 0
del dataOfOneSecond
dataOfOneSecond = np.array([], dtype=np.int32 if digital else None)
for i in np.arange(len(data_list)):
# dataOfOneSecond[dataOfOneSecondInd:dataOfOneSecondInd+self.channels[i]['sample_rate']] = data_list[i].ravel()[int(ind[i]):int(ind[i]+self.channels[i]['sample_rate'])]
dataOfOneSecond = np.append(dataOfOneSecond,data_list[i].ravel()[int(ind[i]):int(ind[i]+sampleRates[i])])
# self.writePhysicalSamples(data_list[i].ravel()[int(ind[i]):int(ind[i]+self.channels[i]['sample_rate'])])
ind[i] += sampleRates[i]
# dataOfOneSecondInd += sampleRates[i]
if digital:
success = self.blockWriteDigitalSamples(dataOfOneSecond)
else:
success = self.blockWritePhysicalSamples(dataOfOneSecond)
if success<0:
raise IOError('Unknown error while calling blockWriteSamples')
for i in np.arange(len(data_list)):
if (np.size(data_list[i]) < ind[i] + sampleRates[i]):
notAtEnd = False
# dataOfOneSecondInd = 0
for i in np.arange(len(data_list)):
lastSamples = np.zeros(sampleRates[i], dtype=np.int32 if digital else None)
lastSampleInd = int(np.max(data_list[i].shape) - ind[i])
lastSampleInd = int(np.min((lastSampleInd,sampleRates[i])))
if lastSampleInd > 0:
lastSamples[:lastSampleInd] = data_list[i].ravel()[-lastSampleInd:]
# dataOfOneSecond[dataOfOneSecondInd:dataOfOneSecondInd+self.channels[i]['sample_rate']] = lastSamples
# dataOfOneSecondInd += self.channels[i]['sample_rate']
if digital:
success = self.writeDigitalSamples(lastSamples)
else:
success = self.writePhysicalSamples(lastSamples)
if success<0:
raise IOError('Unknown error while calling writeSamples')
# self.blockWritePhysicalSamples(dataOfOneSecond)
def writeAnnotation(self, onset_in_seconds, duration_in_seconds, description, str_format='utf-8'):
"""
Writes an annotation/event to the file
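        A hedged example (values illustrative): ``f.writeAnnotation(1.5, 0.25, "event")``
        marks an event at onset 1.5 s lasting 0.25 s; passing a negative duration such
        as -1 writes the annotation without a duration.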
"""
if self.file_type in [FILETYPE_EDF, FILETYPE_BDF]:
raise TypeError('Trying to write annotation to EDF/BDF, must use EDF+/BDF+')
if str_format == 'utf-8':
if duration_in_seconds >= 0:
return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(int), np.round(duration_in_seconds*10000).astype(int), du(description))
else:
return write_annotation_utf8(self.handle, np.round(onset_in_seconds*10000).astype(int), -1, du(description))
else:
if duration_in_seconds >= 0:
return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(int), np.round(duration_in_seconds*10000).astype(int), u(description).encode('latin1'))
else:
return write_annotation_latin1(self.handle, np.round(onset_in_seconds*10000).astype(int), -1, u(description).encode('latin1'))
def close(self):
"""
Closes the file.
"""
close_file(self.handle)
self.handle = -1
|
the-stack_0_10199 | import pytest
from galaxy.config import BaseAppConfiguration
from galaxy.config.schema import AppSchema
from galaxy.exceptions import ConfigurationError
# When a config property 'foo' has an attribute 'path_resolves_to', that attribute is a reference to
# another property 'bar'. Together, these two properties form a graph where 'foo' and 'bar are
# vertices and the reference from 'foo' to 'bar' is a directed edge.
#
# A schema may have any number of such implicit graphs, each having one or more edges. All together,
# they should form a DAG (directed acyclic graph).
#
# These tests ensure that the graph is loaded correctly for a variety of valid configurations,
# whereas an invalid configuration raises an error.
def get_schema(app_mapping):
return {'mapping': {'_': {'mapping': app_mapping}}}
def test_basecase(monkeypatch):
# Check that a valid graph is loaded correctly (this graph has 2 components)
mock_schema = {
'component1_path0': {
'type': 'str',
'default': 'value0',
},
'component1_path1': {
'type': 'str',
'default': 'value1',
'path_resolves_to': 'component1_path0',
},
'component1_path2': {
'type': 'str',
'default': 'value2',
'path_resolves_to': 'component1_path1',
},
'component2_path0': {
'type': 'str',
'default': 'value3',
},
'component2_path1': {
'type': 'str',
'default': 'value4',
'path_resolves_to': 'component2_path0',
},
}
monkeypatch.setattr(AppSchema, '_read_schema', lambda a, b: get_schema(mock_schema))
monkeypatch.setattr(BaseAppConfiguration, '_load_schema', lambda a: AppSchema(None, '_'))
config = BaseAppConfiguration()
assert config.component1_path0 == 'value0'
assert config.component1_path1 == 'value0/value1'
assert config.component1_path2 == 'value0/value1/value2'
assert config.component2_path0 == 'value3'
assert config.component2_path1 == 'value3/value4'
def test_resolves_to_invalid_property(monkeypatch):
# 'path_resolves_to' should point to an existing property in the schema
mock_schema = {
'path0': {
'type': 'str',
'default': 'value0',
},
'path1': {
'type': 'str',
'default': 'value1',
'path_resolves_to': 'invalid', # invalid
},
}
monkeypatch.setattr(AppSchema, '_read_schema', lambda a, b: get_schema(mock_schema))
with pytest.raises(ConfigurationError):
AppSchema(None, '_').validate_path_resolution_graph()
def test_path_resolution_cycle(monkeypatch):
# Must be a DAG, but this one has a cycle
mock_schema = {
'path0': {
'type': 'str',
'default': 'value0',
'path_resolves_to': 'path2',
},
'path1': {
'type': 'str',
'default': 'value1',
'path_resolves_to': 'path0',
},
'path2': {
'type': 'str',
'default': 'value2',
'path_resolves_to': 'path1',
},
}
monkeypatch.setattr(AppSchema, '_read_schema', lambda a, b: get_schema(mock_schema))
with pytest.raises(ConfigurationError):
AppSchema(None, '_').validate_path_resolution_graph()
def test_path_invalid_type(monkeypatch):
# Paths should be strings
mock_schema = {
'path0': {
'type': 'str',
'default': 'value0',
},
'path1': {
'type': 'float', # invalid
'default': 'value1',
'path_resolves_to': 'path0',
},
}
monkeypatch.setattr(AppSchema, '_read_schema', lambda a, b: get_schema(mock_schema))
with pytest.raises(ConfigurationError):
AppSchema(None, '_').validate_path_resolution_graph()
def test_resolves_to_invalid_type(monkeypatch):
    # The property referenced by 'path_resolves_to' should also be a string
mock_schema = {
'path0': {
'type': 'int', # invalid
'default': 'value0',
},
'path1': {
'type': 'str',
'default': 'value1',
'path_resolves_to': 'path0',
},
}
monkeypatch.setattr(AppSchema, '_read_schema', lambda a, b: get_schema(mock_schema))
with pytest.raises(ConfigurationError):
AppSchema(None, '_').validate_path_resolution_graph()
def test_resolves_with_empty_component(monkeypatch):
    # A path can be None (root path is never None; may be assigned elsewhere)
mock_schema = {
'path0': {
'type': 'str',
'default': 'value0',
},
'path1': {
'type': 'str',
'path_resolves_to': 'path0',
},
'path2': {
'type': 'str',
'default': 'value2',
'path_resolves_to': 'path1',
},
}
monkeypatch.setattr(AppSchema, '_read_schema', lambda a, b: get_schema(mock_schema))
monkeypatch.setattr(BaseAppConfiguration, '_load_schema', lambda a: AppSchema(None, '_'))
config = BaseAppConfiguration()
assert config.path0 == 'value0'
assert config.path1 == 'value0'
assert config.path2 == 'value0/value2'
|
the-stack_0_10200 | # -*- coding: utf-8 -*-
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import time
import re
from typing import Any, Dict, List, Optional
from google.auth import credentials as auth_credentials
from google.cloud.aiplatform import base
from google.cloud.aiplatform import initializer
from google.cloud.aiplatform import utils
from google.cloud.aiplatform.utils import json_utils
from google.cloud.aiplatform.utils import pipeline_utils
from google.protobuf import json_format
from google.cloud.aiplatform.compat.types import (
pipeline_job_v1beta1 as gca_pipeline_job_v1beta1,
pipeline_state_v1beta1 as gca_pipeline_state_v1beta1,
)
_LOGGER = base.Logger(__name__)
_PIPELINE_COMPLETE_STATES = set(
[
gca_pipeline_state_v1beta1.PipelineState.PIPELINE_STATE_SUCCEEDED,
gca_pipeline_state_v1beta1.PipelineState.PIPELINE_STATE_FAILED,
gca_pipeline_state_v1beta1.PipelineState.PIPELINE_STATE_CANCELLED,
gca_pipeline_state_v1beta1.PipelineState.PIPELINE_STATE_PAUSED,
]
)
_PIPELINE_ERROR_STATES = set(
[gca_pipeline_state_v1beta1.PipelineState.PIPELINE_STATE_FAILED]
)
# Pattern for valid names used as a Vertex resource name.
_VALID_NAME_PATTERN = re.compile("^[a-z][-a-z0-9]{0,127}$")
def _get_current_time() -> datetime.datetime:
"""Gets the current timestamp."""
return datetime.datetime.now()
def _set_enable_caching_value(
pipeline_spec: Dict[str, Any], enable_caching: bool
) -> None:
"""Sets pipeline tasks caching options.
Args:
pipeline_spec (Dict[str, Any]):
Required. The dictionary of pipeline spec.
enable_caching (bool):
Required. Whether to enable caching.
"""
for component in [pipeline_spec["root"]] + list(
pipeline_spec["components"].values()
):
if "dag" in component:
for task in component["dag"]["tasks"].values():
task["cachingOptions"] = {"enableCache": enable_caching}
class PipelineJob(base.VertexAiResourceNounWithFutureManager):
client_class = utils.PipelineJobClientWithOverride
_is_client_prediction_client = False
_resource_noun = "pipelineJobs"
_delete_method = "delete_pipeline_job"
_getter_method = "get_pipeline_job"
_list_method = "list_pipeline_jobs"
def __init__(
self,
display_name: str,
template_path: str,
job_id: Optional[str] = None,
pipeline_root: Optional[str] = None,
parameter_values: Optional[Dict[str, Any]] = None,
enable_caching: Optional[bool] = None,
encryption_spec_key_name: Optional[str] = None,
labels: Optional[Dict[str, str]] = None,
credentials: Optional[auth_credentials.Credentials] = None,
project: Optional[str] = None,
location: Optional[str] = None,
):
"""Retrieves a PipelineJob resource and instantiates its
representation.
Args:
display_name (str):
Required. The user-defined name of this Pipeline.
template_path (str):
Required. The path of PipelineJob or PipelineSpec JSON file. It
can be a local path or a Google Cloud Storage URI.
Example: "gs://project.name"
job_id (str):
Optional. The unique ID of the job run.
If not specified, pipeline name + timestamp will be used.
pipeline_root (str):
Optional. The root of the pipeline outputs. Default to be staging bucket.
parameter_values (Dict[str, Any]):
Optional. The mapping from runtime parameter names to its values that
control the pipeline run.
enable_caching (bool):
Optional. Whether to turn on caching for the run.
If this is not set, defaults to the compile time settings, which
are True for all tasks by default, while users may specify
different caching options for individual tasks.
If this is set, the setting applies to all tasks in the pipeline.
Overrides the compile time settings.
encryption_spec_key_name (str):
Optional. The Cloud KMS resource identifier of the customer
managed encryption key used to protect the job. Has the
form:
``projects/my-project/locations/my-region/keyRings/my-kr/cryptoKeys/my-key``.
The key needs to be in the same region as where the compute
resource is created.
If this is set, then all
resources created by the BatchPredictionJob will
be encrypted with the provided encryption key.
Overrides encryption_spec_key_name set in aiplatform.init.
labels (Dict[str,str]):
Optional. The user defined metadata to organize PipelineJob.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to create this batch prediction
job. Overrides credentials set in aiplatform.init.
            project (str):
Optional. Project to retrieve PipelineJob from. If not set,
project set in aiplatform.init will be used.
            location (str):
Optional. Location to create PipelineJob. If not set,
location set in aiplatform.init will be used.
Raises:
ValueError: If job_id or labels have incorrect format.
"""
utils.validate_display_name(display_name)
if labels:
utils.validate_labels(labels)
super().__init__(project=project, location=location, credentials=credentials)
self._parent = initializer.global_config.common_location_path(
project=project, location=location
)
pipeline_json = json_utils.load_json(
template_path, self.project, self.credentials
)
# Pipeline_json can be either PipelineJob or PipelineSpec.
if pipeline_json.get("pipelineSpec") is not None:
pipeline_job = pipeline_json
pipeline_root = (
pipeline_root
or pipeline_job["pipelineSpec"].get("defaultPipelineRoot")
or pipeline_job["runtimeConfig"].get("gcsOutputDirectory")
or initializer.global_config.staging_bucket
)
else:
pipeline_job = {
"pipelineSpec": pipeline_json,
"runtimeConfig": {},
}
pipeline_root = (
pipeline_root
or pipeline_job["pipelineSpec"].get("defaultPipelineRoot")
or initializer.global_config.staging_bucket
)
builder = pipeline_utils.PipelineRuntimeConfigBuilder.from_job_spec_json(
pipeline_job
)
builder.update_pipeline_root(pipeline_root)
builder.update_runtime_parameters(parameter_values)
runtime_config_dict = builder.build()
runtime_config = gca_pipeline_job_v1beta1.PipelineJob.RuntimeConfig()._pb
json_format.ParseDict(runtime_config_dict, runtime_config)
pipeline_name = pipeline_job["pipelineSpec"]["pipelineInfo"]["name"]
self.job_id = job_id or "{pipeline_name}-{timestamp}".format(
pipeline_name=re.sub("[^-0-9a-z]+", "-", pipeline_name.lower())
.lstrip("-")
.rstrip("-"),
timestamp=_get_current_time().strftime("%Y%m%d%H%M%S"),
)
if not _VALID_NAME_PATTERN.match(self.job_id):
raise ValueError(
"Generated job ID: {} is illegal as a Vertex pipelines job ID. "
"Expecting an ID following the regex pattern "
'"[a-z][-a-z0-9]{{0,127}}"'.format(job_id)
)
if enable_caching is not None:
_set_enable_caching_value(pipeline_job["pipelineSpec"], enable_caching)
self._gca_resource = gca_pipeline_job_v1beta1.PipelineJob(
display_name=display_name,
pipeline_spec=pipeline_job["pipelineSpec"],
labels=labels,
runtime_config=runtime_config,
encryption_spec=initializer.global_config.get_encryption_spec(
encryption_spec_key_name=encryption_spec_key_name
),
)
@base.optional_sync()
def run(
self,
service_account: Optional[str] = None,
network: Optional[str] = None,
sync: Optional[bool] = True,
) -> None:
"""Run this configured PipelineJob and monitor the job until completion.
Args:
service_account (str):
Optional. Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
network (str):
Optional. The full name of the Compute Engine network to which the job
should be peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
sync (bool):
Optional. Whether to execute this method synchronously. If False, this method will unblock and it will be executed in a concurrent Future.
"""
self.submit(service_account=service_account, network=network)
self._block_until_complete()
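    # Hedged usage sketch: the project, bucket, template path and parameter names
    # below are illustrative assumptions, not values defined by this module.
    #
    #   from google.cloud import aiplatform
    #   aiplatform.init(project="my-project", location="us-central1",
    #                   staging_bucket="gs://my-bucket")
    #   job = PipelineJob(
    #       display_name="demo-run",
    #       template_path="gs://my-bucket/pipeline_job.json",
    #       parameter_values={"text": "hello"},
    #       enable_caching=False,
    #   )
    #   job.run(sync=True)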
def submit(
self, service_account: Optional[str] = None, network: Optional[str] = None,
) -> None:
"""Run this configured PipelineJob.
Args:
service_account (str):
Optional. Specifies the service account for workload run-as account.
Users submitting jobs must have act-as permission on this run-as account.
network (str):
Optional. The full name of the Compute Engine network to which the job
should be peered. For example, projects/12345/global/networks/myVPC.
Private services access must already be configured for the network.
If left unspecified, the job is not peered with any network.
"""
if service_account:
self._gca_resource.service_account = service_account
if network:
self._gca_resource.network = network
_LOGGER.log_create_with_lro(self.__class__)
self._gca_resource = self.api_client.create_pipeline_job(
parent=self._parent,
pipeline_job=self._gca_resource,
pipeline_job_id=self.job_id,
)
_LOGGER.log_create_complete_with_getter(
self.__class__, self._gca_resource, "pipeline_job"
)
_LOGGER.info("View Pipeline Job:\n%s" % self._dashboard_uri())
def wait(self):
"""Wait for thie PipelineJob to complete."""
if self._latest_future is None:
self._block_until_complete()
else:
super().wait()
@property
def pipeline_spec(self):
return self._gca_resource.pipeline_spec
@property
def state(self) -> Optional[gca_pipeline_state_v1beta1.PipelineState]:
"""Current pipeline state."""
self._sync_gca_resource()
return self._gca_resource.state
@property
def has_failed(self) -> bool:
"""Returns True if pipeline has failed.
False otherwise.
"""
return (
self.state == gca_pipeline_state_v1beta1.PipelineState.PIPELINE_STATE_FAILED
)
def _dashboard_uri(self) -> str:
"""Helper method to compose the dashboard uri where pipeline can be
viewed."""
fields = utils.extract_fields_from_resource_name(self.resource_name)
url = f"https://console.cloud.google.com/vertex-ai/locations/{fields.location}/pipelines/runs/{fields.id}?project={fields.project}"
return url
def _block_until_complete(self):
"""Helper method to block and check on job until complete."""
# Used these numbers so failures surface fast
wait = 5 # start at five seconds
log_wait = 5
max_wait = 60 * 5 # 5 minute wait
        multiplier = 2  # scale the logging interval by 2 every iteration, capped at max_wait
previous_time = time.time()
while self.state not in _PIPELINE_COMPLETE_STATES:
current_time = time.time()
if current_time - previous_time >= log_wait:
_LOGGER.info(
"%s %s current state:\n%s"
% (
self.__class__.__name__,
self._gca_resource.name,
self._gca_resource.state,
)
)
log_wait = min(log_wait * multiplier, max_wait)
previous_time = current_time
time.sleep(wait)
        # Error is only populated when the job state is
        # PIPELINE_STATE_FAILED or PIPELINE_STATE_CANCELLED.
if self._gca_resource.state in _PIPELINE_ERROR_STATES:
raise RuntimeError("Job failed with:\n%s" % self._gca_resource.error)
else:
_LOGGER.log_action_completed_against_resource("run", "completed", self)
@classmethod
def get(
cls,
resource_name: str,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> "PipelineJob":
"""Get a Vertex AI Pipeline Job for the given resource_name.
Args:
resource_name (str):
Required. A fully-qualified resource name or ID.
project (str):
Optional. Project to retrieve dataset from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve dataset from. If not set,
location set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to upload this model.
Overrides credentials set in aiplatform.init.
Returns:
A Vertex AI PipelineJob.
"""
self = cls._empty_constructor(
project=project,
location=location,
credentials=credentials,
resource_name=resource_name,
)
self._gca_resource = self._get_gca_resource(resource_name=resource_name)
return self
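    # Illustrative only: the resource name below is an assumed example of the
    # "projects/{project}/locations/{location}/pipelineJobs/{job_id}" format.
    #
    #   PipelineJob.get(
    #       "projects/123/locations/us-central1/pipelineJobs/my-pipeline-20210101000000"
    #   )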
def cancel(self) -> None:
"""Starts asynchronous cancellation on the PipelineJob. The server
makes a best effort to cancel the job, but success is not guaranteed.
On successful cancellation, the PipelineJob is not deleted; instead it
becomes a job with state set to `CANCELLED`.
"""
self.api_client.cancel_pipeline_job(name=self.resource_name)
@classmethod
def list(
cls,
filter: Optional[str] = None,
order_by: Optional[str] = None,
project: Optional[str] = None,
location: Optional[str] = None,
credentials: Optional[auth_credentials.Credentials] = None,
) -> List["PipelineJob"]:
"""List all instances of this PipelineJob resource.
Example Usage:
aiplatform.PipelineJob.list(
filter='display_name="experiment_a27"',
order_by='create_time desc'
)
Args:
filter (str):
Optional. An expression for filtering the results of the request.
For field names both snake_case and camelCase are supported.
order_by (str):
Optional. A comma-separated list of fields to order by, sorted in
ascending order. Use "desc" after a field name for descending.
Supported fields: `display_name`, `create_time`, `update_time`
project (str):
Optional. Project to retrieve list from. If not set, project
set in aiplatform.init will be used.
location (str):
Optional. Location to retrieve list from. If not set, location
set in aiplatform.init will be used.
credentials (auth_credentials.Credentials):
Optional. Custom credentials to use to retrieve list. Overrides
credentials set in aiplatform.init.
Returns:
List[PipelineJob] - A list of PipelineJob resource objects
"""
return cls._list_with_local_order(
filter=filter,
order_by=order_by,
project=project,
location=location,
credentials=credentials,
)
def wait_for_resource_creation(self) -> None:
"""Waits until resource has been created."""
self._wait_for_resource_creation()
|