ext (stringclasses, 9 values) | sha (stringlengths, 40) | content (stringlengths, 3–1.04M) |
---|---|---|
py | 1a5ce94196413e07370d609d78dd290c318d3a5d |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from collections import namedtuple
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import resources
from neutron_lib import exceptions as n_exceptions
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib.services.logapi import constants as log_const
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import importutils
from ovsdbapp.backend.ovs_idl import idlutils
from neutron._i18n import _
from neutron.common.ovn import constants as ovn_const
from neutron.common.ovn import utils
from neutron.conf.services import logging as log_cfg
from neutron.services.logapi.common import db_api
from neutron.services.logapi.common import sg_callback
from neutron.services.logapi.drivers import base
from neutron.services.logapi.drivers import manager
LOG = logging.getLogger(__name__)
DRIVER = None
log_cfg.register_log_driver_opts()
SUPPORTED_LOGGING_TYPES = [log_const.SECURITY_GROUP]
class LoggingNotSupported(n_exceptions.NeutronException):
message = _("The current OVN version does not offer support "
"for neutron network log functionality.")
class OVNDriver(base.DriverBase):
def __init__(self):
super().__init__(
name="ovn",
vif_types=[portbindings.VIF_TYPE_OVS,
portbindings.VIF_TYPE_VHOST_USER],
vnic_types=[portbindings.VNIC_NORMAL],
supported_logging_types=SUPPORTED_LOGGING_TYPES,
requires_rpc=False)
self._log_plugin_property = None
self.meter_name = (
cfg.CONF.network_log.local_output_log_base or "acl_log_meter")
@staticmethod
def network_logging_supported(ovn_nb):
columns = list(ovn_nb._tables["Meter"].columns)
return ("fair" in columns)
@classmethod
def create(cls, plugin_driver):
cls.plugin_driver = plugin_driver
return OVNDriver()
@property
def _log_plugin(self):
if self._log_plugin_property is None:
self._log_plugin_property = directory.get_plugin(
plugin_constants.LOG_API)
return self._log_plugin_property
@staticmethod
def _log_dict_to_obj(log_dict):
cls = namedtuple('Log_obj', log_dict)
cls.__new__.__defaults__ = tuple(log_dict.values())
return cls()
def _get_logs(self, context):
log_objs = self._log_plugin.get_logs(context)
return [self._log_dict_to_obj(lo) for lo in log_objs]
@property
def ovn_nb(self):
return self.plugin_driver.nb_ovn
def _create_ovn_fair_meter(self, ovn_txn):
"""Create row in Meter table with fair attribute set to True.
Create a row in OVN's NB Meter table based on well-known name. This
method uses the network_log configuration to specify the attributes
        of the meter. The current implementation needs only one 'fair' meter
        row, which is then referred to by multiple ACL rows.
        :param ovn_txn: OVN northbound idl transaction.
"""
meter = self.ovn_nb.db_find_rows(
"Meter", ("name", "=", self.meter_name)).execute(check_error=True)
if meter:
meter = meter[0]
try:
meter_band = self.ovn_nb.lookup("Meter_Band",
meter.bands[0].uuid)
if all((meter.unit == "pktps",
meter.fair[0],
meter_band.rate == cfg.CONF.network_log.rate_limit,
meter_band.burst_size ==
cfg.CONF.network_log.burst_limit)):
# Meter (and its meter-band) unchanged: noop.
return
except idlutils.RowNotFound:
pass
# Re-create meter (and its meter-band) with the new attributes.
            # This is supposed to happen only if the configuration changed, so
            # doing updates would be overkill: better to leverage the ovsdbapp
# library to avoid the complexity.
ovn_txn.add(self.ovn_nb.meter_del(meter.uuid))
# Create meter
LOG.info("Creating network log fair meter %s", self.meter_name)
ovn_txn.add(self.ovn_nb.meter_add(
name=self.meter_name,
unit="pktps",
rate=cfg.CONF.network_log.rate_limit,
fair=True,
burst_size=cfg.CONF.network_log.burst_limit,
may_exist=False,
external_ids={ovn_const.OVN_DEVICE_OWNER_EXT_ID_KEY:
log_const.LOGGING_PLUGIN}))
@staticmethod
def _acl_actions_enabled(log_obj):
if not log_obj.enabled:
return set()
if log_obj.event == log_const.ACCEPT_EVENT:
return {ovn_const.ACL_ACTION_ALLOW_RELATED,
ovn_const.ACL_ACTION_ALLOW}
if log_obj.event == log_const.DROP_EVENT:
return {ovn_const.ACL_ACTION_DROP,
ovn_const.ACL_ACTION_REJECT}
# Fall through case: log_const.ALL_EVENT
return {ovn_const.ACL_ACTION_DROP,
ovn_const.ACL_ACTION_REJECT,
ovn_const.ACL_ACTION_ALLOW_RELATED,
ovn_const.ACL_ACTION_ALLOW}
def _remove_acls_log(self, pgs, ovn_txn, log_name=None):
acl_changes, acl_visits = 0, 0
for pg in pgs:
for acl_uuid in pg["acls"]:
acl_visits += 1
# skip acls used by a different network log
if log_name:
acl = self.ovn_nb.lookup("ACL", acl_uuid)
if acl.name and acl.name[0] != log_name:
continue
ovn_txn.add(self.ovn_nb.db_set(
"ACL", acl_uuid,
("log", False),
("meter", []),
("name", []),
("severity", [])
))
acl_changes += 1
msg = "Cleared %d (out of %d visited) ACLs"
if log_name:
msg += " for network log {}".format(log_name)
LOG.info(msg, acl_changes, acl_visits)
def _set_acls_log(self, pgs, ovn_txn, actions_enabled, log_name):
acl_changes, acl_visits = 0, 0
for pg in pgs:
for acl_uuid in pg["acls"]:
acl_visits += 1
acl = self.ovn_nb.lookup("ACL", acl_uuid)
# skip acls used by a different network log
if acl.name and acl.name[0] != log_name:
continue
ovn_txn.add(self.ovn_nb.db_set(
"ACL", acl_uuid,
("log", acl.action in actions_enabled),
("meter", self.meter_name),
("name", log_name),
("severity", "info")
))
acl_changes += 1
LOG.info("Set %d (out of %d visited) ACLs for network log %s",
acl_changes, acl_visits, log_name)
def _update_log_objs(self, context, ovn_txn, log_objs):
for log_obj in log_objs:
pgs = self._pgs_from_log_obj(context, log_obj)
actions_enabled = self._acl_actions_enabled(log_obj)
self._set_acls_log(pgs, ovn_txn, actions_enabled,
utils.ovn_name(log_obj.id))
def _pgs_all(self):
return self.ovn_nb.db_list(
"Port_Group", columns=["name", "acls"]).execute(check_error=True)
def _pgs_from_log_obj(self, context, log_obj):
"""Map Neutron log_obj into affected port groups in OVN.
:param context: current running context information
:param log_obj: a log_object to be analyzed.
"""
if not log_obj.resource_id and not log_obj.target_id:
# No sg, no port: return all pgs
return self._pgs_all()
pgs = []
# include special pg_drop to log DROP and ALL actions
if not log_obj.event or log_obj.event in (log_const.DROP_EVENT,
log_const.ALL_EVENT):
try:
pg = self.ovn_nb.lookup("Port_Group",
ovn_const.OVN_DROP_PORT_GROUP_NAME)
pgs.append({"name": pg.name,
"acls": [r.uuid for r in pg.acls]})
except idlutils.RowNotFound:
pass
if log_obj.resource_id:
try:
pg = self.ovn_nb.lookup("Port_Group",
utils.ovn_port_group_name(
log_obj.resource_id))
pgs.append({"name": pg.name,
"acls": [r.uuid for r in pg.acls]})
except idlutils.RowNotFound:
pass
        # Note: when sg is provided, it is redundant to get sgs from the port,
        # because the model will ensure that the sg is associated with the
        # neutron port
elif log_obj.target_id:
sg_ids = db_api._get_sgs_attached_to_port(context,
log_obj.target_id)
for sg_id in sg_ids:
try:
pg = self.ovn_nb.lookup("Port_Group",
utils.ovn_port_group_name(sg_id))
pgs.append({"name": pg.name,
"acls": [r.uuid for r in pg.acls]})
except idlutils.RowNotFound:
pass
return pgs
def create_log(self, context, log_obj):
"""Create a log_obj invocation.
:param context: current running context information
        :param log_obj: a log object being created
"""
LOG.debug("Create_log %s", log_obj)
pgs = self._pgs_from_log_obj(context, log_obj)
actions_enabled = self._acl_actions_enabled(log_obj)
with self.ovn_nb.transaction(check_error=True) as ovn_txn:
self._create_ovn_fair_meter(ovn_txn)
self._set_acls_log(pgs, ovn_txn, actions_enabled,
utils.ovn_name(log_obj.id))
def create_log_precommit(self, context, log_obj):
"""Create a log_obj precommit.
:param context: current running context information
:param log_obj: a log object being created
"""
LOG.debug("Create_log_precommit %s", log_obj)
if not self.network_logging_supported(self.ovn_nb):
raise LoggingNotSupported()
def update_log(self, context, log_obj):
"""Update a log_obj invocation.
:param context: current running context information
:param log_obj: a log object being updated
"""
LOG.debug("Update_log %s", log_obj)
pgs = self._pgs_from_log_obj(context, log_obj)
actions_enabled = self._acl_actions_enabled(log_obj)
with self.ovn_nb.transaction(check_error=True) as ovn_txn:
self._set_acls_log(pgs, ovn_txn, actions_enabled,
utils.ovn_name(log_obj.id))
def delete_log(self, context, log_obj):
"""Delete a log_obj invocation.
:param context: current running context information
:param log_obj: a log_object being deleted
"""
LOG.debug("Delete_log %s", log_obj)
# If we are removing the last log_obj, let's clear log from all acls.
# This is a simple way of ensuring that no acl logs are left behind!
log_objs = self._get_logs(context)
if not log_objs or (
len(log_objs) == 1 and log_objs[0].id == log_obj.id):
pgs = self._pgs_all()
with self.ovn_nb.transaction(check_error=True) as ovn_txn:
self._remove_acls_log(pgs, ovn_txn)
ovn_txn.add(self.ovn_nb.meter_del(self.meter_name,
if_exists=True))
LOG.info("All ACL logs cleared after deletion of log_obj %s",
log_obj.id)
return
# Remove log_obj and revisit all remaining ones, since the acls that
# were serving the removed log_obj may be usable by the remaining
# log_objs.
pgs = self._pgs_from_log_obj(context, log_obj)
with self.ovn_nb.transaction(check_error=True) as ovn_txn:
self._remove_acls_log(pgs, ovn_txn, utils.ovn_name(log_obj.id))
# TODO(flaviof): We needed to break this second part into a separate
# transaction because logic that determines the value of the 'freed up'
        # acl rows will not see the modified rows unless it was inside an
# idl command.
with self.ovn_nb.transaction(check_error=True) as ovn_txn:
self._update_log_objs(context, ovn_txn, [lo for lo in log_objs
if lo.id != log_obj.id])
def resource_update(self, context, log_objs):
"""Tell the agent when resources related to log_objects are
being updated
:param context: current running context information
:param log_objs: a list of log_objects, whose related resources are
being updated.
"""
LOG.debug("Resource_update %s", log_objs)
with self.ovn_nb.transaction(check_error=True) as ovn_txn:
self._update_log_objs(context, ovn_txn, log_objs)
def register(plugin_driver):
"""Register the driver."""
global DRIVER
if not DRIVER:
DRIVER = OVNDriver.create(plugin_driver)
# Trigger decorator
importutils.import_module(
"neutron.services.logapi.common.sg_validate"
)
# Register resource callback handler
manager.register(
resources.SECURITY_GROUP_RULE, sg_callback.SecurityGroupRuleCallBack)
LOG.info("OVN logging driver registered")
return DRIVER
|
py | 1a5cea5dbee27c7528bbaaa7ce93aa2f2cbb6197 |
# -*- coding: utf-8 -*-
# Copyright 2021 Tampere University and VTT Technical Research Centre of Finland
# This software was developed as a part of the ProCemPlus project: https://www.senecc.fi/projects/procemplus
# This source code is licensed under the MIT license. See LICENSE in the repository root directory.
# Author(s): Otto Hylli <[email protected]>
# Antti Keski-Koukkari <[email protected]>
'''
Contains class for a simulation platform component used to simulate energy storages.
'''
import asyncio
from typing import Union, Any
from tools.components import AbstractSimulationComponent
from tools.messages import BaseMessage, AbstractResultMessage
from tools.tools import FullLogger, load_environmental_variables, EnvironmentVariable
from tools.datetime_tools import to_utc_datetime_object
from domain_messages.resource import ResourceStateMessage
from domain_messages.ControlState import ControlStatePowerSetpointMessage
from domain_tools.resource.resource_state_source import ResourceState, CsvFileResourceStateSource, CsvFileError
from storage_resource.state import StorageState
# names of used environment variables
RESOURCE_STATE_TOPIC = "RESOURCE_STATE_TOPIC"
RESOURCE_STATE_CSV_FILE = "RESOURCE_STATE_CSV_FILE"
RESOURCE_STATE_CSV_DELIMITER = "RESOURCE_STATE_CSV_DELIMITER"
CUSTOMER_ID = 'CUSTOMER_ID'
NODE = 'NODE'
CHARGE_RATE = 'CHARGE_RATE'
DISCHARGE_RATE = 'DISCHARGE_RATE'
INITIAL_STATE_OF_CHARGE = 'INITIAL_STATE_OF_CHARGE'
KWH_RATED = 'KWH_RATED'
DISCHARGE_EFFICIENCY = 'DISCHARGE_EFFICIENCY'
CHARGE_EFFICIENCY = 'CHARGE_EFFICIENCY'
KW_RATED = 'KW_RATED'
SELF_DISCHARGE = 'SELF_DISCHARGE'
CONTROLLER_ID = 'CONTROLLER_ID'
CONTROL_STATE_TOPIC = 'CONTROL_STATE_TOPIC'
LOGGER = FullLogger( __name__ )
class StorageResource(AbstractSimulationComponent):
'''
    A simulation platform component used to simulate energy storages. For each epoch it gets the power required of it from either a csv file or a ControlState message.
It calculates its state based on the power and epoch length. Then it publishes its state as a ResourceState message which
includes the actual power the storage could manage and its state of charge.
'''
def __init__(self, storage: StorageState, state_source: CsvFileResourceStateSource = None, initialization_error: str = None ):
'''
Create a storage resource.
storage: Used to model and calculate the actual state of the storage.
        state_source: If not None, this storage will operate according to the power values from the given source. If None, the component expects to get ControlState messages.
        initialization_error: If not None, indicates that the component cannot function properly and should send the given error message when the simulation starts.
'''
super().__init__()
self._storage = storage
self._state_source = state_source
self.initialization_error = initialization_error
if self.initialization_error is not None:
LOGGER.error( self.initialization_error )
# get used main topics from environment or use defaults.
environment = load_environmental_variables(
(RESOURCE_STATE_TOPIC, str, "ResourceState"),
(CONTROL_STATE_TOPIC, str, 'ControlState.PowerSetpoint' )
)
# used as a part of the topic ResourceState messages are published to.
self._type = 'Storage'
self._resource_state_topic = environment[ RESOURCE_STATE_TOPIC ]
# publish resource states to this topic
self._result_topic = '.'.join( [ self._resource_state_topic, self._type, self.component_name ])
# other topics component listens to. Possibly the ControlState topic.
other_topics = []
if state_source is None:
            # no csv file: subscribe to ControlState messages
control_state_topic = environment[ CONTROL_STATE_TOPIC ]
control_state_topic = control_state_topic +'.' +self.component_name
other_topics = [ control_state_topic ]
LOGGER.info(f'Listening to control state messages from topic {control_state_topic}.')
else:
LOGGER.info('Using a CSV file as the control state source.')
# super class will handle subscribing to the topic.
self._other_topics = other_topics
# store ControlState message for current epoch here.
self._control_state_for_epoch = None
async def process_epoch(self) -> bool:
'''
Handles the processing of an epoch by calculating the new state for the storage and publishing it for the epoch.
'''
LOGGER.debug( f'Starting to process epoch {self._latest_epoch}.' )
try:
await self._send_resource_state_message()
except Exception as error:
description = f'Unable to create or send a ResourceState message: {str( error )}'
LOGGER.error( description )
await self.send_error_message(description)
return False
return True
async def all_messages_received_for_epoch(self) -> bool:
        '''Component is ready to process an epoch if it uses a csv state source or if it has already received a ControlState message for the current epoch.'''
return self._state_source is not None or (self._control_state_for_epoch is not None and self._control_state_for_epoch.epoch_number == self._latest_epoch)
async def general_message_handler(self, message_object: Union[BaseMessage, Any],
message_routing_key: str) -> None:
'''Handle receiving of ControlState messages.'''
# Check that we have a ControlState message and it is what we expect.
if isinstance( message_object, ControlStatePowerSetpointMessage ):
if message_object.epoch_number != self._latest_epoch:
LOGGER.warning(f'Got a control state message with id {message_object.message_id} for epoch {message_object.epoch_number} but was expecting it for epoch {self._latest_epoch}.')
# ignore message
return
if self._control_state_for_epoch is not None and self._control_state_for_epoch.epoch_number == message_object.epoch_number:
                LOGGER.warning(f'Already received a control state message for epoch {self._latest_epoch} but received another one with message id {message_object.message_id}.')
# ignore message
return
# got an expected message. Ready to process epoch.
self._control_state_for_epoch = message_object
self._triggering_message_ids.append(message_object.message_id)
await self.start_epoch()
async def _send_resource_state_message(self):
'''
Calculates new state for the storage and publishes it as a ResourceState message.
'''
resource_state = self._get_resource_state_message()
await self._rabbitmq_client.send_message(self._result_topic, resource_state.bytes())
def _get_resource_state_message(self) -> ResourceStateMessage:
'''
Create a ResourceStateMessage from the new state of the storage.
'''
if self._state_source is not None:
# get desired power from state source
control_state = self._state_source.getNextEpochData()
# add possible new customer id and node to storage so it can be included as a part of the resource state message
self._storage.customer_id = control_state.customerid
self._storage.node = control_state.node
else:
# get storage control information from received message
control_state = ResourceState( customerid = None, real_power = self._control_state_for_epoch.real_power.value, reactive_power = self._control_state_for_epoch.reactive_power.value )
# power desired from the storage
real_power = control_state.real_power
# calculate the duration of the epoch in hours required to calculate the new state of the storage
epoch_start = to_utc_datetime_object( self._latest_epoch_message.start_time )
epoch_end = to_utc_datetime_object( self._latest_epoch_message.end_time )
epoch_duration = epoch_end -epoch_start
epoch_duration_h = epoch_duration.total_seconds() /3600
state = self._storage.calculate_state(real_power, epoch_duration_h)
# create ResourceState message based on the storage state
message = ResourceStateMessage(
SimulationId = self.simulation_id,
Type = ResourceStateMessage.CLASS_MESSAGE_TYPE,
SourceProcessId = self.component_name,
MessageId = next(self._message_id_generator),
EpochNumber = self._latest_epoch,
TriggeringMessageIds = self._triggering_message_ids,
CustomerId = state.customerid,
node = state.node,
RealPower = state.real_power,
ReactivePower = state.reactive_power,
Node = state.node,
StateOfCharge = state.state_of_charge
)
if control_state.real_power != state.real_power:
# storage could not operate with the required power so add a warning about it.
message.warnings = [ 'warning.input.range' ]
return message
def create_component() -> StorageResource:
'''
Create a StorageResource based on the initialization environment variables.
'''
    # specify environment variables to be read. For optional ones, mark the default value as None even though that is the default anyway.
env_variable_spec = (
( RESOURCE_STATE_CSV_FILE, str, None ),
( RESOURCE_STATE_CSV_DELIMITER, str, "," ),
( CUSTOMER_ID, str, None ),
( NODE, str, None ),
( CHARGE_RATE, float, 100.0 ),
( DISCHARGE_RATE, float, 100.0 ),
( CHARGE_EFFICIENCY, float, 90.0 ),
( DISCHARGE_EFFICIENCY, float, 90.0 ),
( KWH_RATED, float ),
( INITIAL_STATE_OF_CHARGE, float ),
( KW_RATED, float ),
( SELF_DISCHARGE, float, 0.0 )
)
environment = load_environmental_variables( *env_variable_spec )
# check if some required environment variables were missing.
missing = []
for item in env_variable_spec:
if len( item ) == 2:
# no explicit default value given so this was required
if environment[ item[0] ] is None:
missing.append( item[0] )
initialization_error = None # possible initialization error message goes here
if len( missing ) > 0:
initialization_error = 'Component missing required initialization environment variables: ' +', '.join( missing )
csv_file = environment[RESOURCE_STATE_CSV_FILE]
state_source = None # if a state source is used it goes here.
if csv_file is not None:
node = None # no initial value for storage node. Read from csv.
try:
state_source = CsvFileResourceStateSource( csv_file, environment[RESOURCE_STATE_CSV_DELIMITER])
except CsvFileError as error:
initialization_error = f'Unable to create a csv file resource state source for the component: {str( error )}'
elif csv_file is None:
        # Since the ControlState message currently does not include a node, it can be set with an environment variable
node = environment[NODE]
        # since a state source is not used, the customer id is required from the environment:
        # the ResourceState message requires a customer id and the ControlState message does not include it.
if environment[CUSTOMER_ID] is None:
            initialization_error = f'When the {RESOURCE_STATE_CSV_FILE} initialization environment variable is not given, {CUSTOMER_ID} is required.'
storage = None
try:
# create storage state based on environment variables to be used by the component
storage = StorageState(customer_id = environment[CUSTOMER_ID], node = node, initial_state_of_charge = environment[INITIAL_STATE_OF_CHARGE], kwh_rated = environment[KWH_RATED], kw_rated = environment[KW_RATED],
self_discharge = environment[SELF_DISCHARGE], charge_rate = environment[CHARGE_RATE],
discharge_rate = environment[DISCHARGE_RATE ], charge_efficiency = environment[CHARGE_EFFICIENCY],
discharge_efficiency = environment[DISCHARGE_EFFICIENCY])
except Exception as err:
initialization_error = f'Unable to create a storage state: {err}'
# create component
return StorageResource(storage, state_source, initialization_error )
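# Illustrative environment configuration (not part of the original module;
# the values are hypothetical). KWH_RATED, KW_RATED and INITIAL_STATE_OF_CHARGE
# have no default in env_variable_spec and are therefore required; CUSTOMER_ID
# is required only when RESOURCE_STATE_CSV_FILE is not given.
#
#   KWH_RATED=100.0
#   KW_RATED=20.0
#   INITIAL_STATE_OF_CHARGE=50.0
#   CUSTOMER_ID=customer-1
#   RESOURCE_STATE_CSV_FILE=storage_states.csv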
async def start_component():
'''
Create and start a StorageResource component.
'''
try:
resource = create_component()
await resource.start()
while not resource.is_stopped:
await asyncio.sleep( 2 )
except Exception as error:
LOGGER.error("{} : {}".format(type(error).__name__, error))
if __name__ == '__main__':
asyncio.run(start_component())
|
py | 1a5cea9eb29171261951261404c94d6e70abc7a6 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
This module contains operator for uploading local file to GCS.
"""
import warnings
from airflow.models import BaseOperator
from airflow.providers.google.cloud.hooks.gcs import GCSHook
from airflow.utils.decorators import apply_defaults
class LocalFilesystemToGCSOperator(BaseOperator):
"""
Uploads a file to Google Cloud Storage.
Optionally can compress the file for upload.
:param src: Path to the local file. (templated)
:type src: str
:param dst: The object name to set when uploading the file. (templated)
:type dst: str
:param bucket: The bucket to upload to. (templated)
:type bucket: str
:param gcp_conn_id: (Optional) The connection ID used to connect to Google Cloud Platform.
:type gcp_conn_id: str
:param google_cloud_storage_conn_id: (Deprecated) The connection ID used to connect to Google Cloud
Platform. This parameter has been deprecated. You should pass the gcp_conn_id parameter instead.
:type google_cloud_storage_conn_id: str
:param mime_type: The mime-type string
:type mime_type: str
:param delegate_to: The account to impersonate, if any
:type delegate_to: str
:param gzip: Allows for file to be compressed and uploaded as gzip
:type gzip: bool
"""
template_fields = ('src', 'dst', 'bucket')
@apply_defaults
def __init__(self,
src,
dst,
bucket,
gcp_conn_id='google_cloud_default',
google_cloud_storage_conn_id=None,
mime_type='application/octet-stream',
delegate_to=None,
gzip=False,
*args,
**kwargs):
super().__init__(*args, **kwargs)
if google_cloud_storage_conn_id:
warnings.warn(
"The google_cloud_storage_conn_id parameter has been deprecated. You should pass "
"the gcp_conn_id parameter.", DeprecationWarning, stacklevel=3)
gcp_conn_id = google_cloud_storage_conn_id
self.src = src
self.dst = dst
self.bucket = bucket
self.gcp_conn_id = gcp_conn_id
self.mime_type = mime_type
self.delegate_to = delegate_to
self.gzip = gzip
def execute(self, context):
"""
Uploads the file to Google Cloud Storage
"""
hook = GCSHook(
google_cloud_storage_conn_id=self.gcp_conn_id,
delegate_to=self.delegate_to)
hook.upload(
bucket_name=self.bucket,
object_name=self.dst,
mime_type=self.mime_type,
filename=self.src,
gzip=self.gzip,
)
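# Minimal usage sketch (not part of the original module); the DAG id, file
# paths and bucket name below are hypothetical placeholders.
def _example_dag():
    from datetime import datetime
    from airflow import DAG
    with DAG(dag_id='example_local_to_gcs',
             start_date=datetime(2020, 1, 1),
             schedule_interval=None) as dag:
        LocalFilesystemToGCSOperator(
            task_id='upload_file',
            src='/tmp/example.csv',      # local file to upload (templated)
            dst='data/example.csv',      # object name in the bucket (templated)
            bucket='my-example-bucket',  # target bucket (templated)
        )
    return dag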
|
py | 1a5cec3e6d52105645f8abf406a445c9a7f772fb |
# Import libraries
import argparse
from azureml.core import Run
# Get parameters
parser = argparse.ArgumentParser()
parser.add_argument('--model_folder', type=str, dest='model_folder',
default="driver-training", help='model location')
args = parser.parse_args()
model_folder = args.model_folder
# Get the experiment run context
run = Run.get_context()
# get the path of the model file
print("Loading model from " + model_folder)
model_name = 'porto_seguro_safe_driver_model'
model_file = model_folder + "/" + model_name + ".pkl"
# Get metrics for registration
metrics = run.parent.get_metrics()
# Register the model
run.upload_file(model_name, model_file)
run.register_model(
model_path=model_name,
model_name=model_name,
tags=metrics)
run.complete()
|
py | 1a5ced67dc09614b1c210f23be68a857fc245fd1 |
# Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from copy import deepcopy
from multiprocessing.pool import Pool
import numpy as np
from collections import OrderedDict
from batchgenerators.utilities.file_and_folder_operations import *
from meddec.paper_plot.nature_methods.challenge_visualization_stuff.own_implementation.ranking import \
rank_then_aggregate
import scipy.stats as ss
from nnunet.dataset_conversion.Task032_BraTS_2018 import convert_labels_back_to_BraTS_2018_2019_convention
from nnunet.dataset_conversion.Task043_BraTS_2019 import copy_BraTS_segmentation_and_convert_labels
from nnunet.evaluation.region_based_evaluation import get_brats_regions, evaluate_regions
from nnunet.paths import nnUNet_raw_data
import SimpleITK as sitk
import shutil
from medpy.metric import dc, hd95
from nnunet.postprocessing.consolidate_postprocessing import collect_cv_niftis
from typing import Tuple
def apply_brats_threshold(fname, out_dir, threshold, replace_with):
img_itk = sitk.ReadImage(fname)
img_npy = sitk.GetArrayFromImage(img_itk)
s = np.sum(img_npy == 3)
if s < threshold:
print(s, fname)
img_npy[img_npy == 3] = replace_with
img_itk_postprocessed = sitk.GetImageFromArray(img_npy)
img_itk_postprocessed.CopyInformation(img_itk)
sitk.WriteImage(img_itk_postprocessed, join(out_dir, fname.split("/")[-1]))
def load_niftis_threshold_compute_dice(gt_file, pred_file, thresholds: Tuple[list, tuple]):
gt = sitk.GetArrayFromImage(sitk.ReadImage(gt_file))
pred = sitk.GetArrayFromImage(sitk.ReadImage(pred_file))
mask_pred = pred == 3
mask_gt = gt == 3
num_pred = np.sum(mask_pred)
num_gt = np.sum(mask_gt)
dice = dc(mask_pred, mask_gt)
res_dice = {}
res_was_smaller = {}
for t in thresholds:
was_smaller = False
if num_pred < t:
was_smaller = True
if num_gt == 0:
dice_here = 1.
else:
dice_here = 0.
else:
dice_here = deepcopy(dice)
res_dice[t] = dice_here
res_was_smaller[t] = was_smaller
return res_was_smaller, res_dice
def apply_threshold_to_folder(folder_in, folder_out, threshold, replace_with, processes=24):
maybe_mkdir_p(folder_out)
niftis = subfiles(folder_in, suffix='.nii.gz', join=True)
p = Pool(processes)
p.starmap(apply_brats_threshold, zip(niftis, [folder_out]*len(niftis), [threshold]*len(niftis), [replace_with] * len(niftis)))
p.close()
p.join()
def determine_brats_postprocessing(folder_with_preds, folder_with_gt, postprocessed_output_dir, processes=8,
thresholds=(0, 10, 50, 100, 200, 500, 750, 1000, 1500, 2500, 10000), replace_with=2):
# find pairs
nifti_gt = subfiles(folder_with_gt, suffix=".nii.gz", sort=True)
p = Pool(processes)
nifti_pred = subfiles(folder_with_preds, suffix='.nii.gz', sort=True)
results = p.starmap_async(load_niftis_threshold_compute_dice, zip(nifti_gt, nifti_pred, [thresholds] * len(nifti_pred)))
results = results.get()
all_dc_per_threshold = {}
for t in thresholds:
all_dc_per_threshold[t] = np.array([i[1][t] for i in results])
print(t, np.mean(all_dc_per_threshold[t]))
means = [np.mean(all_dc_per_threshold[t]) for t in thresholds]
best_threshold = thresholds[np.argmax(means)]
print('best', best_threshold, means[np.argmax(means)])
maybe_mkdir_p(postprocessed_output_dir)
p.starmap(apply_brats_threshold, zip(nifti_pred, [postprocessed_output_dir]*len(nifti_pred), [best_threshold]*len(nifti_pred), [replace_with] * len(nifti_pred)))
p.close()
p.join()
save_pickle((thresholds, means, best_threshold, all_dc_per_threshold), join(postprocessed_output_dir, "threshold.pkl"))
def collect_and_prepare(base_dir, num_processes = 12, clean=False):
"""
collect all cv_niftis, compute brats metrics, compute enh tumor thresholds and summarize in csv
:param base_dir:
:return:
"""
out = join(base_dir, 'cv_results')
out_pp = join(base_dir, 'cv_results_pp')
experiments = subfolders(base_dir, join=False, prefix='nnUNetTrainer')
regions = get_brats_regions()
gt_dir = join(base_dir, 'gt_niftis')
replace_with = 2
failed = []
successful = []
for e in experiments:
print(e)
try:
o = join(out, e)
o_p = join(out_pp, e)
maybe_mkdir_p(o)
maybe_mkdir_p(o_p)
collect_cv_niftis(join(base_dir, e), o)
if clean or not isfile(join(o, 'summary.csv')):
evaluate_regions(o, gt_dir, regions, num_processes)
if clean or not isfile(join(o_p, 'threshold.pkl')):
determine_brats_postprocessing(o, gt_dir, o_p, num_processes, thresholds=list(np.arange(0, 760, 10)), replace_with=replace_with)
if clean or not isfile(join(o_p, 'summary.csv')):
evaluate_regions(o_p, gt_dir, regions, num_processes)
successful.append(e)
except Exception as ex:
print("\nERROR\n", e, ex, "\n")
failed.append(e)
# we are interested in the mean (nan is 1) column
with open(join(base_dir, 'cv_summary.csv'), 'w') as f:
f.write('name,whole,core,enh,mean\n')
for e in successful:
expected_nopp = join(out, e, 'summary.csv')
            expected_pp = join(out_pp, e, 'summary.csv')
if isfile(expected_nopp):
res = np.loadtxt(expected_nopp, dtype=str, skiprows=0, delimiter=',')[-2]
as_numeric = [float(i) for i in res[1:]]
f.write(e + '_noPP,')
f.write("%0.4f," % as_numeric[0])
f.write("%0.4f," % as_numeric[1])
f.write("%0.4f," % as_numeric[2])
f.write("%0.4f\n" % np.mean(as_numeric))
if isfile(expected_pp):
res = np.loadtxt(expected_pp, dtype=str, skiprows=0, delimiter=',')[-2]
as_numeric = [float(i) for i in res[1:]]
f.write(e + '_PP,')
f.write("%0.4f," % as_numeric[0])
f.write("%0.4f," % as_numeric[1])
f.write("%0.4f," % as_numeric[2])
f.write("%0.4f\n" % np.mean(as_numeric))
# this just crawls the folders and evaluates what it finds
with open(join(base_dir, 'cv_summary2.csv'), 'w') as f:
for folder in ['cv_results', 'cv_results_pp']:
for ex in subdirs(join(base_dir, folder), join=False):
print(folder, ex)
expected = join(base_dir, folder, ex, 'summary.csv')
if clean or not isfile(expected):
evaluate_regions(join(base_dir, folder, ex), gt_dir, regions, num_processes)
if isfile(expected):
res = np.loadtxt(expected, dtype=str, skiprows=0, delimiter=',')[-2]
as_numeric = [float(i) for i in res[1:]]
f.write('%s__%s,' % (folder, ex))
f.write("%0.4f," % as_numeric[0])
f.write("%0.4f," % as_numeric[1])
f.write("%0.4f," % as_numeric[2])
f.write("%0.4f\n" % np.mean(as_numeric))
f.write('name,whole,core,enh,mean\n')
for e in successful:
expected_nopp = join(out, e, 'summary.csv')
            expected_pp = join(out_pp, e, 'summary.csv')
if isfile(expected_nopp):
res = np.loadtxt(expected_nopp, dtype=str, skiprows=0, delimiter=',')[-2]
as_numeric = [float(i) for i in res[1:]]
f.write(e + '_noPP,')
f.write("%0.4f," % as_numeric[0])
f.write("%0.4f," % as_numeric[1])
f.write("%0.4f," % as_numeric[2])
f.write("%0.4f\n" % np.mean(as_numeric))
if isfile(expected_pp):
res = np.loadtxt(expected_pp, dtype=str, skiprows=0, delimiter=',')[-2]
as_numeric = [float(i) for i in res[1:]]
f.write(e + '_PP,')
f.write("%0.4f," % as_numeric[0])
f.write("%0.4f," % as_numeric[1])
f.write("%0.4f," % as_numeric[2])
f.write("%0.4f\n" % np.mean(as_numeric))
# apply threshold to val set
expected_num_cases = 125
missing_valset = []
has_val_pred = []
for e in successful:
if isdir(join(base_dir, 'predVal', e)):
currdir = join(base_dir, 'predVal', e)
files = subfiles(currdir, suffix='.nii.gz', join=False)
if len(files) != expected_num_cases:
print(e, 'prediction not done, found %d files, expected %s' % (len(files), expected_num_cases))
continue
output_folder = join(base_dir, 'predVal_PP', e)
maybe_mkdir_p(output_folder)
threshold = load_pickle(join(out_pp, e, 'threshold.pkl'))[2]
if threshold > 1000: threshold = 750 # don't make it too big!
apply_threshold_to_folder(currdir, output_folder, threshold, replace_with, num_processes)
has_val_pred.append(e)
else:
print(e, 'has no valset predictions')
missing_valset.append(e)
# 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold' needs special treatment
e = 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5'
currdir = join(base_dir, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
output_folder = join(base_dir, 'predVal_PP', 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
maybe_mkdir_p(output_folder)
threshold = load_pickle(join(out_pp, e, 'threshold.pkl'))[2]
if threshold > 1000: threshold = 750 # don't make it too big!
apply_threshold_to_folder(currdir, output_folder, threshold, replace_with, num_processes)
    # 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold' needs special treatment
e = 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5'
currdir = join(base_dir, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
output_folder = join(base_dir, 'predVal_PP', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
maybe_mkdir_p(output_folder)
threshold = load_pickle(join(out_pp, e, 'threshold.pkl'))[2]
if threshold > 1000: threshold = 750 # don't make it too big!
apply_threshold_to_folder(currdir, output_folder, threshold, replace_with, num_processes)
# convert val set to brats labels for submission
output_converted = join(base_dir, 'converted_valSet')
for source in ['predVal', 'predVal_PP']:
for e in has_val_pred + ['nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold']:
expected_source_folder = join(base_dir, source, e)
if not isdir(expected_source_folder):
print(e, 'has no', source)
raise RuntimeError()
files = subfiles(expected_source_folder, suffix='.nii.gz', join=False)
if len(files) != expected_num_cases:
print(e, 'prediction not done, found %d files, expected %s' % (len(files), expected_num_cases))
continue
target_folder = join(output_converted, source, e)
maybe_mkdir_p(target_folder)
convert_labels_back_to_BraTS_2018_2019_convention(expected_source_folder, target_folder)
summarize_validation_set_predictions(output_converted)
def summarize_validation_set_predictions(base):
with open(join(base, 'summary.csv'), 'w') as f:
f.write('name,whole,core,enh,mean,whole,core,enh,mean\n')
for subf in subfolders(base, join=False):
for e in subfolders(join(base, subf), join=False):
expected = join(base, subf, e, 'Stats_Validation_final.csv')
if not isfile(expected):
print(subf, e, 'has missing csv')
continue
a = np.loadtxt(expected, delimiter=',', dtype=str)
assert a.shape[0] == 131, 'did not evaluate all 125 cases!'
selected_row = a[-5]
values = [float(i) for i in selected_row[1:4]]
f.write(e + "_" + subf + ',')
f.write("%0.4f," % values[1])
f.write("%0.4f," % values[2])
f.write("%0.4f," % values[0])
f.write("%0.4f," % np.mean(values))
values = [float(i) for i in selected_row[-3:]]
f.write("%0.4f," % values[1])
f.write("%0.4f," % values[2])
f.write("%0.4f," % values[0])
f.write("%0.4f\n" % np.mean(values))
def compute_BraTS_dice(ref, pred):
"""
    ref and pred are binary integer numpy.ndarray s
    :param ref:
    :param pred:
:return:
"""
num_ref = np.sum(ref)
num_pred = np.sum(pred)
if num_ref == 0:
if num_pred == 0:
return 1
else:
return 0
else:
return dc(pred, ref)
def convert_all_to_BraTS(input_folder, output_folder, expected_num_cases=125):
for s in subdirs(input_folder, join=False):
nii = subfiles(join(input_folder, s), suffix='.nii.gz', join=False)
if len(nii) != expected_num_cases:
print(s)
else:
target_dir = join(output_folder, s)
convert_labels_back_to_BraTS_2018_2019_convention(join(input_folder, s), target_dir, num_processes=6)
def compute_BraTS_HD95(ref, pred):
"""
    ref and pred are binary integer numpy.ndarray s
spacing is assumed to be (1, 1, 1)
:param ref:
:param pred:
:return:
"""
num_ref = np.sum(ref)
num_pred = np.sum(pred)
if num_ref == 0:
if num_pred == 0:
return 0
else:
return 373.12866
elif num_pred == 0 and num_ref != 0:
return 373.12866
else:
return hd95(pred, ref, (1, 1, 1))
def evaluate_BraTS_case(arr: np.ndarray, arr_gt: np.ndarray):
"""
attempting to reimplement the brats evaluation scheme
assumes edema=1, non_enh=2, enh=3
:param arr:
:param arr_gt:
:return:
"""
# whole tumor
mask_gt = (arr_gt != 0).astype(int)
mask_pred = (arr != 0).astype(int)
dc_whole = compute_BraTS_dice(mask_gt, mask_pred)
hd95_whole = compute_BraTS_HD95(mask_gt, mask_pred)
del mask_gt, mask_pred
# tumor core
mask_gt = (arr_gt > 1).astype(int)
mask_pred = (arr > 1).astype(int)
dc_core = compute_BraTS_dice(mask_gt, mask_pred)
hd95_core = compute_BraTS_HD95(mask_gt, mask_pred)
del mask_gt, mask_pred
# enhancing
mask_gt = (arr_gt == 3).astype(int)
mask_pred = (arr == 3).astype(int)
dc_enh = compute_BraTS_dice(mask_gt, mask_pred)
hd95_enh = compute_BraTS_HD95(mask_gt, mask_pred)
del mask_gt, mask_pred
return dc_whole, dc_core, dc_enh, hd95_whole, hd95_core, hd95_enh
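# Tiny illustrative sketch (not part of the original module): with a prediction
# identical to the reference, all three Dice values are 1 and all three HD95
# values are 0.
def _example_evaluate_case():
    arr_gt = np.zeros((4, 4, 4), dtype=int)
    arr_gt[1, 1, 1] = 1  # edema
    arr_gt[1, 1, 2] = 2  # non-enhancing tumor
    arr_gt[1, 2, 2] = 3  # enhancing tumor
    arr_pred = arr_gt.copy()  # perfect prediction
    return evaluate_BraTS_case(arr_pred, arr_gt)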
def load_evaluate(filename_gt: str, filename_pred: str):
arr_pred = sitk.GetArrayFromImage(sitk.ReadImage(filename_pred))
arr_gt = sitk.GetArrayFromImage(sitk.ReadImage(filename_gt))
return evaluate_BraTS_case(arr_pred, arr_gt)
def evaluate_BraTS_folder(folder_pred, folder_gt, num_processes: int = 24, strict=False):
nii_pred = subfiles(folder_pred, suffix='.nii.gz', join=False)
if len(nii_pred) == 0:
return
nii_gt = subfiles(folder_gt, suffix='.nii.gz', join=False)
assert all([i in nii_gt for i in nii_pred]), 'not all predicted niftis have a reference file!'
if strict:
assert all([i in nii_pred for i in nii_gt]), 'not all gt niftis have a predicted file!'
p = Pool(num_processes)
nii_pred_fullpath = [join(folder_pred, i) for i in nii_pred]
nii_gt_fullpath = [join(folder_gt, i) for i in nii_pred]
results = p.starmap(load_evaluate, zip(nii_gt_fullpath, nii_pred_fullpath))
# now write to output file
with open(join(folder_pred, 'results.csv'), 'w') as f:
f.write("name,dc_whole,dc_core,dc_enh,hd95_whole,hd95_core,hd95_enh\n")
for fname, r in zip(nii_pred, results):
f.write(fname)
f.write(",%0.4f,%0.4f,%0.4f,%3.3f,%3.3f,%3.3f\n" % r)
def load_csv_for_ranking(csv_file: str):
res = np.loadtxt(csv_file, dtype='str', delimiter=',')
scores = res[1:, [1, 2, 3, -3, -2, -1]].astype(float)
scores[:, -3:] *= -1
scores[:, -3:] += 373.129
assert np.all(scores <= 373.129)
assert np.all(scores >= 0)
return scores
def rank_algorithms(data:np.ndarray):
"""
data is (metrics x experiments x cases)
:param data:
:return:
"""
num_metrics, num_experiments, num_cases = data.shape
ranks = np.zeros((num_metrics, num_experiments))
for m in range(6):
r = np.apply_along_axis(ss.rankdata, 0, -data[m], 'min')
ranks[m] = r.mean(1)
average_rank = np.mean(ranks, 0)
final_ranks = ss.rankdata(average_rank, 'min')
return final_ranks, average_rank, ranks
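# Illustrative shape note (not part of the original): with 6 metrics,
# 3 experiments and 100 cases, rank_algorithms expects data of shape
# (6, 3, 100) and returns final_ranks and average_rank of shape (3,) plus
# per-metric ranks of shape (6, 3); lower ranks are better.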
def score_and_postprocess_model_based_on_rank_then_aggregate():
"""
Similarly to BraTS 2017 - BraTS 2019, each participant will be ranked for each of the X test cases. Each case
includes 3 regions of evaluation, and the metrics used to produce the rankings will be the Dice Similarity
Coefficient and the 95% Hausdorff distance. Thus, for X number of cases included in the BraTS 2020, each
participant ends up having X*3*2 rankings. The final ranking score is the average of all these rankings normalized
by the number of teams.
https://zenodo.org/record/3718904
-> let's optimize for this.
Important: the outcome very much depends on the competing models. We need some references. We only got our own,
so let's hope this still works
:return:
"""
base = "/media/fabian/Results/nnUNet/3d_fullres/Task082_BraTS2020"
replace_with = 2
num_processes = 24
expected_num_cases_val = 125
# use a separate output folder from the previous experiments to ensure we are not messing things up
output_base_here = join(base, 'use_brats_ranking')
maybe_mkdir_p(output_base_here)
# collect cv niftis and compute metrics with evaluate_BraTS_folder to ensure we work with the same metrics as brats
out = join(output_base_here, 'cv_results')
experiments = subfolders(base, join=False, prefix='nnUNetTrainer')
gt_dir = join(base, 'gt_niftis')
experiments_with_full_cv = []
for e in experiments:
print(e)
o = join(out, e)
maybe_mkdir_p(o)
try:
collect_cv_niftis(join(base, e), o)
if not isfile(join(o, 'results.csv')):
evaluate_BraTS_folder(o, gt_dir, num_processes, strict=True)
experiments_with_full_cv.append(e)
except Exception as ex:
print("\nERROR\n", e, ex, "\n")
if isfile(join(o, 'results.csv')):
os.remove(join(o, 'results.csv'))
# rank the non-postprocessed models
tmp = np.loadtxt(join(out, experiments_with_full_cv[0], 'results.csv'), dtype='str', delimiter=',')
num_cases = len(tmp) - 1
data_for_ranking = np.zeros((6, len(experiments_with_full_cv), num_cases))
for i, e in enumerate(experiments_with_full_cv):
scores = load_csv_for_ranking(join(out, e, 'results.csv'))
for metric in range(6):
data_for_ranking[metric, i] = scores[:, metric]
final_ranks, average_rank, ranks = rank_algorithms(data_for_ranking)
for t in np.argsort(final_ranks):
print(final_ranks[t], average_rank[t], experiments_with_full_cv[t])
# for each model, create output directories with different thresholds. evaluate ALL OF THEM (might take a while lol)
thresholds = np.arange(25, 751, 25)
output_pp_tmp = join(output_base_here, 'cv_determine_pp_thresholds')
for e in experiments_with_full_cv:
input_folder = join(out, e)
for t in thresholds:
output_directory = join(output_pp_tmp, e, str(t))
maybe_mkdir_p(output_directory)
if not isfile(join(output_directory, 'results.csv')):
apply_threshold_to_folder(input_folder, output_directory, t, replace_with, processes=16)
evaluate_BraTS_folder(output_directory, gt_dir, num_processes)
# load ALL the results!
results = []
experiment_names = []
for e in experiments_with_full_cv:
for t in thresholds:
output_directory = join(output_pp_tmp, e, str(t))
expected_file = join(output_directory, 'results.csv')
if not isfile(expected_file):
print(e, 'does not have a results file for threshold', t)
continue
results.append(load_csv_for_ranking(expected_file))
experiment_names.append("%s___%d" % (e, t))
all_results = np.concatenate([i[None] for i in results], 0).transpose((2, 0, 1))
# concatenate with non postprocessed models
all_results = np.concatenate((data_for_ranking, all_results), 1)
experiment_names += experiments_with_full_cv
final_ranks, average_rank, ranks = rank_algorithms(all_results)
for t in np.argsort(final_ranks):
print(final_ranks[t], average_rank[t], experiment_names[t])
# for each model, print the non postprocessed model as well as the best postprocessed model. If there are
# validation set predictions, apply the best threshold to the validation set
pred_val_base = join(base, 'predVal_PP_rank')
has_val_pred = []
for e in experiments_with_full_cv:
rank_nonpp = final_ranks[experiment_names.index(e)]
avg_rank_nonpp = average_rank[experiment_names.index(e)]
print(e, avg_rank_nonpp, rank_nonpp)
predicted_val = join(base, 'predVal', e)
pp_models = [j for j, i in enumerate(experiment_names) if i.split("___")[0] == e and i != e]
if len(pp_models) > 0:
ranks = [final_ranks[i] for i in pp_models]
best_idx = np.argmin(ranks)
best = experiment_names[pp_models[best_idx]]
best_avg_rank = average_rank[pp_models[best_idx]]
print(best, best_avg_rank, min(ranks))
print('')
# apply threshold to validation set
best_threshold = int(best.split('___')[-1])
if not isdir(predicted_val):
                print(e, 'has no valset predictions')
else:
files = subfiles(predicted_val, suffix='.nii.gz')
if len(files) != expected_num_cases_val:
print(e, 'has missing val cases. found: %d expected: %d' % (len(files), expected_num_cases_val))
else:
apply_threshold_to_folder(predicted_val, join(pred_val_base, e), best_threshold, replace_with, num_processes)
has_val_pred.append(e)
else:
print(e, 'not found in ranking')
# apply nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5 to nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold
e = 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5'
pp_models = [j for j, i in enumerate(experiment_names) if i.split("___")[0] == e and i != e]
ranks = [final_ranks[i] for i in pp_models]
best_idx = np.argmin(ranks)
best = experiment_names[pp_models[best_idx]]
best_avg_rank = average_rank[pp_models[best_idx]]
best_threshold = int(best.split('___')[-1])
predicted_val = join(base, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
apply_threshold_to_folder(predicted_val, join(pred_val_base, 'nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold'), best_threshold, replace_with, num_processes)
has_val_pred.append('nnUNetTrainerV2BraTSRegions_DA3_BN__nnUNetPlansv2.1_bs5_15fold')
# apply nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5 to nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold
e = 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5'
pp_models = [j for j, i in enumerate(experiment_names) if i.split("___")[0] == e and i != e]
ranks = [final_ranks[i] for i in pp_models]
best_idx = np.argmin(ranks)
best = experiment_names[pp_models[best_idx]]
best_avg_rank = average_rank[pp_models[best_idx]]
best_threshold = int(best.split('___')[-1])
predicted_val = join(base, 'predVal', 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
apply_threshold_to_folder(predicted_val, join(pred_val_base, 'nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold'), best_threshold, replace_with, num_processes)
has_val_pred.append('nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold')
# convert valsets
output_converted = join(base, 'converted_valSet')
for e in has_val_pred:
expected_source_folder = join(base, 'predVal_PP_rank', e)
if not isdir(expected_source_folder):
print(e, 'has no predVal_PP_rank')
raise RuntimeError()
files = subfiles(expected_source_folder, suffix='.nii.gz', join=False)
if len(files) != expected_num_cases_val:
print(e, 'prediction not done, found %d files, expected %s' % (len(files), expected_num_cases_val))
continue
target_folder = join(output_converted, 'predVal_PP_rank', e)
maybe_mkdir_p(target_folder)
convert_labels_back_to_BraTS_2018_2019_convention(expected_source_folder, target_folder)
# now load all the csvs for the validation set (obtained from evaluation platform) and rank our models on the
# validation set
flds = subdirs(output_converted, join=False)
results_valset = []
names_valset = []
for f in flds:
curr = join(output_converted, f)
experiments = subdirs(curr, join=False)
for e in experiments:
currr = join(curr, e)
expected_file = join(currr, 'Stats_Validation_final.csv')
if not isfile(expected_file):
print(f, e, "has not been evaluated yet!")
else:
res = load_csv_for_ranking(expected_file)[:-5]
assert res.shape[0] == expected_num_cases_val
results_valset.append(res[None])
names_valset.append("%s___%s" % (f, e))
results_valset = np.concatenate(results_valset, 0) # experiments x cases x metrics
# convert to metrics x experiments x cases
results_valset = results_valset.transpose((2, 0, 1))
final_ranks, average_rank, ranks = rank_algorithms(results_valset)
for t in np.argsort(final_ranks):
print(final_ranks[t], average_rank[t], names_valset[t])
if __name__ == "__main__":
"""
THIS CODE IS A MESS. IT IS PROVIDED AS IS WITH NO GUARANTEES. YOU HAVE TO DIG THROUGH IT YOURSELF. GOOD LUCK ;-)
"""
"""
REMEMBER TO CONVERT LABELS BACK TO BRATS CONVENTION AFTER PREDICTION!
"""
task_name = "Task082_BraTS2020"
downloaded_data_dir = "/home/fabian/Downloads/MICCAI_BraTS2020_TrainingData"
downloaded_data_dir_val = "/home/fabian/Downloads/MICCAI_BraTS2020_ValidationData"
target_base = join(nnUNet_raw_data, task_name)
target_imagesTr = join(target_base, "imagesTr")
target_imagesVal = join(target_base, "imagesVal")
target_imagesTs = join(target_base, "imagesTs")
target_labelsTr = join(target_base, "labelsTr")
maybe_mkdir_p(target_imagesTr)
maybe_mkdir_p(target_imagesVal)
maybe_mkdir_p(target_imagesTs)
maybe_mkdir_p(target_labelsTr)
patient_names = []
cur = join(downloaded_data_dir)
for p in subdirs(cur, join=False):
patdir = join(cur, p)
patient_name = p
patient_names.append(patient_name)
t1 = join(patdir, p + "_t1.nii.gz")
t1c = join(patdir, p + "_t1ce.nii.gz")
t2 = join(patdir, p + "_t2.nii.gz")
flair = join(patdir, p + "_flair.nii.gz")
seg = join(patdir, p + "_seg.nii.gz")
assert all([
isfile(t1),
isfile(t1c),
isfile(t2),
isfile(flair),
isfile(seg)
]), "%s" % patient_name
shutil.copy(t1, join(target_imagesTr, patient_name + "_0000.nii.gz"))
shutil.copy(t1c, join(target_imagesTr, patient_name + "_0001.nii.gz"))
shutil.copy(t2, join(target_imagesTr, patient_name + "_0002.nii.gz"))
shutil.copy(flair, join(target_imagesTr, patient_name + "_0003.nii.gz"))
copy_BraTS_segmentation_and_convert_labels(seg, join(target_labelsTr, patient_name + ".nii.gz"))
json_dict = OrderedDict()
json_dict['name'] = "BraTS2020"
json_dict['description'] = "nothing"
json_dict['tensorImageSize'] = "4D"
json_dict['reference'] = "see BraTS2020"
json_dict['licence'] = "see BraTS2020 license"
json_dict['release'] = "0.0"
json_dict['modality'] = {
"0": "T1",
"1": "T1ce",
"2": "T2",
"3": "FLAIR"
}
json_dict['labels'] = {
"0": "background",
"1": "edema",
"2": "non-enhancing",
"3": "enhancing",
}
json_dict['numTraining'] = len(patient_names)
json_dict['numTest'] = 0
json_dict['training'] = [{'image': "./imagesTr/%s.nii.gz" % i, "label": "./labelsTr/%s.nii.gz" % i} for i in
patient_names]
json_dict['test'] = []
save_json(json_dict, join(target_base, "dataset.json"))
if downloaded_data_dir_val is not None:
for p in subdirs(downloaded_data_dir_val, join=False):
patdir = join(downloaded_data_dir_val, p)
patient_name = p
t1 = join(patdir, p + "_t1.nii.gz")
t1c = join(patdir, p + "_t1ce.nii.gz")
t2 = join(patdir, p + "_t2.nii.gz")
flair = join(patdir, p + "_flair.nii.gz")
assert all([
isfile(t1),
isfile(t1c),
isfile(t2),
isfile(flair),
]), "%s" % patient_name
shutil.copy(t1, join(target_imagesVal, patient_name + "_0000.nii.gz"))
shutil.copy(t1c, join(target_imagesVal, patient_name + "_0001.nii.gz"))
shutil.copy(t2, join(target_imagesVal, patient_name + "_0002.nii.gz"))
shutil.copy(flair, join(target_imagesVal, patient_name + "_0003.nii.gz"))
downloaded_data_dir_test = "/home/fabian/Downloads/MICCAI_BraTS2020_TestingData"
if isdir(downloaded_data_dir_test):
for p in subdirs(downloaded_data_dir_test, join=False):
patdir = join(downloaded_data_dir_test, p)
patient_name = p
t1 = join(patdir, p + "_t1.nii.gz")
t1c = join(patdir, p + "_t1ce.nii.gz")
t2 = join(patdir, p + "_t2.nii.gz")
flair = join(patdir, p + "_flair.nii.gz")
assert all([
isfile(t1),
isfile(t1c),
isfile(t2),
isfile(flair),
]), "%s" % patient_name
shutil.copy(t1, join(target_imagesTs, patient_name + "_0000.nii.gz"))
shutil.copy(t1c, join(target_imagesTs, patient_name + "_0001.nii.gz"))
shutil.copy(t2, join(target_imagesTs, patient_name + "_0002.nii.gz"))
shutil.copy(flair, join(target_imagesTs, patient_name + "_0003.nii.gz"))
# test set
# nnUNet_ensemble -f nnUNetTrainerV2BraTSRegions_DA3_BN_BD__nnUNetPlansv2.1_bs5_5fold nnUNetTrainerV2BraTSRegions_DA4_BN_BD__nnUNetPlansv2.1_bs5_5fold nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold -o ensembled_nnUNetTrainerV2BraTSRegions_DA3_BN_BD__nnUNetPlansv2.1_bs5_5fold__nnUNetTrainerV2BraTSRegions_DA4_BN_BD__nnUNetPlansv2.1_bs5_5fold__nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold
# apply_threshold_to_folder('ensembled_nnUNetTrainerV2BraTSRegions_DA3_BN_BD__nnUNetPlansv2.1_bs5_5fold__nnUNetTrainerV2BraTSRegions_DA4_BN_BD__nnUNetPlansv2.1_bs5_5fold__nnUNetTrainerV2BraTSRegions_DA4_BN__nnUNetPlansv2.1_bs5_15fold/', 'ensemble_PP100/', 100, 2)
# convert_labels_back_to_BraTS_2018_2019_convention('ensemble_PP100/', 'ensemble_PP100_converted')
|
py | 1a5cee7d7425f97d4cc3dfaf45ad0718b220ee10 |
#!/usr/bin/env python
#
# Gather revision statistics from version control system.
# The process is not optimized for speed or anything. It
# just uses command line tools and updates the working copy
# from revision to revision.
#
# 1. Get a fresh clone / checkout
# 2. Run gather.py inside of it
#
#
# --- dataset 1: size ---
#
# - size of all files in revision
# - number of files
# - number of dirs
#
#
# status
#
# [ ] subversion traversing
# [x] getting list of all revisions
# [ ] processing one revision at a time
# [ ] update copy to revision
# [x] stop on error
# [ ] save progress
# [ ] rollback bad revision data
# [ ] ...
#
# [ ] mercurial traversing
# [x] getting list of all revisions
# [ ] processing one revision at a time
# [x] update copy to revision
# [x] stop on error
# [ ] save progress
# [ ] rollback bad revision data
# [ ] ...
import copy
import os
import sys
import subprocess
PY3K = sys.version_info[0] == 3
SET1 = {
'totalsize': 0,
'dirsnum': 0,
'filesnum': 0,
}
def echo(msg):
'''only for debug'''
pass # print msg
def runout(cmd):
if not PY3K:
return subprocess.check_output(cmd, shell=True)
else:
return subprocess.check_output(cmd, shell=True).decode('utf-8')
class HG(object):
def check_clean(self):
"""check that working copy is clean and can be
successfully updated to any revision"""
if len(runout('hg status')) != 0:
return False
else:
return True
def revlist(self):
"""get list of revisions from oldest to youngest"""
output = runout('hg log --template "{rev}\\n"')
rev = []
for line in output.splitlines():
rev.append(line)
return reversed(rev)
def up(self, rev):
runout('hg up -r %s' % rev)
class SVN(object):
def check_clean(self):
"""check that working copy is clean and can be
successfully updated to any revision"""
if len(runout('svn status')) != 0:
return False
else:
return True
def revlist(self):
"""get list of revisions from oldest to youngest"""
output = runout('svn info -r HEAD')
lastrev = 0
for line in output.splitlines():
if line.startswith('Revision: '):
lastrev = line.strip().split()[1]
lastrev = int(lastrev)
rev = range(1, lastrev + 1)
return rev
def up(self, rev):
runout('svn up -r %s' % rev)
def process(path, ignore=[]):
"""calculate SET1 directory stats for given path, skipping
directories mentioned in ignore (e.g. '.hg', '.svn', ...)
"""
if not PY3K:
        # unicode is critical for non-English local names on Windows
path = unicode(path)
s = copy.copy(SET1)
s['totalsize'] = 0
for root, dirs, files in os.walk(path):
# filter directories
for ig in ignore:
if ig in dirs:
dirs.remove(ig)
for f in files:
s['totalsize'] += os.path.getsize(os.path.join(root, f))
s['filesnum'] += len(files)
s['dirsnum'] += len(dirs)
return s
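# Illustrative sketch (assumption: not part of the original tool): how process()
# is meant to be called and the shape of the dict it returns. The '.hg' ignore
# entry mirrors the reptype-based filtering in the __main__ block below.
def _process_usage_sketch():
    """Example only: gather SET1-style stats for the current directory."""
    stats = process('.', ignore=['.hg'])
    # stats looks like {'totalsize': 12345, 'dirsnum': 3, 'filesnum': 42}
    return stats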
if __name__ == '__main__':
# detect version control type
i = 0
reptype = None
for n in ['.svn', '.hg']:
if os.path.isdir(n):
i += 1
reptype = n
if i == 0:
sys.exit('Error: Can\'t detect version control system')
if i > 1:
sys.exit('Error: Detected several version control systems')
# get API to repository information
if reptype == '.hg':
repapi = HG()
echo('HG selected')
else:
repapi = SVN()
echo('SVN selected')
# get clearance
if not repapi.check_clean():
sys.exit('Error: Working copy is not clean, can not continue')
# CSV header
sys.stdout.write("revision, size, dirs, files\n")
for rev in repapi.revlist():
repapi.up(rev)
line = process('.', ignore=[reptype])
line['rev'] = rev
#print line
s = "{rev}, {totalsize}, {dirsnum}, {filesnum}\n".format(**line)
sys.stdout.write(s)
sys.stdout.flush()
|
py
|
1a5cf03f896a123974d58f45d4798bca6adfe817
|
import pyaf.Bench.TS_datasets as tsds
import pyaf.tests.artificial.process_artificial_dataset as art
art.process_dataset(N = 128 , FREQ = 'D', seed = 0, trendtype = "PolyTrend", cycle_length = 7, transform = "Quantization", sigma = 0.0, exog_count = 0, ar_order = 12);
|
py
|
1a5cf0d7a49120fedda3e5b7fb4358973c2028a5
|
import copy
from abc import ABC
from typing import Generic, TypeVar, Union, Sequence, Callable, Optional, \
Dict, Any, Iterable, List, Set
from avalanche.benchmarks.scenarios.generic_definitions import \
TExperience, ScenarioStream, TScenarioStream, Experience, TScenario, \
TrainSet, TestSet
from avalanche.benchmarks.utils import AvalancheDataset, \
AvalancheSubset
TGenericCLScenario = TypeVar('TGenericCLScenario', bound='GenericCLScenario')
TGenericExperience = TypeVar('TGenericExperience', bound='GenericExperience')
TGenericScenarioStream = TypeVar('TGenericScenarioStream',
bound='GenericScenarioStream')
class GenericCLScenario(Generic[TrainSet, TestSet, TExperience]):
"""
Base implementation of a Continual Learning scenario. A Continual Learning
scenario is defined by a sequence of experiences (batches or tasks depending
on the terminology), with each experience containing the training (or test)
data that becomes available at a certain time instant.
From a practical point of view, this means that we usually have to define
two datasets (training and test), and some way to assign the patterns
contained in these datasets to each experience.
    This assignment is usually made in child classes, with this class serving
    as the more general implementation. This class handles the simplest type
    of assignment: each experience is defined by a list of patterns (identified
    by their indexes) contained in that experience.
"""
def __init__(self: TGenericCLScenario,
original_train_dataset: TrainSet,
original_test_dataset: TestSet,
train_dataset: AvalancheDataset,
test_dataset: AvalancheDataset,
train_exps_patterns_assignment: Sequence[Sequence[int]],
test_exps_patterns_assignment: Sequence[Sequence[int]],
task_labels: Sequence[List[int]],
pattern_train_task_labels: Sequence[int],
pattern_test_task_labels: Sequence[int],
complete_test_set_only: bool = False,
reproducibility_data: Optional[Dict[str, Any]] = None,
experience_factory: Callable[['GenericScenarioStream', int],
TExperience] = None):
"""
Creates an instance of a Continual Learning scenario.
The scenario is defined by the train and test datasets plus the
assignment of patterns to experiences (batches/tasks).
:param train_dataset: The training dataset. The dataset must be a
subclass of :class:`AvalancheDataset`. For instance, one can
use the datasets from the torchvision package like that:
``train_dataset=AvalancheDataset(torchvision_dataset)``.
:param test_dataset: The test dataset. The dataset must be a
subclass of :class:`AvalancheDataset`. For instance, one can
use the datasets from the torchvision package like that:
``test_dataset=AvalancheDataset(torchvision_dataset)``.
:param train_exps_patterns_assignment: A list of experiences. Each
experience is in turn defined by a list of integers describing the
pattern index inside the training dataset.
:param test_exps_patterns_assignment: A list of experiences. Each
experience is in turn defined by a list of integers describing the
pattern index inside the test dataset.
:param task_labels: The mapping from experience IDs to task labels,
usually as a list of integers.
:param pattern_train_task_labels: The list of task labels of each
pattern in the `train_dataset`.
:param pattern_test_task_labels: The list of task labels of each
pattern in the `test_dataset`.
:param complete_test_set_only: If True, only the complete test
set will be returned from test set related methods of the linked
:class:`GenericExperience` instances. This also means that the
``test_exps_patterns_assignment`` parameter can be a single element
or even an empty list (in which case, the full set defined by
the ``test_dataset`` parameter will be returned). The returned
task label for the complete test set will be the first element
of the ``task_labels`` parameter. Defaults to False, which means
            that ``train_exps_patterns_assignment`` and
``test_exps_patterns_assignment`` parameters must describe an equal
amount of experiences.
:param reproducibility_data: If not None, overrides the
``train/test_exps_patterns_assignment`` and ``task_labels``
parameters. This is usually a dictionary containing data used to
reproduce a specific experiment. One can use the
``get_reproducibility_data`` method to get (and even distribute)
the experiment setup so that it can be loaded by passing it as this
parameter. In this way one can be sure that the same specific
experimental setup is being used (for reproducibility purposes).
Beware that, in order to reproduce an experiment, the same train and
test datasets must be used. Defaults to None.
:param experience_factory: If not None, a callable that, given the
scenario instance and the experience ID, returns a experience
instance. This parameter is usually used in subclasses (when
invoking the super constructor) to specialize the experience class.
Defaults to None, which means that the :class:`GenericExperience`
constructor will be used.
"""
self.original_train_dataset: TrainSet = original_train_dataset
""" The original training set. """
self.original_test_dataset: TestSet = original_test_dataset
""" The original test set. """
self.train_exps_patterns_assignment: Sequence[Sequence[int]]
""" A list containing which training patterns are assigned to each
experience. Patterns are identified by their id w.r.t. the dataset found
in the train_dataset field. """
self.test_exps_patterns_assignment: Sequence[Sequence[int]]
""" A list containing which test patterns are assigned to each
experience. Patterns are identified by their id w.r.t. the dataset found
in the test_dataset field """
self.task_labels: Sequence[List[int]] = task_labels
""" The task label of each experience. """
self.pattern_train_task_labels: Sequence[int] = \
pattern_train_task_labels
""" The task label of each pattern in the training dataset. """
self.pattern_test_task_labels: Sequence[int] = pattern_test_task_labels
""" The task label of each pattern in the test dataset. """
self.train_exps_patterns_assignment: Sequence[Sequence[int]] = \
train_exps_patterns_assignment
self.test_exps_patterns_assignment: Sequence[Sequence[int]] = \
test_exps_patterns_assignment
self.complete_test_set_only: bool = bool(complete_test_set_only)
"""
If True, only the complete test set will be returned from experience
instances.
This flag is usually set to True in scenarios where having one separate
test set aligned to each training experience is impossible or doesn't
make sense from a semantic point of view.
"""
if reproducibility_data is not None:
self.train_exps_patterns_assignment = reproducibility_data['train']
self.test_exps_patterns_assignment = reproducibility_data['test']
self.task_labels = reproducibility_data['task_labels']
self.pattern_train_task_labels = reproducibility_data[
'pattern_train_task_labels']
self.pattern_test_task_labels = reproducibility_data[
'pattern_test_task_labels']
self.complete_test_set_only = \
reproducibility_data['complete_test_only']
self.n_experiences: int = len(self.train_exps_patterns_assignment)
""" The number of incremental experiences this scenario is made of. """
if experience_factory is None:
experience_factory = GenericExperience
self.experience_factory: Callable[[TGenericScenarioStream, int],
TExperience] = experience_factory
if self.complete_test_set_only:
if len(self.test_exps_patterns_assignment) > 1:
raise ValueError(
'complete_test_set_only is True, but '
'test_exps_patterns_assignment contains more than one '
'element')
elif len(self.train_exps_patterns_assignment) != \
len(self.test_exps_patterns_assignment):
raise ValueError('There must be the same amount of train and '
'test experiences')
if len(self.train_exps_patterns_assignment) != len(self.task_labels):
raise ValueError('There must be the same number of train '
'experiences and task labels')
self.train_dataset: AvalancheDataset = AvalancheDataset(
train_dataset, task_labels=self.pattern_train_task_labels)
""" The training set used to generate the incremental experiences. """
self.test_dataset: AvalancheDataset = AvalancheDataset(
test_dataset, task_labels=self.pattern_test_task_labels)
""" The test set used to generate the incremental experiences. """
self.train_stream: GenericScenarioStream[
TExperience, TGenericCLScenario] = GenericScenarioStream('train',
self)
"""
The stream used to obtain the training experiences.
This stream can be sliced in order to obtain a subset of this stream.
"""
self.test_stream: GenericScenarioStream[
TExperience, TGenericCLScenario] = GenericScenarioStream('test',
self)
"""
The stream used to obtain the test experiences. This stream can be
sliced in order to obtain a subset of this stream.
Beware that, in certain scenarios, this stream may contain a single
element. Check the ``complete_test_set_only`` field for more details.
"""
def get_reproducibility_data(self) -> Dict[str, Any]:
"""
Gets the data needed to reproduce this experiment.
This data can be stored using the pickle module or some other mechanism.
It can then be loaded by passing it as the ``reproducibility_data``
parameter in the constructor.
Child classes should get the reproducibility dictionary from super class
and then merge their custom data before returning it.
:return: A dictionary containing the data needed to reproduce the
experiment.
"""
train_exps = []
for train_exp_id in range(len(self.train_exps_patterns_assignment)):
train_exp = self.train_exps_patterns_assignment[train_exp_id]
train_exps.append(list(train_exp))
test_exps = []
for test_exp_id in range(len(self.test_exps_patterns_assignment)):
test_exp = self.test_exps_patterns_assignment[test_exp_id]
test_exps.append(list(test_exp))
return {'train': train_exps, 'test': test_exps,
'task_labels': list(self.task_labels),
'complete_test_only': bool(self.complete_test_set_only),
'pattern_train_task_labels': list(
self.pattern_train_task_labels),
'pattern_test_task_labels': list(self.pattern_test_task_labels)}
def get_classes_timeline(self, current_experience: int):
"""
        Returns the classes timeline for this scenario.
        Given an experience ID, this method returns the classes in this
experience, previously seen classes, the cumulative class list and a
list of classes that will be encountered in next experiences.
:param current_experience: The reference experience ID.
:return: A tuple composed of four lists: the first list contains the
IDs of classes in this experience, the second contains IDs of
classes seen in previous experiences, the third returns a cumulative
            list of classes (that is, the union of the first two lists) while the
last one returns a list of classes that will be encountered in next
experiences.
"""
train_exps_patterns_assignment: Sequence[Sequence[int]]
class_set_current_exp = self.classes_in_experience[current_experience]
classes_in_this_exp = list(class_set_current_exp)
class_set_prev_exps = set()
for exp_id in range(0, current_experience):
class_set_prev_exps.update(self.classes_in_experience[exp_id])
previous_classes = list(class_set_prev_exps)
classes_seen_so_far = \
list(class_set_current_exp.union(class_set_prev_exps))
class_set_future_exps = set()
        for exp_id in range(current_experience + 1, self.n_experiences):
            class_set_future_exps.update(self.classes_in_experience[exp_id])
future_classes = list(class_set_future_exps)
return (classes_in_this_exp, previous_classes, classes_seen_so_far,
future_classes)
@property
def classes_in_experience(self) -> Sequence[Set[int]]:
""" A list that, for each experience (identified by its index/ID),
stores a set of the (optionally remapped) IDs of classes of patterns
assigned to that experience. """
return LazyClassesInExps(self)
class GenericScenarioStream(Generic[TExperience, TGenericCLScenario],
ScenarioStream[TGenericCLScenario, TExperience],
Sequence[TExperience]):
def __init__(self: TGenericScenarioStream,
name: str,
scenario: TGenericCLScenario,
*,
slice_ids: List[int] = None):
self.slice_ids: Optional[List[int]] = slice_ids
"""
Describes which experiences are contained in the current stream slice.
Can be None, which means that this object is the original stream. """
self.name: str = name
self.scenario = scenario
def __len__(self) -> int:
"""
        Gets the number of experiences in this stream.
        :return: The number of experiences in this stream.
"""
if self.slice_ids is None:
if self.name == 'train':
return len(self.scenario.train_exps_patterns_assignment)
elif self.scenario.complete_test_set_only:
return 1
else:
return len(self.scenario.test_exps_patterns_assignment)
else:
return len(self.slice_ids)
def __getitem__(self, exp_idx: Union[int, slice, Iterable[int]]) -> \
Union[TExperience, TScenarioStream]:
"""
        Gets an experience given its experience index (or a stream slice given
the experience order).
:param exp_idx: An int describing the experience index or an
iterable/slice object describing a slice of this stream.
:return: The experience instance associated to the given experience
index or a sliced stream instance.
"""
if isinstance(exp_idx, int):
if exp_idx < len(self):
if self.slice_ids is None:
return self.scenario.experience_factory(self, exp_idx)
else:
return self.scenario.experience_factory(
self, self.slice_ids[exp_idx])
            raise IndexError('Experience index out of bounds: ' +
str(int(exp_idx)))
else:
return self._create_slice(exp_idx)
def _create_slice(self: TGenericScenarioStream,
exps_slice: Union[int, slice, Iterable[int]]) \
-> TScenarioStream:
"""
Creates a sliced version of this stream.
In its base version, a shallow copy of this stream is created and
then its ``slice_ids`` field is adapted.
:param exps_slice: The slice to use.
:return: A sliced version of this stream.
"""
stream_copy = copy.copy(self)
slice_exps = _get_slice_ids(exps_slice, len(self))
if self.slice_ids is None:
stream_copy.slice_ids = slice_exps
else:
stream_copy.slice_ids = [self.slice_ids[x] for x in slice_exps]
return stream_copy
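# Illustrative sketch (assumption: ``scenario`` is an already constructed
# GenericCLScenario with at least two experiences): the indexing and slicing
# behaviour documented in GenericScenarioStream.__getitem__ above.
def _stream_slicing_sketch(scenario: 'GenericCLScenario'):
    """Example only: obtain a single experience and a sliced stream."""
    first_exp = scenario.train_stream[0]      # a single experience instance
    first_two = scenario.train_stream[0:2]    # a sliced GenericScenarioStream
    return first_exp, len(first_two)          # len(first_two) == 2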
class LazyClassesInExps(Sequence[Set[int]]):
def __init__(self, scenario: GenericCLScenario):
self._scenario = scenario
def __len__(self):
return len(self._scenario.train_stream)
def __getitem__(self, exp_id) -> Set[int]:
return set(
[self._scenario.train_dataset.targets[pattern_idx]
for pattern_idx
in self._scenario.train_exps_patterns_assignment[exp_id]])
def __str__(self):
return '[' + \
', '.join([str(self[idx]) for idx in range(len(self))]) + \
']'
def _get_slice_ids(slice_definition: Union[int, slice, Iterable[int]],
sliceable_len: int) -> List[int]:
# Obtain experiences list from slice object (or any iterable)
exps_list: List[int]
if isinstance(slice_definition, slice):
exps_list = list(
range(*slice_definition.indices(sliceable_len)))
elif isinstance(slice_definition, int):
exps_list = [slice_definition]
elif hasattr(slice_definition, 'shape') and \
len(getattr(slice_definition, 'shape')) == 0:
exps_list = [int(slice_definition)]
else:
exps_list = list(slice_definition)
# Check experience id(s) boundaries
if max(exps_list) >= sliceable_len:
raise IndexError(
'Experience index out of range: ' + str(max(exps_list)))
if min(exps_list) < 0:
raise IndexError(
'Experience index out of range: ' + str(min(exps_list)))
return exps_list
class AbstractExperience(Experience[TScenario, TScenarioStream], ABC):
"""
Definition of a learning experience. A learning experience contains a set of
    patterns which have become available at a particular time instant. The
    content and size of an Experience are defined by the specific benchmark that
creates the experience.
For instance, an experience of a New Classes scenario will contain all
patterns belonging to a subset of classes of the original training set. An
experience of a New Instance scenario will contain patterns from previously
seen classes.
Experiences of Single Incremental Task (a.k.a. task-free) scenarios are
usually called "batches" while in Multi Task scenarios an Experience is
usually associated to a "task". Finally, in a Multi Incremental Task
scenario the Experience may be composed by patterns from different tasks.
"""
def __init__(
self: TExperience,
origin_stream: TScenarioStream,
current_experience: int,
classes_in_this_exp: Sequence[int],
previous_classes: Sequence[int],
classes_seen_so_far: Sequence[int],
future_classes: Optional[Sequence[int]]):
"""
Creates an instance of the abstract experience given the scenario
stream, the current experience ID and data about the classes timeline.
:param origin_stream: The stream from which this experience was
obtained.
:param current_experience: The current experience ID, as an integer.
:param classes_in_this_exp: The list of classes in this experience.
:param previous_classes: The list of classes in previous experiences.
:param classes_seen_so_far: List of classes of current and previous
experiences.
:param future_classes: The list of classes of next experiences.
"""
self.origin_stream: TScenarioStream = origin_stream
# scenario keeps a reference to the base scenario
self.scenario: TScenario = origin_stream.scenario
# current_experience is usually an incremental, 0-indexed, value used to
# keep track of the current batch/task.
self.current_experience: int = current_experience
self.classes_in_this_experience: Sequence[int] = classes_in_this_exp
""" The list of classes in this experience """
self.previous_classes: Sequence[int] = previous_classes
""" The list of classes in previous experiences """
self.classes_seen_so_far: Sequence[int] = classes_seen_so_far
""" List of classes of current and previous experiences """
self.future_classes: Optional[Sequence[int]] = future_classes
""" The list of classes of next experiences """
@property
def task_label(self) -> int:
"""
The task label. This value will never have value "None". However,
for scenarios that don't produce task labels a placeholder value like 0
is usually set. Beware that this field is meant as a shortcut to obtain
a unique task label: it assumes that only patterns labeled with a
single task label are present. If this experience contains patterns from
multiple tasks, accessing this property will result in an exception.
"""
if len(self.task_labels) != 1:
raise ValueError('The task_label property can only be accessed '
'when the experience contains a single task label')
return self.task_labels[0]
class GenericExperience(AbstractExperience[TGenericCLScenario,
GenericScenarioStream[
TGenericExperience,
TGenericCLScenario]]):
"""
Definition of a learning experience based on a :class:`GenericCLScenario`
instance.
This experience implementation uses the generic experience-patterns
assignment defined in the :class:`GenericCLScenario` instance. Instances of
this class are usually obtained from a scenario stream.
"""
def __init__(self: TGenericExperience,
origin_stream: GenericScenarioStream[TGenericExperience,
TGenericCLScenario],
current_experience: int):
"""
        Creates an instance of a generic experience given the stream from
        which this experience was taken and the current experience ID.
:param origin_stream: The stream from which this experience was
obtained.
:param current_experience: The current experience ID, as an integer.
"""
(classes_in_this_exp, previous_classes, classes_seen_so_far,
future_classes) = origin_stream.scenario.get_classes_timeline(
current_experience)
super(GenericExperience, self).__init__(
origin_stream, current_experience, classes_in_this_exp,
previous_classes, classes_seen_so_far, future_classes)
@property
def dataset(self) -> AvalancheDataset:
if self._is_train():
dataset = self.scenario.train_dataset
patterns_indexes = \
self.scenario.train_exps_patterns_assignment[
self.current_experience]
else:
dataset = self.scenario.test_dataset
if self.scenario.complete_test_set_only:
patterns_indexes = None
else:
patterns_indexes = self.scenario.test_exps_patterns_assignment[
self.current_experience]
return AvalancheSubset(dataset, indices=patterns_indexes)
@property
def task_labels(self) -> List[int]:
if self._is_train():
return self.scenario.task_labels[self.current_experience]
else:
if self.scenario.complete_test_set_only:
return self.scenario.task_labels[0]
else:
return self.scenario.task_labels[self.current_experience]
def _is_train(self):
return self.origin_stream.name == 'train'
__all__ = [
'TGenericCLScenario',
'GenericCLScenario',
'GenericScenarioStream',
'AbstractExperience',
'GenericExperience',
]
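# Hedged usage sketch of the reproducibility round trip described in the
# docstrings above. ``rebuild_scenario`` is a hypothetical factory that
# recreates the scenario from the same train/test datasets and accepts the
# ``reproducibility_data`` keyword argument.
def _reproducibility_round_trip_sketch(scenario: GenericCLScenario,
                                       rebuild_scenario):
    """Example only: persist an experiment setup and restore it later."""
    import pickle
    payload = pickle.dumps(scenario.get_reproducibility_data())
    # ... later, possibly in another process, with the same datasets ...
    restored = rebuild_scenario(reproducibility_data=pickle.loads(payload))
    return restored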
|
py
|
1a5cf335220759f3761b0e5ce04f0f61bc2a1359
|
import random
import numpy as np
import tensorflow as tf
import scipy.sparse as sp
from .base_sequence import Sequence
class MiniBatchSequence(Sequence):
def __init__(
self,
x,
y,
out_weight=None,
shuffle=False,
batch_size=1,
*args, **kwargs
):
super().__init__(*args, **kwargs)
assert batch_size == 1
self.n_batches = len(x)
self.shuffle = shuffle
self.indices = list(range(self.n_batches))
self.batch_size = batch_size
self.x, self.y, self.out_weight = self.astensors(x, y, out_weight)
def __len__(self):
return self.n_batches
def __getitem__(self, index):
idx = self.indices[index]
return self.x[idx], self.y[idx], self.out_weight[idx]
def on_epoch_end(self):
if self.shuffle:
self._shuffle_batches()
def _shuffle_batches(self):
"""
        Shuffle the batch order at the end of each epoch
"""
random.shuffle(self.indices)
class SAGEMiniBatchSequence(Sequence):
def __init__(
self,
x,
y=None,
out_weight=None,
sizes=[5, 5],
shuffle=False,
batch_size=512,
*args, **kwargs
):
super().__init__(*args, **kwargs)
self.node_attr, self.adj_matrix, self.batch_nodes = x
self.y = y
self.n_batches = int(np.ceil(len(self.batch_nodes) / batch_size))
self.shuffle = shuffle
self.batch_size = batch_size
self.indices = np.arange(len(self.batch_nodes))
self.sizes = sizes
self.node_attr = self.astensor(self.node_attr)
def __len__(self):
return self.n_batches
def __getitem__(self, index):
if self.shuffle:
idx = self.indices[index *
self.batch_size:(index + 1) * self.batch_size]
else:
idx = slice(index * self.batch_size, (index + 1) * self.batch_size)
nodes_input = [self.batch_nodes[idx]]
for num_sample in self.sizes:
neighbors = sample_neighbors(
self.adj_matrix, nodes_input[-1], num_sample).ravel()
nodes_input.append(neighbors)
y = self.y[idx] if self.y is not None else None
return self.astensors([self.node_attr, *nodes_input], y)
    def on_epoch_end(self):
        if self.shuffle:
            self._shuffle_batches()
def _shuffle_batches(self):
"""
Shuffle all nodes at the end of each epoch
"""
random.shuffle(self.indices)
def sample_neighbors(adj_matrix, nodes, num_neighbors):
np.random.shuffle(adj_matrix.T)
return adj_matrix[nodes, :num_neighbors]
class FastGCNBatchSequence(Sequence):
def __init__(
self,
x,
y=None,
shuffle=False,
batch_size=None,
rank=None,
*args, **kwargs
):
super().__init__(*args, **kwargs)
node_attr, adj_matrix = x
self.y = y
self.n_batches = int(
np.ceil(adj_matrix.shape[0] / batch_size)) if batch_size else 1
self.shuffle = shuffle
self.batch_size = batch_size
self.indices = np.arange(adj_matrix.shape[0])
self.rank = rank
if rank:
self.p = column_prop(adj_matrix)
self.node_attr, self.adj_matrix = node_attr, adj_matrix
def __len__(self):
return self.n_batches
def __getitem__(self, index):
if not self.batch_size:
(node_attr, adj_matrix), y = self.full_batch()
else:
(node_attr, adj_matrix), y = self.mini_batch(index)
if self.rank:
p = self.p
rank = self.rank
distr = adj_matrix.sum(0).A1.nonzero()[0]
if rank > distr.size:
q = distr
else:
q = np.random.choice(
distr, rank, replace=False, p=p[distr] / p[distr].sum())
adj_matrix = adj_matrix[:, q].dot(sp.diags(1.0 / (p[q] * rank)))
if tf.is_tensor(node_attr):
node_attr = tf.gather(node_attr, q)
else:
node_attr = node_attr[q]
return self.astensors((node_attr, adj_matrix), y)
def full_batch(self):
return (self.node_attr, self.adj_matrix), self.y
def mini_batch(self, index):
if self.shuffle:
idx = self.indices[index *
self.batch_size:(index + 1) * self.batch_size]
else:
idx = slice(index * self.batch_size, (index + 1) * self.batch_size)
y = self.y[idx]
adj_matrix = self.adj_matrix[idx]
node_attr = self.node_attr
return (node_attr, adj_matrix), y
    def on_epoch_end(self):
        if self.shuffle:
            self._shuffle_batches()
def _shuffle_batches(self):
"""
Shuffle all nodes at the end of each epoch
"""
random.shuffle(self.indices)
def column_prop(adj):
column_norm = sp.linalg.norm(adj, axis=0)
norm_sum = column_norm.sum()
return column_norm / norm_sum
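# Hedged example of column_prop on a tiny sparse matrix: the returned
# probabilities are proportional to the column norms and sum to one, which is
# how FastGCNBatchSequence uses them for importance sampling.
def _column_prop_example():
    """Example only."""
    adj = sp.csr_matrix([[1.0, 0.0],
                         [0.0, 2.0]])
    p = column_prop(adj)   # approximately array([0.3333, 0.6667])
    return p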
|
py
|
1a5cf469bcb93a0528c161d72688e51baeb3ce0e
|
# Package placeholder
from .git import GitRepos
|
py
|
1a5cf53089665a91e9c3555ac2f00e5f2d1fcd7d
|
"""
Copyright (c) 2017 Eric Shook. All rights reserved.
Use of this source code is governed by a BSD-style license that can be found in the LICENSE file.
@author: eshook (Eric Shook, [email protected])
@contributors: <Contribute and add your name here!>
"""
from .Primitive import *
from .Primitives import *
from .IO import *
|
py
|
1a5cf547e1866673b4e8977653812533e614b960
|
from fibber.datasets.dataset_utils import (
DatasetForBert, clip_sentence, get_dataset, get_demo_dataset, subsample_dataset,
verify_dataset)
__all__ = [
"get_dataset",
"subsample_dataset",
"verify_dataset",
"DatasetForBert",
"get_demo_dataset",
"builtin_datasets",
"clip_sentence"]
builtin_datasets = [
"ag", "ag_no_title", "mr", "imdb", "yelp", "snli", "mnli", "mnli_mis", "qnli", "sst2",
"expert_layman", "GYAFC_Corpus"
]
|
py
|
1a5cf59cc8adddb0887ede1e2d2b1ee36a1ee08b
|
#!/usr/bin/env python3
# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import shutil
import subprocess
from glob import glob
from common import die
def main() -> None:
ensure_shellcheck_installed()
run_shellcheck()
def ensure_shellcheck_installed() -> None:
if shutil.which("shellcheck") is None:
die(
"`shellcheck` not installed! You may download this through your operating system's "
"package manager, such as brew, apt, or yum. See "
"https://github.com/koalaman/shellcheck#installing."
)
def run_shellcheck() -> None:
targets = set(glob("./**/*.sh", recursive=True)) | {
"./pants",
"./build-support/pants_venv",
"./build-support/virtualenv",
"./build-support/githooks/pre-commit",
"./build-support/githooks/prepare-commit-msg",
}
targets -= set(glob("./build-support/bin/native/src/**/*.sh", recursive=True))
targets -= set(glob("./build-support/virtualenv.dist/**/*.sh", recursive=True))
targets -= set(glob("./build-support/virtualenvs/**/*.sh", recursive=True))
targets -= set(glob("./build-support/twine-deps.venv/**/*.sh", recursive=True))
command = ["shellcheck", "--shell=bash", "--external-sources"] + sorted(targets)
try:
subprocess.run(command, check=True)
except subprocess.CalledProcessError:
die("Please fix the above errors and run again.")
if __name__ == "__main__":
main()
|
py
|
1a5cf8490a663dd2cf717b2b63a022d5303bd5f4
|
import click
from dagos.commands.wsl.import_wsl_distro import import_wsl_distro
from dagos.commands.wsl.prepare_wsl_distro import prepare_wsl_distro
@click.group(no_args_is_help=True)
def wsl():
"""
Prepare or import WSL distros.
"""
pass
wsl.add_command(import_wsl_distro)
wsl.add_command(prepare_wsl_distro)
|
py
|
1a5cf8eb52d5aebb925fbcd441c4f6fcae43899b
|
# Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
from __future__ import absolute_import
import logging
import os
import boto3
import pytest
from sagemaker import LocalSession, Session
from sagemaker.mxnet import MXNet
from test.integration import NO_P2_REGIONS, NO_P3_REGIONS
logger = logging.getLogger(__name__)
logging.getLogger('boto').setLevel(logging.INFO)
logging.getLogger('botocore').setLevel(logging.INFO)
logging.getLogger('factory.py').setLevel(logging.INFO)
logging.getLogger('auth.py').setLevel(logging.INFO)
logging.getLogger('connectionpool.py').setLevel(logging.INFO)
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
def pytest_addoption(parser):
parser.addoption('--docker-base-name', default='preprod-mxnet-serving')
parser.addoption('--region', default='us-west-2')
parser.addoption('--framework-version', default=MXNet.LATEST_VERSION)
parser.addoption('--py-version', default='3', choices=['2', '3', '2,3'])
parser.addoption('--processor', default='cpu', choices=['gpu', 'cpu', 'cpu,gpu'])
parser.addoption('--aws-id', default=None)
parser.addoption('--instance-type', default=None)
parser.addoption('--accelerator-type', default=None)
# If not specified, will default to {framework-version}-{processor}-py{py-version}
parser.addoption('--tag', default=None)
def pytest_generate_tests(metafunc):
if 'py_version' in metafunc.fixturenames:
py_version_params = ['py' + v for v in metafunc.config.getoption('--py-version').split(',')]
metafunc.parametrize('py_version', py_version_params, scope='session')
if 'processor' in metafunc.fixturenames:
processor_params = metafunc.config.getoption('--processor').split(',')
metafunc.parametrize('processor', processor_params, scope='session')
@pytest.fixture(scope='session')
def docker_base_name(request):
return request.config.getoption('--docker-base-name')
@pytest.fixture(scope='session')
def region(request):
return request.config.getoption('--region')
@pytest.fixture(scope='session')
def framework_version(request):
return request.config.getoption('--framework-version')
@pytest.fixture(scope='session')
def aws_id(request):
return request.config.getoption('--aws-id')
@pytest.fixture(scope='session')
def tag(request, framework_version, processor, py_version):
provided_tag = request.config.getoption('--tag')
default_tag = '{}-{}-{}'.format(framework_version, processor, py_version)
return provided_tag if provided_tag is not None else default_tag
@pytest.fixture(scope='session')
def instance_type(request, processor):
provided_instance_type = request.config.getoption('--instance-type')
default_instance_type = 'ml.c4.xlarge' if processor == 'cpu' else 'ml.p2.xlarge'
return provided_instance_type if provided_instance_type is not None else default_instance_type
@pytest.fixture(scope='session')
def accelerator_type(request):
return request.config.getoption('--accelerator-type')
@pytest.fixture(scope='session')
def docker_image(docker_base_name, tag):
return '{}:{}'.format(docker_base_name, tag)
@pytest.fixture(scope='session')
def ecr_image(aws_id, docker_base_name, tag, region):
return '{}.dkr.ecr.{}.amazonaws.com/{}:{}'.format(aws_id, region, docker_base_name, tag)
@pytest.fixture(scope='session')
def sagemaker_session(region):
return Session(boto_session=boto3.Session(region_name=region))
@pytest.fixture(scope='session')
def sagemaker_local_session(region):
return LocalSession(boto_session=boto3.Session(region_name=region))
@pytest.fixture(scope='session')
def local_instance_type(processor):
return 'local' if processor == 'cpu' else 'local_gpu'
@pytest.fixture(autouse=True)
def skip_gpu_instance_restricted_regions(region, instance_type):
no_p2 = region in NO_P2_REGIONS and instance_type.startswith('ml.p2')
no_p3 = region in NO_P3_REGIONS and instance_type.startswith('ml.p3')
if no_p2 or no_p3:
pytest.skip('Skipping GPU test in region {} to avoid insufficient capacity'.format(region))
@pytest.fixture(autouse=True)
def skip_py2_containers(request, tag):
if request.node.get_closest_marker('skip_py2_containers'):
if 'py2' in tag:
pytest.skip('Skipping python2 container with tag {}'.format(tag))
|
py
|
1a5cfb0c279f5b31630448aa828cc75f83a0895e
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Yahoo! Finance market data downloader (+fix for Pandas Datareader)
# https://github.com/ranaroussi/yfinance
#
# Copyright 2017-2019 Ran Aroussi
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import time as _time
import datetime as _datetime
import requests as _requests
import pandas as _pd
import numpy as _np
from . import utils
# import json as _json
# import re as _re
# import sys as _sys
from . import shared
class TickerBase():
def __init__(self, ticker):
self.ticker = ticker.upper()
self._history = None
self._base_url = 'https://query1.finance.yahoo.com'
self._scrape_url = 'https://finance.yahoo.com/quote'
self._fundamentals = False
self._info = None
self._sustainability = None
self._recommendations = None
self._major_holders = None
self._institutional_holders = None
self._calendar = None
self._expirations = {}
self._earnings = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._financials = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._balancesheet = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
self._cashflow = {
"yearly": utils.empty_df(),
"quarterly": utils.empty_df()}
def history(self, period="1mo", interval="1d",
start=None, end=None, prepost=False, actions=True,
auto_adjust=True, back_adjust=False,
proxy=None, rounding=True, tz=None, **kwargs):
"""
:Parameters:
period : str
Valid periods: 1d,5d,1mo,3mo,6mo,1y,2y,5y,10y,ytd,max
Either Use period parameter or use start and end
interval : str
Valid intervals: 1m,2m,5m,15m,30m,60m,90m,1h,1d,5d,1wk,1mo,3mo
                Intraday data cannot extend beyond the last 60 days
start: str
Download start date string (YYYY-MM-DD) or _datetime.
Default is 1900-01-01
end: str
Download end date string (YYYY-MM-DD) or _datetime.
Default is now
prepost : bool
Include Pre and Post market data in results?
Default is False
auto_adjust: bool
Adjust all OHLC automatically? Default is True
back_adjust: bool
Back-adjusted data to mimic true historical prices
proxy: str
Optional. Proxy server URL scheme. Default is None
rounding: bool
                Round values to the precision suggested by Yahoo!?
                Optional. Default is True
tz: str
Optional timezone locale for dates.
(default data is returned as non-localized dates)
**kwargs: dict
debug: bool
Optional. If passed as False, will suppress
error message printing to console.
"""
if start or period is None or period.lower() == "max":
if start is None:
start = -2208988800
elif isinstance(start, _datetime.datetime):
start = int(_time.mktime(start.timetuple()))
else:
start = int(_time.mktime(
_time.strptime(str(start), '%Y-%m-%d')))
if end is None:
end = int(_time.time())
elif isinstance(end, _datetime.datetime):
end = int(_time.mktime(end.timetuple()))
else:
end = int(_time.mktime(_time.strptime(str(end), '%Y-%m-%d')))
params = {"period1": start, "period2": end}
else:
period = period.lower()
params = {"range": period}
params["interval"] = interval.lower()
params["includePrePost"] = prepost
params["events"] = "div,splits"
        # 1) fix weird bug with Yahoo! - returning 60m for 30m bars
if params["interval"] == "30m":
params["interval"] = "15m"
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
# Getting data from json
url = "{}/v8/finance/chart/{}".format(self._base_url, self.ticker)
data = _requests.get(url=url, params=params, proxies=proxy)
if "Will be right back" in data.text:
raise RuntimeError("*** YAHOO! FINANCE IS CURRENTLY DOWN! ***\n"
"Our engineers are working quickly to resolve "
"the issue. Thank you for your patience.")
data = data.json()
# Work with errors
debug_mode = True
if "debug" in kwargs and isinstance(kwargs["debug"], bool):
debug_mode = kwargs["debug"]
err_msg = "No data found for this date range, symbol may be delisted"
if "chart" in data and data["chart"]["error"]:
err_msg = data["chart"]["error"]["description"]
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
elif "chart" not in data or data["chart"]["result"] is None or \
not data["chart"]["result"]:
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
# parse quotes
try:
quotes = utils.parse_quotes(data["chart"]["result"][0], tz)
except Exception:
shared._DFS[self.ticker] = utils.empty_df()
shared._ERRORS[self.ticker] = err_msg
if "many" not in kwargs and debug_mode:
print('- %s: %s' % (self.ticker, err_msg))
return shared._DFS[self.ticker]
        # 2) fix weird bug with Yahoo! - returning 60m for 30m bars
if interval.lower() == "30m":
quotes2 = quotes.resample('30T')
quotes = _pd.DataFrame(index=quotes2.last().index, data={
'Open': quotes2['Open'].first(),
'High': quotes2['High'].max(),
'Low': quotes2['Low'].min(),
'Close': quotes2['Close'].last(),
'Adj Close': quotes2['Adj Close'].last(),
'Volume': quotes2['Volume'].sum()
})
try:
quotes['Dividends'] = quotes2['Dividends'].max()
except Exception:
pass
try:
                quotes['Stock Splits'] = quotes2['Stock Splits'].max()
except Exception:
pass
if auto_adjust:
quotes = utils.auto_adjust(quotes)
elif back_adjust:
quotes = utils.back_adjust(quotes)
if rounding:
quotes = _np.round(quotes, data[
"chart"]["result"][0]["meta"]["priceHint"])
quotes['Volume'] = quotes['Volume'].fillna(0).astype(_np.int64)
quotes.dropna(inplace=True)
# actions
dividends, splits = utils.parse_actions(data["chart"]["result"][0], tz)
# combine
df = _pd.concat([quotes, dividends, splits], axis=1, sort=True)
df["Dividends"].fillna(0, inplace=True)
df["Stock Splits"].fillna(0, inplace=True)
# index eod/intraday
df.index = df.index.tz_localize("UTC").tz_convert(
data["chart"]["result"][0]["meta"]["exchangeTimezoneName"])
if params["interval"][-1] == "m":
df.index.name = "Datetime"
else:
df.index = _pd.to_datetime(df.index.date)
if tz is not None:
df.index = df.index.tz_localize(tz)
df.index.name = "Date"
self._history = df.copy()
if not actions:
df.drop(columns=["Dividends", "Stock Splits"], inplace=True)
return df
# ------------------------
    def _get_fundamentals(self, proxy=None):
def cleanup(data):
df = _pd.DataFrame(data).drop(columns=['maxAge'])
for col in df.columns:
df[col] = _np.where(
df[col].astype(str) == '-', _np.nan, df[col])
df.set_index('endDate', inplace=True)
try:
df.index = _pd.to_datetime(df.index, unit='s')
except ValueError:
df.index = _pd.to_datetime(df.index)
df = df.T
df.columns.name = ''
df.index.name = 'Breakdown'
df.index = utils.camel2title(df.index)
return df
# setup proxy in requests format
if proxy is not None:
if isinstance(proxy, dict) and "https" in proxy:
proxy = proxy["https"]
proxy = {"https": proxy}
if self._fundamentals:
return
# get info and sustainability
url = '%s/%s' % (self._scrape_url, self.ticker)
data = utils.get_json(url, proxy)
# holders
        holders = _pd.read_html(url + '/holders')
self._major_holders = holders[0]
self._institutional_holders = holders[1]
self._institutional_holders['Date Reported'] = _pd.to_datetime(
self._institutional_holders['Date Reported'])
self._institutional_holders['% Out'] = self._institutional_holders[
'% Out'].str.replace('%', '').astype(float)/100
# sustainability
d = {}
if isinstance(data.get('esgScores'), dict):
for item in data['esgScores']:
if not isinstance(data['esgScores'][item], (dict, list)):
d[item] = data['esgScores'][item]
s = _pd.DataFrame(index=[0], data=d)[-1:].T
s.columns = ['Value']
s.index.name = '%.f-%.f' % (
s[s.index == 'ratingYear']['Value'].values[0],
s[s.index == 'ratingMonth']['Value'].values[0])
self._sustainability = s[~s.index.isin(
['maxAge', 'ratingYear', 'ratingMonth'])]
# info (be nice to python 2)
self._info = {}
items = ['summaryProfile', 'summaryDetail', 'quoteType',
'defaultKeyStatistics', 'assetProfile', 'summaryDetail']
for item in items:
if isinstance(data.get(item), dict):
self._info.update(data[item])
self._info['regularMarketPrice'] = self._info['regularMarketOpen']
self._info['logo_url'] = ""
try:
domain = self._info['website'].split(
'://')[1].split('/')[0].replace('www.', '')
self._info['logo_url'] = 'https://logo.clearbit.com/%s' % domain
except Exception:
pass
# events
try:
cal = _pd.DataFrame(
data['calendarEvents']['earnings'])
cal['earningsDate'] = _pd.to_datetime(
cal['earningsDate'], unit='s')
self._calendar = cal.T
self._calendar.index = utils.camel2title(self._calendar.index)
self._calendar.columns = ['Value']
except Exception:
pass
# analyst recommendations
try:
rec = _pd.DataFrame(
data['upgradeDowngradeHistory']['history'])
rec['earningsDate'] = _pd.to_datetime(
rec['epochGradeDate'], unit='s')
rec.set_index('earningsDate', inplace=True)
rec.index.name = 'Date'
rec.columns = utils.camel2title(rec.columns)
self._recommendations = rec[[
'Firm', 'To Grade', 'From Grade', 'Action']].sort_index()
except Exception:
pass
# get fundamentals
data = utils.get_json(url+'/financials', proxy)
# generic patterns
for key in (
(self._cashflow, 'cashflowStatement', 'cashflowStatements'),
(self._balancesheet, 'balanceSheet', 'balanceSheetStatements'),
(self._financials, 'incomeStatement', 'incomeStatementHistory')
):
item = key[1] + 'History'
if isinstance(data.get(item), dict):
key[0]['yearly'] = cleanup(data[item][key[2]])
item = key[1]+'HistoryQuarterly'
if isinstance(data.get(item), dict):
key[0]['quarterly'] = cleanup(data[item][key[2]])
# earnings
if isinstance(data.get('earnings'), dict):
earnings = data['earnings']['financialsChart']
df = _pd.DataFrame(earnings['yearly']).set_index('date')
df.columns = utils.camel2title(df.columns)
df.index.name = 'Year'
self._earnings['yearly'] = df
df = _pd.DataFrame(earnings['quarterly']).set_index('date')
df.columns = utils.camel2title(df.columns)
df.index.name = 'Quarter'
self._earnings['quarterly'] = df
self._fundamentals = True
def get_recommendations(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._recommendations
if as_dict:
return data.to_dict()
return data
def get_calendar(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._calendar
if as_dict:
return data.to_dict()
return data
def get_major_holders(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._major_holders
if as_dict:
return data.to_dict()
return data
def get_institutional_holders(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._institutional_holders
if as_dict:
return data.to_dict()
return data
def get_info(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._info
if as_dict:
return data.to_dict()
return data
def get_sustainability(self, proxy=None, as_dict=False, *args, **kwargs):
self._get_fundamentals(proxy)
data = self._sustainability
if as_dict:
return data.to_dict()
return data
def get_earnings(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy)
data = self._earnings[freq]
if as_dict:
return data.to_dict()
return data
def get_financials(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy)
data = self._financials[freq]
if as_dict:
return data.to_dict()
return data
def get_balancesheet(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy)
data = self._balancesheet[freq]
if as_dict:
return data.to_dict()
return data
def get_balance_sheet(self, proxy=None, as_dict=False, freq="yearly"):
return self.get_balancesheet(proxy, as_dict, freq)
def get_cashflow(self, proxy=None, as_dict=False, freq="yearly"):
self._get_fundamentals(proxy)
data = self._cashflow[freq]
if as_dict:
return data.to_dict()
return data
def get_dividends(self, proxy=None):
if self._history is None:
self.history(period="max", proxy=proxy)
dividends = self._history["Dividends"]
return dividends[dividends != 0]
def get_splits(self, proxy=None):
if self._history is None:
self.history(period="max", proxy=proxy)
splits = self._history["Stock Splits"]
return splits[splits != 0]
def get_actions(self, proxy=None):
if self._history is None:
self.history(period="max", proxy=proxy)
actions = self._history[["Dividends", "Stock Splits"]]
return actions[actions != 0].dropna(how='all').fillna(0)
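# Hedged usage sketch of the history() parameters documented above plus the
# action helpers. "MSFT" is only an example symbol; network access to Yahoo!
# Finance is assumed.
def _ticker_base_usage_sketch():
    """Example only: fetch one month of daily bars and the action series."""
    ticker = TickerBase("MSFT")
    bars = ticker.history(period="1mo", interval="1d", auto_adjust=True)
    dividends = ticker.get_dividends()   # non-zero dividend rows only
    splits = ticker.get_splits()         # non-zero split rows only
    return bars, dividends, splits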
|
py
|
1a5cfb50cb43a5442a62d5dc86251ac7d1115b0c
|
# Copyright (c) 2016-2021 The Regents of the University of Michigan
# This file is part of the General Simulation Data (GSD) project, released under
# the BSD 2-Clause License.
"""Read and write HOOMD schema GSD files.
The main package :py:mod:`gsd.hoomd` is a reference implementation of the
GSD schema ``hoomd``. It is a simple, but high performance and memory
efficient, reader and writer for the schema. See :ref:`hoomd-examples`
for full examples.
* `open` - Open a hoomd schema GSD file.
* `HOOMDTrajectory` - Read and write hoomd schema GSD files.
* `Snapshot` - Store the state of a single frame.
* `ConfigurationData` - Store configuration data in a snapshot.
* `ParticleData` - Store particle data in a snapshot.
* `BondData` - Store topology data in a snapshot.
"""
import numpy
from collections import OrderedDict
import logging
import json
try:
from gsd import fl
except ImportError:
fl = None
try:
import gsd
except ImportError:
gsd = None
logger = logging.getLogger('gsd.hoomd')
class ConfigurationData(object):
"""Store configuration data.
    Use the `Snapshot.configuration` attribute of a snapshot to access the
    configuration.
Attributes:
step (int): Time step of this frame (:chunk:`configuration/step`).
dimensions (int): Number of dimensions
(:chunk:`configuration/dimensions`). When not set explicitly,
dimensions will default to different values based on the value of
:math:`L_z` in `box`. When :math:`L_z = 0` dimensions will default
to 2, otherwise 3. User set values always take precedence.
"""
_default_value = OrderedDict()
_default_value['step'] = numpy.uint64(0)
_default_value['dimensions'] = numpy.uint8(3)
_default_value['box'] = numpy.array([1, 1, 1, 0, 0, 0], dtype=numpy.float32)
def __init__(self):
self.step = None
self.dimensions = None
self._box = None
@property
def box(self):
"""((6, 1) `numpy.ndarray` of ``numpy.float32``): Box dimensions \
(:chunk:`configuration/box`).
[lx, ly, lz, xy, xz, yz].
"""
return self._box
@box.setter
def box(self, box):
self._box = box
try:
Lz = box[2]
except TypeError:
return
else:
if self.dimensions is None:
self.dimensions = 2 if Lz == 0 else 3
def validate(self):
"""Validate all attributes.
Convert every array attribute to a `numpy.ndarray` of the proper
type and check that all attributes have the correct dimensions.
Ignore any attributes that are ``None``.
Warning:
Array attributes that are not contiguous numpy arrays will be
replaced with contiguous numpy arrays of the appropriate type.
"""
logger.debug('Validating ConfigurationData')
if self.box is not None:
self.box = numpy.ascontiguousarray(self.box, dtype=numpy.float32)
self.box = self.box.reshape([6])
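# Hedged illustration of the dimensions default described in the
# ConfigurationData docstring above: when ``dimensions`` is unset, assigning a
# box with L_z == 0 makes it default to 2, any non-zero L_z gives 3.
def _configuration_dimensions_example():
    """Example only."""
    config = ConfigurationData()
    config.box = [10, 10, 0, 0, 0, 0]   # L_z == 0, so dimensions defaults to 2
    assert config.dimensions == 2
    config.validate()   # box becomes a contiguous float32 array of shape (6,)
    return config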
class ParticleData(object):
"""Store particle data chunks.
    Use the `Snapshot.particles` attribute of a snapshot to access the
    particles.
Instances resulting from file read operations will always store array
quantities in `numpy.ndarray` objects of the defined types. User created
snapshots may provide input data that can be converted to a `numpy.ndarray`.
Attributes:
N (int): Number of particles in the snapshot (:chunk:`particles/N`).
types (`typing.List` [str]):
Names of the particle types (:chunk:`particles/types`).
position ((*N*, 3) `numpy.ndarray` of ``numpy.float32``):
Particle position (:chunk:`particles/position`).
orientation ((*N*, 4) `numpy.ndarray` of ``numpy.float32``):
Particle orientation. (:chunk:`particles/orientation`).
typeid ((*N*, ) `numpy.ndarray` of ``numpy.uint32``):
Particle type id (:chunk:`particles/typeid`).
mass ((*N*, ) `numpy.ndarray` of ``numpy.float32``):
Particle mass (:chunk:`particles/mass`).
charge ((*N*, ) `numpy.ndarray` of ``numpy.float32``):
Particle charge (:chunk:`particles/charge`).
diameter ((*N*, ) `numpy.ndarray` of ``numpy.float32``):
Particle diameter (:chunk:`particles/diameter`).
body ((*N*, ) `numpy.ndarray` of ``numpy.int32``):
Particle body (:chunk:`particles/body`).
moment_inertia ((*N*, 3) `numpy.ndarray` of ``numpy.float32``):
Particle moment of inertia (:chunk:`particles/moment_inertia`).
velocity ((*N*, 3) `numpy.ndarray` of ``numpy.float32``):
Particle velocity (:chunk:`particles/velocity`).
angmom ((*N*, 4) `numpy.ndarray` of ``numpy.float32``):
Particle angular momentum (:chunk:`particles/angmom`).
image ((*N*, 3) `numpy.ndarray` of ``numpy.int32``):
Particle image (:chunk:`particles/image`).
type_shapes (`typing.List` [`typing.Dict`]): Shape specifications for
visualizing particle types (:chunk:`particles/type_shapes`).
"""
_default_value = OrderedDict()
_default_value['N'] = numpy.uint32(0)
_default_value['types'] = ['A']
_default_value['typeid'] = numpy.uint32(0)
_default_value['mass'] = numpy.float32(1.0)
_default_value['charge'] = numpy.float32(0)
_default_value['diameter'] = numpy.float32(1.0)
_default_value['body'] = numpy.int32(-1)
_default_value['moment_inertia'] = numpy.array([0, 0, 0],
dtype=numpy.float32)
_default_value['position'] = numpy.array([0, 0, 0], dtype=numpy.float32)
_default_value['orientation'] = numpy.array([1, 0, 0, 0],
dtype=numpy.float32)
_default_value['velocity'] = numpy.array([0, 0, 0], dtype=numpy.float32)
_default_value['angmom'] = numpy.array([0, 0, 0, 0], dtype=numpy.float32)
_default_value['image'] = numpy.array([0, 0, 0], dtype=numpy.int32)
_default_value['type_shapes'] = [{}]
def __init__(self):
self.N = 0
self.position = None
self.orientation = None
self.types = None
self.typeid = None
self.mass = None
self.charge = None
self.diameter = None
self.body = None
self.moment_inertia = None
self.velocity = None
self.angmom = None
self.image = None
self.type_shapes = None
def validate(self):
"""Validate all attributes.
Convert every array attribute to a `numpy.ndarray` of the proper
type and check that all attributes have the correct dimensions.
Ignore any attributes that are ``None``.
Warning:
Array attributes that are not contiguous numpy arrays will be
replaced with contiguous numpy arrays of the appropriate type.
"""
logger.debug('Validating ParticleData')
if self.position is not None:
self.position = numpy.ascontiguousarray(self.position,
dtype=numpy.float32)
self.position = self.position.reshape([self.N, 3])
if self.orientation is not None:
self.orientation = numpy.ascontiguousarray(self.orientation,
dtype=numpy.float32)
self.orientation = self.orientation.reshape([self.N, 4])
if self.typeid is not None:
self.typeid = numpy.ascontiguousarray(self.typeid,
dtype=numpy.uint32)
self.typeid = self.typeid.reshape([self.N])
if self.mass is not None:
self.mass = numpy.ascontiguousarray(self.mass, dtype=numpy.float32)
self.mass = self.mass.reshape([self.N])
if self.charge is not None:
self.charge = numpy.ascontiguousarray(self.charge,
dtype=numpy.float32)
self.charge = self.charge.reshape([self.N])
if self.diameter is not None:
self.diameter = numpy.ascontiguousarray(self.diameter,
dtype=numpy.float32)
self.diameter = self.diameter.reshape([self.N])
if self.body is not None:
self.body = numpy.ascontiguousarray(self.body, dtype=numpy.int32)
self.body = self.body.reshape([self.N])
if self.moment_inertia is not None:
self.moment_inertia = numpy.ascontiguousarray(self.moment_inertia,
dtype=numpy.float32)
self.moment_inertia = self.moment_inertia.reshape([self.N, 3])
if self.velocity is not None:
self.velocity = numpy.ascontiguousarray(self.velocity,
dtype=numpy.float32)
self.velocity = self.velocity.reshape([self.N, 3])
if self.angmom is not None:
self.angmom = numpy.ascontiguousarray(self.angmom,
dtype=numpy.float32)
self.angmom = self.angmom.reshape([self.N, 4])
if self.image is not None:
self.image = numpy.ascontiguousarray(self.image, dtype=numpy.int32)
self.image = self.image.reshape([self.N, 3])
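# Hedged illustration of a minimal two-particle ParticleData using the array
# shapes documented above; values are arbitrary examples.
def _particle_data_example():
    """Example only."""
    particles = ParticleData()
    particles.N = 2
    particles.types = ['A']
    particles.typeid = [0, 0]
    particles.position = [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]
    particles.validate()   # arrays become contiguous ndarrays of the documented dtypes
    return particles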
class BondData(object):
"""Store bond data chunks.
Use the `Snapshot.bonds`, `Snapshot.angles`, `Snapshot.dihedrals`,
`Snapshot.impropers`, and `Snapshot.pairs` attributes to access the bonds.
Instances resulting from file read operations will always store array
quantities in `numpy.ndarray` objects of the defined types. User created
snapshots may provide input data that can be converted to a `numpy.ndarray`.
Note:
*M* varies depending on the type of bond. `BondData` represents all
types of bonds.
======== ===
Type *M*
======== ===
Bond 2
Angle 3
Dihedral 4
Improper 4
Pair 2
======== ===
Attributes:
        N (int): Number of bonds in the snapshot
(:chunk:`bonds/N`, :chunk:`angles/N`, :chunk:`dihedrals/N`,
:chunk:`impropers/N`, :chunk:`pairs/N`).
        types (`typing.List` [str]): Names of the bond types
(:chunk:`bonds/types`, :chunk:`angles/types`,
:chunk:`dihedrals/types`, :chunk:`impropers/types`,
:chunk:`pairs/types`).
        typeid ((*N*, ) `numpy.ndarray` of ``numpy.uint32``):
Bond type id (:chunk:`bonds/typeid`,
:chunk:`angles/typeid`, :chunk:`dihedrals/typeid`,
            :chunk:`impropers/typeid`, :chunk:`pairs/typeid`).
group ((*N*, *M*) `numpy.ndarray` of ``numpy.uint32``):
Tags of the particles in the bond (:chunk:`bonds/group`,
:chunk:`angles/group`, :chunk:`dihedrals/group`,
:chunk:`impropers/group`, :chunk:`pairs/group`).
"""
def __init__(self, M):
self.M = M
self.N = 0
self.types = None
self.typeid = None
self.group = None
self._default_value = OrderedDict()
self._default_value['N'] = numpy.uint32(0)
self._default_value['types'] = []
self._default_value['typeid'] = numpy.uint32(0)
self._default_value['group'] = numpy.array([0] * M, dtype=numpy.int32)
def validate(self):
"""Validate all attributes.
Convert every array attribute to a `numpy.ndarray` of the proper
type and check that all attributes have the correct dimensions.
Ignore any attributes that are ``None``.
Warning:
Array attributes that are not contiguous numpy arrays will be
replaced with contiguous numpy arrays of the appropriate type.
"""
logger.debug('Validating BondData')
if self.typeid is not None:
self.typeid = numpy.ascontiguousarray(self.typeid,
dtype=numpy.uint32)
self.typeid = self.typeid.reshape([self.N])
if self.group is not None:
self.group = numpy.ascontiguousarray(self.group, dtype=numpy.int32)
self.group = self.group.reshape([self.N, self.M])
class ConstraintData(object):
"""Store constraint data chunks.
Use the `Snapshot.constraints` attribute to access the constraints.
Instances resulting from file read operations will always store array
quantities in `numpy.ndarray` objects of the defined types. User created
snapshots may provide input data that can be converted to a `numpy.ndarray`.
Attributes:
        N (int): Number of constraints in the snapshot (:chunk:`constraints/N`).
value ((*N*, ) `numpy.ndarray` of ``numpy.float32``):
Constraint length (:chunk:`constraints/value`).
group ((*N*, *2*) `numpy.ndarray` of ``numpy.uint32``):
Tags of the particles in the constraint
(:chunk:`constraints/group`).
"""
def __init__(self):
self.M = 2
self.N = 0
self.value = None
self.group = None
self._default_value = OrderedDict()
self._default_value['N'] = numpy.uint32(0)
self._default_value['value'] = numpy.float32(0)
self._default_value['group'] = numpy.array([0] * self.M,
dtype=numpy.int32)
def validate(self):
"""Validate all attributes.
Convert every array attribute to a `numpy.ndarray` of the proper
type and check that all attributes have the correct dimensions.
Ignore any attributes that are ``None``.
Warning:
Array attributes that are not contiguous numpy arrays will be
replaced with contiguous numpy arrays of the appropriate type.
"""
logger.debug('Validating ConstraintData')
if self.value is not None:
self.value = numpy.ascontiguousarray(self.value,
dtype=numpy.float32)
self.value = self.value.reshape([self.N])
if self.group is not None:
self.group = numpy.ascontiguousarray(self.group, dtype=numpy.int32)
self.group = self.group.reshape([self.N, self.M])
class Snapshot(object):
"""Snapshot of a system state.
Attributes:
configuration (`ConfigurationData`): Configuration data.
particles (`ParticleData`): Particles.
bonds (`BondData`): Bonds.
angles (`BondData`): Angles.
dihedrals (`BondData`): Dihedrals.
impropers (`BondData`): Impropers.
        pairs (`BondData`): Special pairs.
constraints (`ConstraintData`): Distance constraints.
state (typing.Dict): State data.
log (typing.Dict): Logged data (values must be `numpy.ndarray` or
`array_like`)
"""
def __init__(self):
self.configuration = ConfigurationData()
self.particles = ParticleData()
self.bonds = BondData(2)
self.angles = BondData(3)
self.dihedrals = BondData(4)
self.impropers = BondData(4)
self.constraints = ConstraintData()
self.pairs = BondData(2)
self.state = {}
self.log = {}
self._valid_state = [
'hpmc/integrate/d',
'hpmc/integrate/a',
'hpmc/sphere/radius',
'hpmc/sphere/orientable',
'hpmc/ellipsoid/a',
'hpmc/ellipsoid/b',
'hpmc/ellipsoid/c',
'hpmc/convex_polyhedron/N',
'hpmc/convex_polyhedron/vertices',
'hpmc/convex_spheropolyhedron/N',
'hpmc/convex_spheropolyhedron/vertices',
'hpmc/convex_spheropolyhedron/sweep_radius',
'hpmc/convex_polygon/N',
'hpmc/convex_polygon/vertices',
'hpmc/convex_spheropolygon/N',
'hpmc/convex_spheropolygon/vertices',
'hpmc/convex_spheropolygon/sweep_radius',
'hpmc/simple_polygon/N',
'hpmc/simple_polygon/vertices',
]
def validate(self):
"""Validate all contained snapshot data."""
logger.debug('Validating Snapshot')
self.configuration.validate()
self.particles.validate()
self.bonds.validate()
self.angles.validate()
self.dihedrals.validate()
self.impropers.validate()
self.constraints.validate()
self.pairs.validate()
# validate HPMC state
if self.particles.types is not None:
NT = len(self.particles.types)
else:
NT = 1
if 'hpmc/integrate/d' in self.state:
self.state['hpmc/integrate/d'] = \
numpy.ascontiguousarray(self.state['hpmc/integrate/d'],
dtype=numpy.float64)
self.state['hpmc/integrate/d'] = \
self.state['hpmc/integrate/d'].reshape([1])
if 'hpmc/integrate/a' in self.state:
self.state['hpmc/integrate/a'] = \
numpy.ascontiguousarray(self.state['hpmc/integrate/a'],
dtype=numpy.float64)
self.state['hpmc/integrate/a'] = \
self.state['hpmc/integrate/a'].reshape([1])
if 'hpmc/sphere/radius' in self.state:
self.state['hpmc/sphere/radius'] = \
numpy.ascontiguousarray(self.state['hpmc/sphere/radius'],
dtype=numpy.float32)
self.state['hpmc/sphere/radius'] = \
self.state['hpmc/sphere/radius'].reshape([NT])
if 'hpmc/sphere/orientable' in self.state:
self.state['hpmc/sphere/orientable'] = \
numpy.ascontiguousarray(self.state['hpmc/sphere/orientable'],
dtype=numpy.uint8)
self.state['hpmc/sphere/orientable'] = \
self.state['hpmc/sphere/orientable'].reshape([NT])
if 'hpmc/ellipsoid/a' in self.state:
self.state['hpmc/ellipsoid/a'] = \
numpy.ascontiguousarray(self.state['hpmc/ellipsoid/a'],
dtype=numpy.float32)
self.state['hpmc/ellipsoid/a'] = \
self.state['hpmc/ellipsoid/a'].reshape([NT])
self.state['hpmc/ellipsoid/b'] = \
numpy.ascontiguousarray(self.state['hpmc/ellipsoid/b'],
dtype=numpy.float32)
self.state['hpmc/ellipsoid/b'] = \
self.state['hpmc/ellipsoid/b'].reshape([NT])
self.state['hpmc/ellipsoid/c'] = \
numpy.ascontiguousarray(self.state['hpmc/ellipsoid/c'],
dtype=numpy.float32)
self.state['hpmc/ellipsoid/c'] = \
self.state['hpmc/ellipsoid/c'].reshape([NT])
if 'hpmc/convex_polyhedron/N' in self.state:
self.state['hpmc/convex_polyhedron/N'] = \
numpy.ascontiguousarray(self.state['hpmc/convex_polyhedron/N'],
dtype=numpy.uint32)
self.state['hpmc/convex_polyhedron/N'] = \
self.state['hpmc/convex_polyhedron/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/convex_polyhedron/N'])
self.state['hpmc/convex_polyhedron/vertices'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_polyhedron/vertices'],
dtype=numpy.float32)
self.state['hpmc/convex_polyhedron/vertices'] = \
self.state['hpmc/convex_polyhedron/vertices'].reshape([sumN, 3])
if 'hpmc/convex_spheropolyhedron/N' in self.state:
self.state['hpmc/convex_spheropolyhedron/N'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_spheropolyhedron/N'],
dtype=numpy.uint32)
self.state['hpmc/convex_spheropolyhedron/N'] = \
self.state['hpmc/convex_spheropolyhedron/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/convex_spheropolyhedron/N'])
self.state['hpmc/convex_spheropolyhedron/sweep_radius'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_spheropolyhedron/sweep_radius'],
dtype=numpy.float32)
self.state['hpmc/convex_spheropolyhedron/sweep_radius'] = \
self.state[
'hpmc/convex_spheropolyhedron/sweep_radius'].reshape([NT])
self.state['hpmc/convex_spheropolyhedron/vertices'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_spheropolyhedron/vertices'],
dtype=numpy.float32)
self.state['hpmc/convex_spheropolyhedron/vertices'] = \
self.state[
'hpmc/convex_spheropolyhedron/vertices'].reshape([sumN, 3])
if 'hpmc/convex_polygon/N' in self.state:
self.state['hpmc/convex_polygon/N'] = \
numpy.ascontiguousarray(self.state['hpmc/convex_polygon/N'],
dtype=numpy.uint32)
self.state['hpmc/convex_polygon/N'] = \
self.state['hpmc/convex_polygon/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/convex_polygon/N'])
self.state['hpmc/convex_polygon/vertices'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_polygon/vertices'],
dtype=numpy.float32)
self.state['hpmc/convex_polygon/vertices'] = \
self.state['hpmc/convex_polygon/vertices'].reshape([sumN, 2])
if 'hpmc/convex_spheropolygon/N' in self.state:
self.state['hpmc/convex_spheropolygon/N'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_spheropolygon/N'],
dtype=numpy.uint32)
self.state['hpmc/convex_spheropolygon/N'] = \
self.state['hpmc/convex_spheropolygon/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/convex_spheropolygon/N'])
self.state['hpmc/convex_spheropolygon/sweep_radius'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_spheropolygon/sweep_radius'],
dtype=numpy.float32)
self.state['hpmc/convex_spheropolygon/sweep_radius'] = \
self.state[
'hpmc/convex_spheropolygon/sweep_radius'].reshape([NT])
self.state['hpmc/convex_spheropolygon/vertices'] = \
numpy.ascontiguousarray(
self.state['hpmc/convex_spheropolygon/vertices'],
dtype=numpy.float32)
self.state['hpmc/convex_spheropolygon/vertices'] = \
self.state[
'hpmc/convex_spheropolygon/vertices'].reshape([sumN, 2])
if 'hpmc/simple_polygon/N' in self.state:
self.state['hpmc/simple_polygon/N'] = \
numpy.ascontiguousarray(self.state['hpmc/simple_polygon/N'],
dtype=numpy.uint32)
self.state['hpmc/simple_polygon/N'] = \
self.state['hpmc/simple_polygon/N'].reshape([NT])
sumN = numpy.sum(self.state['hpmc/simple_polygon/N'])
self.state['hpmc/simple_polygon/vertices'] = \
numpy.ascontiguousarray(
self.state['hpmc/simple_polygon/vertices'],
dtype=numpy.float32)
self.state['hpmc/simple_polygon/vertices'] = \
self.state[
'hpmc/simple_polygon/vertices'].reshape([sumN, 2])
for k in self.state:
if k not in self._valid_state:
raise RuntimeError('Not a valid state: ' + k)
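# Editor's sketch (not part of the original module): building a minimal Snapshot
# and validating it, including one HPMC state entry. This assumes the ParticleData
# attributes (N, types, position) defined earlier in this module; the values are
# illustrative only.
def _example_snapshot_validate():
    snap = Snapshot()
    snap.particles.N = 2
    snap.particles.types = ['A']
    snap.particles.position = [[0, 0, 0], [1, 0, 0]]
    snap.bonds.N = 1
    snap.bonds.types = ['b']
    snap.bonds.group = [[0, 1]]
    # one radius per particle type; validate() reshapes this to [len(types)]
    snap.state['hpmc/sphere/radius'] = [0.5]
    snap.validate()
    return snap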
class _HOOMDTrajectoryIterable(object):
"""Iterable over a HOOMDTrajectory object."""
def __init__(self, trajectory, indices):
self._trajectory = trajectory
self._indices = indices
self._indices_iterator = iter(indices)
def __next__(self):
return self._trajectory[next(self._indices_iterator)]
next = __next__ # Python 2.7 compatibility
def __iter__(self):
return type(self)(self._trajectory, self._indices)
def __len__(self):
return len(self._indices)
class _HOOMDTrajectoryView(object):
"""A view of a HOOMDTrajectory object.
Enables the slicing and iteration over a subset of a trajectory
instance.
"""
def __init__(self, trajectory, indices):
self._trajectory = trajectory
self._indices = indices
def __iter__(self):
return _HOOMDTrajectoryIterable(self._trajectory, self._indices)
def __len__(self):
return len(self._indices)
def __getitem__(self, key):
if isinstance(key, slice):
return type(self)(self._trajectory, self._indices[key])
else:
return self._trajectory[self._indices[key]]
class HOOMDTrajectory(object):
"""Read and write hoomd gsd files.
Args:
file (`gsd.fl.GSDFile`): File to access.
Open hoomd GSD files with `open`.
"""
def __init__(self, file):
if file.mode == 'ab':
raise ValueError('Append mode not yet supported')
self._file = file
self._initial_frame = None
logger.info('opening HOOMDTrajectory: ' + str(self.file))
if self.file.schema != 'hoomd':
raise RuntimeError('GSD file is not a hoomd schema file: '
+ str(self.file))
valid = False
version = self.file.schema_version
if (version < (2, 0) and version >= (1, 0)):
valid = True
if not valid:
raise RuntimeError('Incompatible hoomd schema version '
+ str(version) + ' in: ' + str(self.file))
logger.info('found ' + str(len(self)) + ' frames')
@property
def file(self):
""":class:`gsd.fl.GSDFile`: The underlying file handle."""
return self._file
def __len__(self):
"""The number of frames in the trajectory."""
return self.file.nframes
def append(self, snapshot):
"""Append a snapshot to a hoomd gsd file.
Args:
snapshot (:py:class:`Snapshot`): Snapshot to append.
Write the given snapshot to the file at the current frame and increase
the frame counter. Do not write any fields that are ``None``. For all
non-``None`` fields, scan them and see if they match the initial frame
or the default value. If the given data differs, write it out to the
frame. If it is the same, do not write it out as it can be instantiated
either from the value at the initial frame or the default value.
"""
logger.debug('Appending snapshot to hoomd trajectory: '
+ str(self.file))
snapshot.validate()
# want the initial frame specified as a reference to detect if chunks
# need to be written
if self._initial_frame is None and len(self) > 0:
self.read_frame(0)
for path in [
'configuration',
'particles',
'bonds',
'angles',
'dihedrals',
'impropers',
'constraints',
'pairs',
]:
container = getattr(snapshot, path)
for name in container._default_value:
if self._should_write(path, name, snapshot):
logger.debug('writing data chunk: ' + path + '/' + name)
data = getattr(container, name)
if name == 'N':
data = numpy.array([data], dtype=numpy.uint32)
if name == 'step':
data = numpy.array([data], dtype=numpy.uint64)
if name == 'dimensions':
data = numpy.array([data], dtype=numpy.uint8)
if name in ('types', 'type_shapes'):
if name == 'type_shapes':
data = [
json.dumps(shape_dict) for shape_dict in data
]
wid = max(len(w) for w in data) + 1
b = numpy.array(data, dtype=numpy.dtype((bytes, wid)))
data = b.view(dtype=numpy.int8).reshape(len(b), wid)
self.file.write_chunk(path + '/' + name, data)
# write state data
for state, data in snapshot.state.items():
self.file.write_chunk('state/' + state, data)
# write log data
for log, data in snapshot.log.items():
self.file.write_chunk('log/' + log, data)
self.file.end_frame()
def truncate(self):
"""Remove all frames from the file."""
self.file.truncate()
self._initial_frame = None
def close(self):
"""Close the file."""
self.file.close()
del self._initial_frame
def _should_write(self, path, name, snapshot):
"""Test if we should write a given data chunk.
Args:
path (str): Path part of the data chunk.
name (str): Name part of the data chunk.
snapshot (:py:class:`Snapshot`): Snapshot data is from.
Returns:
False if the data matches that in the initial frame. False
if the data matches all default values. True otherwise.
"""
container = getattr(snapshot, path)
data = getattr(container, name)
if data is None:
return False
if self._initial_frame is not None:
initial_container = getattr(self._initial_frame, path)
initial_data = getattr(initial_container, name)
if numpy.array_equal(initial_data, data):
logger.debug('skipping data chunk, matches frame 0: ' + path
+ '/' + name)
return False
if numpy.array_equiv(data, container._default_value[name]):
logger.debug('skipping data chunk, default value: ' + path + '/'
+ name)
return False
return True
def extend(self, iterable):
"""Append each item of the iterable to the file.
Args:
            iterable: An iterable object that provides :py:class:`Snapshot`
instances. This could be another HOOMDTrajectory, a generator
that modifies snapshots, or a simple list of snapshots.
"""
for item in iterable:
self.append(item)
def read_frame(self, idx):
"""Read the frame at the given index from the file.
Args:
idx (int): Frame index to read.
Returns:
`Snapshot` with the frame data
Replace any data chunks not present in the given frame with either data
from frame 0, or initialize from default values if not in frame 0. Cache
frame 0 data to avoid file read overhead. Return any default data as
non-writable numpy arrays.
"""
if idx >= len(self):
raise IndexError
logger.debug('reading frame ' + str(idx) + ' from: ' + str(self.file))
if self._initial_frame is None and idx != 0:
self.read_frame(0)
snap = Snapshot()
# read configuration first
if self.file.chunk_exists(frame=idx, name='configuration/step'):
step_arr = self.file.read_chunk(frame=idx,
name='configuration/step')
snap.configuration.step = step_arr[0]
else:
if self._initial_frame is not None:
snap.configuration.step = self._initial_frame.configuration.step
else:
snap.configuration.step = \
snap.configuration._default_value['step']
if self.file.chunk_exists(frame=idx, name='configuration/dimensions'):
dimensions_arr = self.file.read_chunk(
frame=idx, name='configuration/dimensions')
snap.configuration.dimensions = dimensions_arr[0]
else:
if self._initial_frame is not None:
snap.configuration.dimensions = \
self._initial_frame.configuration.dimensions
else:
snap.configuration.dimensions = \
snap.configuration._default_value['dimensions']
if self.file.chunk_exists(frame=idx, name='configuration/box'):
snap.configuration.box = self.file.read_chunk(
frame=idx, name='configuration/box')
else:
if self._initial_frame is not None:
snap.configuration.box = self._initial_frame.configuration.box
else:
snap.configuration.box = \
snap.configuration._default_value['box']
# then read all groups that have N, types, etc...
for path in [
'particles',
'bonds',
'angles',
'dihedrals',
'impropers',
'constraints',
'pairs',
]:
container = getattr(snap, path)
if self._initial_frame is not None:
initial_frame_container = getattr(self._initial_frame, path)
container.N = 0
if self.file.chunk_exists(frame=idx, name=path + '/N'):
N_arr = self.file.read_chunk(frame=idx, name=path + '/N')
container.N = N_arr[0]
else:
if self._initial_frame is not None:
container.N = initial_frame_container.N
# type names
if 'types' in container._default_value:
if self.file.chunk_exists(frame=idx, name=path + '/types'):
tmp = self.file.read_chunk(frame=idx, name=path + '/types')
tmp = tmp.view(dtype=numpy.dtype((bytes, tmp.shape[1])))
tmp = tmp.reshape([tmp.shape[0]])
container.types = list(a.decode('UTF-8') for a in tmp)
else:
if self._initial_frame is not None:
container.types = initial_frame_container.types
else:
container.types = container._default_value['types']
# type shapes
if ('type_shapes' in container._default_value
and path == 'particles'):
if self.file.chunk_exists(frame=idx,
name=path + '/type_shapes'):
tmp = self.file.read_chunk(frame=idx,
name=path + '/type_shapes')
tmp = tmp.view(dtype=numpy.dtype((bytes, tmp.shape[1])))
tmp = tmp.reshape([tmp.shape[0]])
container.type_shapes = \
list(json.loads(json_string.decode('UTF-8'))
for json_string in tmp)
else:
if self._initial_frame is not None:
container.type_shapes = \
initial_frame_container.type_shapes
else:
container.type_shapes = \
container._default_value['type_shapes']
for name in container._default_value:
if name in ('N', 'types', 'type_shapes'):
continue
# per particle/bond quantities
if self.file.chunk_exists(frame=idx, name=path + '/' + name):
container.__dict__[name] = self.file.read_chunk(
frame=idx, name=path + '/' + name)
else:
if (self._initial_frame is not None
and initial_frame_container.N == container.N):
# read default from initial frame
container.__dict__[name] = \
initial_frame_container.__dict__[name]
else:
# initialize from default value
tmp = numpy.array([container._default_value[name]])
s = list(tmp.shape)
s[0] = container.N
container.__dict__[name] = numpy.empty(shape=s,
dtype=tmp.dtype)
container.__dict__[name][:] = tmp
container.__dict__[name].flags.writeable = False
# read state data
for state in snap._valid_state:
if self.file.chunk_exists(frame=idx, name='state/' + state):
snap.state[state] = self.file.read_chunk(frame=idx,
name='state/' + state)
# read log data
logged_data_names = self.file.find_matching_chunk_names('log/')
for log in logged_data_names:
if self.file.chunk_exists(frame=idx, name=log):
snap.log[log[4:]] = self.file.read_chunk(frame=idx, name=log)
else:
if self._initial_frame is not None:
snap.log[log[4:]] = self._initial_frame.log[log[4:]]
# store initial frame
if self._initial_frame is None and idx == 0:
self._initial_frame = snap
return snap
def __getitem__(self, key):
"""Index trajectory frames.
The index can be a positive integer, negative integer, or slice and is
interpreted the same as `list` indexing.
Warning:
As you loop over frames, each frame is read from the file when it is
reached in the iteration. Multiple passes may lead to multiple disk
reads if the file does not fit in cache.
"""
if isinstance(key, slice):
return _HOOMDTrajectoryView(self, range(*key.indices(len(self))))
elif isinstance(key, int):
if key < 0:
key += len(self)
if key >= len(self) or key < 0:
raise IndexError()
return self.read_frame(key)
else:
raise TypeError
def __iter__(self):
"""Iterate over HOOMD trajectories."""
return _HOOMDTrajectoryIterable(self, range(len(self)))
def __enter__(self):
"""Enter the context manager."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""Close the file when the context manager exits."""
self.file.close()
def open(name, mode='rb'):
"""Open a hoomd schema GSD file.
The return value of `open` can be used as a context manager.
Args:
name (str): File name to open.
mode (str): File open mode.
Returns:
An `HOOMDTrajectory` instance that accesses the file *name* with the
given mode.
Valid values for mode:
+------------------+---------------------------------------------+
| mode | description |
+==================+=============================================+
| ``'rb'`` | Open an existing file for reading. |
+------------------+---------------------------------------------+
| ``'rb+'`` | Open an existing file for reading and |
| | writing. |
+------------------+---------------------------------------------+
| ``'wb'`` | Open a file for writing. Creates the file |
| | if needed, or overwrites an existing file. |
+------------------+---------------------------------------------+
| ``'wb+'`` | Open a file for reading and writing. |
| | Creates the file if needed, or overwrites |
| | an existing file. |
+------------------+---------------------------------------------+
| ``'xb'`` | Create a gsd file exclusively and opens it |
| | for writing. |
| | Raise an :py:exc:`FileExistsError` |
| | exception if it already exists. |
+------------------+---------------------------------------------+
| ``'xb+'`` | Create a gsd file exclusively and opens it |
| | for reading and writing. |
| | Raise an :py:exc:`FileExistsError` |
| | exception if it already exists. |
+------------------+---------------------------------------------+
| ``'ab'`` | Open an existing file for writing. |
| | Does *not* create or overwrite existing |
| | files. |
+------------------+---------------------------------------------+
"""
if fl is None:
raise RuntimeError("file layer module is not available")
if gsd is None:
raise RuntimeError("gsd module is not available")
gsdfileobj = fl.open(name=str(name),
mode=mode,
application='gsd.hoomd ' + gsd.__version__,
schema='hoomd',
schema_version=[1, 4])
return HOOMDTrajectory(gsdfileobj)
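# Editor's sketch (not part of the original module): a small write/read round trip
# using the open() function above. The file name and particle data are placeholders.
def _example_round_trip(path='example.gsd'):
    snap = Snapshot()
    snap.particles.N = 2
    snap.particles.position = [[0, 0, 0], [1, 0, 0]]
    snap.configuration.box = [10, 10, 10, 0, 0, 0]
    with open(path, 'wb') as traj:
        traj.append(snap)
    with open(path, 'rb') as traj:
        # chunks not written explicitly are filled in from defaults on read
        return len(traj), traj[0].particles.position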
|
py
|
1a5cfc9126fd6218501fda906e6afff03cad9f87
|
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2016, Clearpath Robotics
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Open Source Robotics Foundation, Inc. nor
# the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from __future__ import print_function
import subprocess
import sys
import threading
import time
try:
import queue
except ImportError:
import Queue as queue
CONCURRENT_DEFAULT = 16
def freeze_distribution_sources(dist, release_version=False, release_tag=False,
concurrent_ops=CONCURRENT_DEFAULT, quiet=False):
# Populate this queue with tuples of repositories instances to be updated,
# so that this work can be spread across multiple threads.
work_queue = queue.Queue()
    for repo_name, repo in dist.repositories.items():
# Only manipulate distribution entries with a source repo listed.
if repo.source_repository:
# Decide which git ref string we'll be using as the replacement match.
if repo.release_repository and (release_version or release_tag):
version = repo.release_repository.version.split('-')[0]
else:
version = repo.source_repository.version
work_queue.put((repo.source_repository, version, release_tag))
total_items = work_queue.qsize()
for i in range(concurrent_ops):
threading.Thread(target=_worker, args=[work_queue]).start()
# Wait until the threads have done all the work and exited.
while not work_queue.empty():
time.sleep(0.1)
if not quiet:
sys.stdout.write("Updating source repo versions (%d/%d) \r" %
(total_items - work_queue.qsize(), total_items))
sys.stdout.flush()
work_queue.join()
# Clear past the updating line.
if not quiet:
print("")
# Get the repo commit information
def _get_repo_info(url, retry=2, retry_period=1):
cmd = ['git', 'ls-remote', url]
try:
return subprocess.check_output(cmd).splitlines()
except subprocess.CalledProcessError as err:
if not retry:
raise
print(' Non-zero return code for: %s, retrying in %f seconds' %
(' '.join(cmd), retry_period), file=sys.stderr)
        # brief delay in case it's an intermittent issue with infrastructure
time.sleep(retry_period)
return _get_repo_info(url, retry=retry - 1, retry_period=retry_period * 2)
def _worker(work_queue):
while True:
try:
source_repo, freeze_version, freeze_to_tag = work_queue.get(block=False)
ls_remote_lines = _get_repo_info(source_repo.url)
for line in ls_remote_lines:
hash, ref = line.split('\t', 1)
if freeze_to_tag and ref == 'refs/tags/%s' % freeze_version:
source_repo.version = ref.split('refs/tags/')[1]
break
elif ref in ('refs/heads/%s' % freeze_version, 'refs/tags/%s' % freeze_version):
source_repo.version = hash
break
work_queue.task_done()
except subprocess.CalledProcessError as e:
print("No information could be retrieved for repo %s with error: %s" %
(source_repo.url, e), file=sys.stderr)
work_queue.task_done()
except queue.Empty:
break
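# Editor's sketch (not part of the original script): the `git ls-remote` output
# consumed by _worker above is one "<sha>\t<ref>" pair per line; this shows how
# the split and the tag/branch matching behave. The sha below is a made-up
# placeholder value.
def _example_match_ref(freeze_version='1.2.3', freeze_to_tag=True):
    line = '0123456789abcdef0123456789abcdef01234567\trefs/tags/1.2.3'
    hash, ref = line.split('\t', 1)
    if freeze_to_tag and ref == 'refs/tags/%s' % freeze_version:
        return ref.split('refs/tags/')[1]    # freeze to the tag name itself
    elif ref in ('refs/heads/%s' % freeze_version, 'refs/tags/%s' % freeze_version):
        return hash                          # freeze to the commit hash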
|
py
|
1a5cfcc822a07414a7ad9120e931e590f8815274
|
import tensorflow as tf
import numpy as np
from .gradients import GradientAttribution
class IntegratedGradients(GradientAttribution):
def GetMask(self, x_value, feed_dict={}, x_baseline=None, x_steps=25):
if x_baseline is None:
x_baseline = np.zeros_like(x_value)
assert x_baseline.shape == x_value.shape
x_diff = x_value - x_baseline
total_gradients = np.zeros_like(x_value)
for alpha in np.linspace(0, 1, x_steps):
x_step = x_baseline + alpha * x_diff
total_gradients += super().GetMask(x_step, feed_dict)
return total_gradients * x_diff / x_steps
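# Editor's sketch (not part of the original module): the same Riemann-sum
# approximation used by GetMask above, written against a hand-coded gradient so
# the math is easy to check. For f(x) = x ** 2 the gradient is 2 * x, and the
# average-gradient-times-(x - baseline) form approaches f(x) - f(baseline) as
# x_steps grows. Function and argument names here are illustrative only.
def _example_integrated_gradients(x_value, x_baseline=0.0, x_steps=1000):
    grad = lambda x: 2.0 * x  # gradient of f(x) = x ** 2
    x_diff = x_value - x_baseline
    total_gradients = 0.0
    for alpha in np.linspace(0, 1, x_steps):
        total_gradients += grad(x_baseline + alpha * x_diff)
    return total_gradients * x_diff / x_steps  # ~ f(x_value) - f(x_baseline)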
|
py
|
1a5cfdf027e579f17585b5d71d8e6463801586c8
|
"""Tests for runway.cfngin.hooks.iam."""
import unittest
import boto3
from awacs.helpers.trust import get_ecs_assumerole_policy
from botocore.exceptions import ClientError
from moto import mock_iam
from runway.cfngin.hooks.iam import _get_cert_arn_from_response, create_ecs_service_role
from ..factories import mock_context, mock_provider
REGION = "us-east-1"
# No test for stacker.hooks.iam.ensure_server_cert_exists until
# updated version of moto is imported
# (https://github.com/spulec/moto/pull/679) merged
class TestIAMHooks(unittest.TestCase):
"""Tests for runway.cfngin.hooks.iam."""
def setUp(self):
"""Run before tests."""
self.context = mock_context(namespace="fake")
self.provider = mock_provider(region=REGION)
def test_get_cert_arn_from_response(self):
"""Test get cert arn from response."""
arn = "fake-arn"
# Creation response
response = {"ServerCertificateMetadata": {"Arn": arn}}
self.assertEqual(_get_cert_arn_from_response(response), arn)
# Existing cert response
response = {"ServerCertificate": response}
self.assertEqual(_get_cert_arn_from_response(response), arn)
def test_create_service_role(self):
"""Test create service role."""
with mock_iam():
client = boto3.client("iam", region_name=REGION)
role_name = "ecsServiceRole"
with self.assertRaises(ClientError):
client.get_role(RoleName=role_name)
self.assertTrue(
create_ecs_service_role(context=self.context, provider=self.provider,)
)
role = client.get_role(RoleName=role_name)
self.assertIn("Role", role)
self.assertEqual(role_name, role["Role"]["RoleName"])
policy_name = "AmazonEC2ContainerServiceRolePolicy"
client.get_role_policy(RoleName=role_name, PolicyName=policy_name)
def test_create_service_role_already_exists(self):
"""Test create service role already exists."""
with mock_iam():
client = boto3.client("iam", region_name=REGION)
role_name = "ecsServiceRole"
client.create_role(
RoleName=role_name,
AssumeRolePolicyDocument=get_ecs_assumerole_policy().to_json(),
)
self.assertTrue(
create_ecs_service_role(context=self.context, provider=self.provider,)
)
role = client.get_role(RoleName=role_name)
self.assertIn("Role", role)
self.assertEqual(role_name, role["Role"]["RoleName"])
policy_name = "AmazonEC2ContainerServiceRolePolicy"
client.get_role_policy(RoleName=role_name, PolicyName=policy_name)
|
py
|
1a5cfe68ea61f6f0c9b9663b92ba192f9bfb9d63
|
from multiprocessing import Pool
import torch.utils
import torch.utils.data
from data_utils import indexed_dataset
import torch
import os
import re
import pdb
from data_utils.tokenization import BertWordPieceTokenizer
key_word = {
"…":"...",
"—":"-",
"“":"\"",
"”":"\"",
"‘":"'",
"’":"'"
}
SPECIAL_SIGNAL = "./';,\(\)\"\"'~`''“”《》<>"
def cut_sentence(paragraph):
paragraph = paragraph.replace(" ", "")
    sentences = re.split('(。|!|\!|?|\?)', paragraph)  # keep the sentence delimiters
if len(sentences) == 1:
return [sentences[0]]
new_sents = []
for i in range(int(len(sentences)/2)):
sent = sentences[2*i] + sentences[2*i+1]
if len(new_sents) != 0 and (sent[0] in SPECIAL_SIGNAL or len(new_sents[-1]) < 20):
new_sents[-1] += sent
else:
new_sents.append(sent)
sent = sentences[-1]
if len(sentences) % 2 == 1 and len(sent) > 0:
if len(new_sents) != 0 and (sent[0] in SPECIAL_SIGNAL or len(new_sents[-1]) < 20):
new_sents[-1] += sent
else:
new_sents.append(sent)
return new_sents
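# Editor's sketch (not part of the original script): a quick look at cut_sentence.
# The paragraph is split on the sentence-ending marks above (delimiters are kept),
# and a segment is merged into the previous sentence when it starts with a special
# symbol or when the previous sentence is shorter than 20 characters. The sample
# text is illustrative only.
def _demo_cut_sentence():
    paragraph = "今天天气很好。我们一起去公园散步吧!"
    for sent in cut_sentence(paragraph):
        print(sent)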
def replace_text(text):
for key,value in key_word.items():
text = re.sub(key, value, text)
return text
def safe_readline(f):
pos = f.tell()
while True:
try:
return f.readline()
except UnicodeDecodeError:
pos -= 1
f.seek(pos) # search where this character begins
def read_split(
filename, tokenizer, worker_id, num_workers, type_doc, min_lens=10
):
with open(filename, 'r') as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_workers
offset = worker_id * chunk_size
end = offset + chunk_size
f.seek(offset)
if offset > 0:
safe_readline(f) # drop first incomplete line
result = []
line = f.readline()
while line:
line = replace_text(line)
ids = tokenizer.convert_text_to_ids(line)
ids = ids[:509]
if len(ids) >= min_lens:
ids = [type_doc]+ids
result.append(ids)
if f.tell() > end:
break
line = f.readline()
return result
def merge_multi_line(
filename, tokenizer, worker_id, num_workers, type_doc, min_lens=10
):
with open(filename, 'r') as f:
size = os.fstat(f.fileno()).st_size
chunk_size = size // num_workers
offset = worker_id * chunk_size
end = offset + chunk_size
eos_id = tokenizer.eos()
f.seek(offset)
if offset > 0:
safe_readline(f) # drop first incomplete line
result = []
line = f.readline()
tmp_ids = []
while line:
line = replace_text(line)
ids = tokenizer.convert_text_to_ids(line)+[eos_id]
# tmp_ids.extend(ids)
if len(tmp_ids) + len(ids) > 511:
ids_cur = tmp_ids[:511]
if ids_cur[0] == eos_id:
ids_cur[0] = type_doc
else:
ids_cur = [type_doc] + ids_cur
if ids_cur[-1] == eos_id:
ids_cur.pop()
ids_cur = ids_cur[:511]
result.append(ids_cur)
tmp_ids = tmp_ids[511:]
if len(tmp_ids) + len(ids) < 511:
tmp_ids += ids
else:
tmp_ids = ids[-511:]
else:
tmp_ids.extend(ids)
if f.tell() > end:
break
line = f.readline()
return result
def main_multi_task(args):
from argparse import ArgumentParser
parser = ArgumentParser()
# parser.add_argument("--tokenizer", type=str, help="where to load vocabulary")
parser.add_argument("--data", type=str)
parser.add_argument("--out", type=str, help="output path")
parser.add_argument("--prefix", type=str, default="train")
parser.add_argument("--workers", type=int, default=6)
parser.add_argument("--task", type=str, choices=['single', 'multi'], default="single")
args = parser.parse_args(args)
tokenizer = BertWordPieceTokenizer("bert-base-chinese", cache_dir="temp_cache_dir")
data_bin = os.path.join(args.out, "{}-CLM.bin".format(args.prefix))
data_idx = os.path.join(args.out, "{}-CLM.idx".format(args.prefix))
data_ds = indexed_dataset.IndexedDatasetBuilder(data_bin)
def comsume(worker_result):
for ids in worker_result:
data_ds.add_item(torch.IntTensor(ids)
)
pool = Pool(processes=args.workers)
worker_result = []
if args.task == "single":
handle_func = read_split
elif args.task == "multi":
handle_func = merge_multi_line
for i in range(args.workers):
w = pool.apply_async(
handle_func,
(
args.data,
tokenizer,
i,
args.workers,
0,
10
),
callback=comsume
)
worker_result.append(w)
pool.close()
pool.join()
data_ds.finalize(data_idx)
print("| write data into {}".format(args.out))
if __name__ == "__main__":
import sys
main_multi_task(sys.argv[1:])
|
py
|
1a5cfe6cd2d1cd2dde37d616c3a3fa4aac4d1484
|
# insertion sort practice
# exercises from introductions to algorithms 3rd edition
import numpy as np
import math
A = np.array([15, 26, 34, 31, 57, 93, 27])
# ascending order, 2.1
for j in range(1, np.size(A)):
key = A[j]
i = j - 1
while (i > -1) and (A[i] > key):
A[i+1] = A[i]
i = i-1
A[i+1] = key
print('Ascending order A is', A)
# descending order, exercise 2.1-2
for j in range(1, np.size(A)):
key = A[j]
i = j - 1
while (i > -1) and (A[i] < key):
A[i+1] = A[i]
i = i-1
A[i+1] = key
print('Descending order A is', A)
# find the index of a number v from A, exercise 2.1-3
v = 32
quitflag = 0
for j in range(np.size(A)):
key = v
if A[j] == key:
quitflag = 1
break
if quitflag == 0:
print('v is not in A')
else:
print('index is', j+1)
# or if def as a function, use return instead of quitflag
# sum of two n-bit binary integers, 2.1-4
A = [1, 0, 1, 0, 1, 0, 1]
B = [1, 1, 1, 0, 0, 1, 0]
C = []
n = len(A)
carry = 0
for i in range(n-1, -1, -1):
C.append((A[i] + B[i] + carry) % 2)
carry = math.floor((A[i] + B[i] + carry) / 2)
C.append(carry)
C.reverse()
print('C is', C)
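# Editor's check (added): verify the ripple-carry addition above by converting the
# bit lists to integers; with the A and B given above this is 85 + 114 == 199.
assert int(''.join(map(str, A)), 2) + int(''.join(map(str, B)), 2) == \
    int(''.join(map(str, C)), 2)
print('Binary addition check passed')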
|
py
|
1a5cfeb33e52220a80742d068b6ede159402e4f6
|
import numpy
import os
from grocsvs import datasets as svdatasets
from grocsvs import step
from grocsvs import utilities
from grocsvs.stages import call_readclouds
CHUNKSIZE = 5e7
def chunks_for_chrom(options, chrom):
return int(numpy.ceil(options.reference.chrom_lengths[chrom]/CHUNKSIZE))
class WindowBarcodesStep(step.StepChunk):
"""
Build a list of all the fragment barcodes overlapping each
genomic window
Output files:
bcwindows.sample.dataset.chrom.pickle - dictionary:
- barcode_windows - a list of sets of barcode IDs
        - barcode_map - a dict of barcode->barcode ID
- window_size - the size of the genomic window used; eg 10,000 would
mean that window starts were range(0, chrom_length, 10000)
"""
@staticmethod
def get_steps(options):
for sample, dataset in options.iter_10xdatasets():
for chrom in options.reference.chroms:
for chunk in range(chunks_for_chrom(options, chrom)):
yield WindowBarcodesStep(
options, sample, dataset, chrom, chunk)
def __init__(self, options, sample, dataset, chrom, chunk):
self.options = options
self.sample = sample
self.dataset = dataset
self.chrom = chrom
self.chunk = chunk
assert isinstance(self.dataset, svdatasets.TenXDataset)
def __str__(self):
return ".".join([self.__class__.__name__,
self.sample.name,
self.dataset.id,
self.chrom,
str(self.chunk)])
def outpaths(self, final):
directory = self.results_dir if final \
else self.working_dir
file_name = "bcwindows.{}.{}.{}.{}.pickle".format(
self.sample.name,
self.dataset.id,
self.chrom,
self.chunk)
paths = {
"bcwindows": os.path.join(directory, file_name)
}
return paths
def run(self):
import logging
logging.info("running!")
window_size = self.options.constants["window_size"]
outpath = self.outpaths(final=False)["bcwindows"]
self.logger.log("Loading barcode map...")
# call_readclouds_step = call_readclouds.FilterFragmentsStep(
input_step = call_readclouds.CombineReadcloudsStep(
self.options, self.sample, self.dataset)
barcode_map = utilities.pickle.load(
open(input_step.outpaths(final=True)["barcode_map"]))
chrom_length = self.options.reference.chrom_lengths[self.chrom]
start = int(self.chunk*CHUNKSIZE)
end = int(min((self.chunk+1)*CHUNKSIZE, chrom_length))
self.logger.log("Running chunk: {}:{:,}-{:,}".format(self.chrom, start, end))
fragments = call_readclouds.load_fragments(
self.options, self.sample, self.dataset,
self.chrom, start, end, min_reads_per_frag=0)
barcode_windows = get_barcode_windows(
fragments, barcode_map, window_size, chrom_length, start, end)
self.logger.log("Saving results...")
result = {
"barcode_windows": barcode_windows,
# "barcode_map": barcode_map,
"nbcs": len(barcode_map),
"window_size": window_size
}
utilities.pickle.dump(result, open(outpath, "w"), protocol=-1)
def get_barcode_windows(fragments, barcode_map, window_size, chrom_length, start, end):
window_starts = range(start, end, window_size)
barcode_windows = []
for start in window_starts:
end = start + window_size
overlap = utilities.frags_overlap_same_chrom(fragments, start, end)
barcodes = set(barcode_map[bc] for bc in overlap["bc"])
barcode_windows.append(barcodes)
return barcode_windows
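# Editor's sketch (not part of the original module): how a chromosome is split into
# CHUNKSIZE pieces and each piece into window_size windows, mirroring
# chunks_for_chrom() and get_barcode_windows() above. The chromosome length and
# window size below are illustrative only.
def _example_windowing(chrom_length=125000000, window_size=10000):
    nchunks = int(numpy.ceil(chrom_length / CHUNKSIZE))
    chunk_bounds = [(int(i * CHUNKSIZE), int(min((i + 1) * CHUNKSIZE, chrom_length)))
                    for i in range(nchunks)]
    first_start, first_end = chunk_bounds[0]
    window_starts = range(first_start, first_end, window_size)
    return nchunks, len(window_starts)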
|
py
|
1a5cfeb7cb856e98273ebd150581f662b7069cac
|
from mininet.log import debug, error, output
from mininet.node import Host, OVSSwitch
from varanuspy.functions import special, getqos, setqos, delqos, resetqos, get_globals, get_locals
from varanuspy.minadapter import CustomMixin
from varanuspy.rcli import status_rcli, start_rcli, stop_rcli
from varanuspy.utils import some, as_bool, newline, is_some, is_somestr
def get_cmds():
return {
'pxx' : _pxx,
'rcli' : _rcli,
'restartsw' : _restart_switches,
'control' : _control_node,
'getqos' : _get_link_qos,
'setqos' : _set_link_qos,
'delqos' : _del_link_qos,
'resetqos' : _reset_qos,
'flows' : _ovs_dump_flows,
'nuttcp' : _run_nuttcp,
'nutudp' : _run_nutudp
}
def _pxx( cli, line ):
""" Executes a python statement like command 'px', except newly defined variables or functions will be temporary.
Additionally, the following functions are available:
- cli(cmd) : call a command as if it was called in the console
- special(name) : returns a SpecialNode object that can be passed to other functions
- cmd(node, *args) : call a command on the given node and return its output when complete
- nodes() : returns a list of all nodes
- switches() : returns a list of all switches
- hosts() : returns a list of all hosts
- links() : returns a list of all (unidirectional) links
- links(src, dst) : returns the (unidirectional) links between the given nodes
- sid(dpid) : returns the switch object that has the given DPID, or None if none exists
- hip(ip_address) : returns the host object that has the given IP address, or None if none exists
- isremote(node) : returns true if the given node is remote, or false otherwise
- nsrclinks(node) : returns a list of links starting from the given node
- ssrclinks(node) : returns a list of links starting from the given node and ending in switches
- hsrclinks(node) : returns a list of links starting from the given node and ending in hosts
- ndstlinks(node) : returns a list of links ending on the given node
- sdstlinks(node) : returns a list of links ending on the given node and starting in switches
- hdstlinks(node) : returns a list of links ending on the given node and starting in hosts
- getqos(src, dst) : returns (bandwidth, netem_configuration)
- setqos(src, dst, band, netem): sets a bandwidth and a Netem configuration on the given link
- delqos(src, dst) : removes the bandwidth and Netem configurations from the given link
- resetqos() : removes all bandwidth and Netem configurations in the local machine
Usage: pxx <python statement>
"""
try:
globs = get_globals( cli.mn )
globs['cli'] = lambda cmd : cli.onecmd( cmd )
locs = get_locals( cli.mn )
exec line in globs, locs
except Exception as e:
output( newline( str( e ) ) )
def _rcli( cli, line ):
""" Listens for TCP connections at the specified port and accepts CLI commands from active clients.
NOTE: Whenever we mention strings to be transmitted, we mean UTF-8 strings prefixed by a big-endian 4-byte value
representing the string length
Commands have the format '<type><value>', where:
- <type> is a byte value representing the type of command to execute
- <value> is a string representing the command value that is interpreted differently according to its type
The following command types are available (represented as integers):
- 0: the command value is interpreted as a python expression and its result is sent back to the client through
the same TCP connection
- 1: the command value is interpreted as a shell command to be executed on a specific node and its output can be
progressively sent to a designated TCP address until the command finishes or the client interrupts it
For python expression commands, the possible result formats are:
- If the expression returned None : the byte '0'
- If the expression returned <something>: the byte '1' followed by a string representation of <something>
- If the expression raised an exception : the byte '2' followed by a string representation of the exception
For python expression commands, the following functions are available:
- special(name) : returns a SpecialNode object that can be passed to other functions
- cmd(node, *args) : call a command on the given node and return its output when complete
- nodes() : returns a list of all nodes
- switches() : returns a list of all switches
- hosts() : returns a list of all hosts
- links() : returns a list of all (unidirectional) links
- links(src, dst) : returns the (unidirectional) links between the given nodes
- sid(dpid) : returns the switch object that has the given DPID, or None if none exists
- hip(ip_address) : returns the host object that has the given IP address, or None if none exists
- isremote(node) : returns true if the given node is remote, or false otherwise
- nsrclinks(node) : returns a list of links starting from the given node
- ssrclinks(node) : returns a list of links starting from the given node and ending in switches
- hsrclinks(node) : returns a list of links starting from the given node and ending in hosts
- ndstlinks(node) : returns a list of links ending on the given node
- sdstlinks(node) : returns a list of links ending on the given node and starting in switches
- hdstlinks(node) : returns a list of links ending on the given node and starting in hosts
- getqos(src, dst) : returns (bandwidth, netem_configuration)
- setqos(src, dst, band, netem): sets a bandwidth and a Netem configuration on the given link
- delqos(src, dst) : removes the bandwidth and Netem configurations from the given link
- resetqos() : removes all bandwidth and Netem configurations in the local machine
Shell command details:
- A shell command is executed asynchronously by the server until it finishes or until the client requests for it
to stop
- The command output can be sent to a designated TCP socket address as it is available, line by line
- Each command is uniquely identified by a client-provided key string and has a distinct TCP connection to
(optionally) send the output
- Stopping a command is done either by terminating the process (sending a SIGTERM signal to it) or by executing
a custom command provided by the client
For shell commands, the possible command value formats are:
- "start <key> <node> <host>:<port> <command>[ <command args>]*"
- "start_no_output <key> <node> <command>[ <command args>]*"
- "stop <key>"
- "stop_custom <key> <command>[ <command_args>]*"
For shell commands, the possible result formats are (sent back through the main TCP connection):
- If the command was successfully started/stopped: the byte '1' followed by the command key string
- If the command could not be started/stopped : the byte '2' followed by the command key string followed by
an error string
For shell commands, the format of each output line is <command key><output line>
Usage: rcli { start <local_port> | stop | status }
"""
usage = 'rcli { start <local_port> | stop | status }'
args = line.split()
if len( args ) < 1:
error( newline( 'Invalid number of arguments' ) )
error( newline( 'Usage:', usage ) )
else:
if args[0] == 'status':
__status_rcli()
elif args[0] == 'stop':
__stop_rcli()
elif args[0] == 'start':
if len( args ) < 2:
error( newline( 'Invalid number of arguments' ) )
error( newline( 'Usage:', usage ) )
else:
__start_rcli( cli, args[1] )
else:
error( newline( 'Invalid argument' ) )
error( newline( 'Usage:', usage ) )
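# Editor's sketch (not part of the original module): one way a client could frame a
# python-expression command for the RCLI protocol documented in _rcli above.
# Strings are sent as UTF-8 prefixed with a big-endian 4-byte length, per the
# docstring; the exact encoding of the command-type byte and the reply parsing
# below are assumptions, and host/port are placeholders. A robust client would
# also loop on recv() until the expected number of bytes has arrived.
def _example_rcli_expression( host, port, expression ):
    import socket
    import struct
    def pack_string( s ):
        data = s.encode( 'utf-8' )
        return struct.pack( '>I', len( data ) ) + data
    sock = socket.create_connection( ( host, port ) )
    try:
        # command type 0 (python expression) followed by the length-prefixed value
        sock.sendall( b'\x00' + pack_string( expression ) )
        result_type = sock.recv( 1 )
        if result_type == b'0':
            return None                      # expression returned None
        ( length, ) = struct.unpack( '>I', sock.recv( 4 ) )
        return result_type, sock.recv( length ).decode( 'utf-8' )
    finally:
        sock.close()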
def __status_rcli():
if status_rcli():
output( newline( '< RCLI server is active >' ) )
else:
output( newline( '< RCLI server is inactive >' ) )
def __stop_rcli():
if stop_rcli():
output( newline( '< RCLI server was stopped >' ) )
else:
error( newline( 'RCLI server is inactive, nothing to stop' ) )
def __start_rcli( cli, listenport ):
try:
if not start_rcli( listenport, cli.mn ):
error( newline( 'An RCLI server is already active, cannot start a new one' ) )
except ValueError as e:
error( newline( e ) )
def _restart_switches( cli, line ):
""" Stops and restarts one or more switches
Usage: restartsw <switch>[, <switch>]*
"""
usage = 'restartsw <switch>[, <switch>]*'
args = line.split()
if len( args ) < 1:
error( newline( 'Invalid number of arguments' ) )
error( newline( 'Usage:', usage ) )
else:
sw_names = args
switches = []
for name in sw_names:
if name not in cli.mn:
error( newline( 'Switch', '"' + name + '"', 'does not exist' ) )
return
else:
switches.append( cli.mn[ name ] )
for sw in switches:
output( newline( 'Restarting switch', sw, '...' ) )
stopline = 'switch {} stop'.format( sw )
startline = 'switch {} start'.format( sw )
cli.onecmd( stopline )
cli.onecmd( startline )
output( newline( '<done>' ) )
def _control_node( cli, line ):
""" Executes a method on one or more Mininet nodes (controllers, hosts, switches, etc.)
Usage: control <op> <node>[, <node>]*
"""
usage = 'control <op> <node>[, <node>]*'
args = line.split()
if len( args ) < 2:
error( newline( 'Invalid number of arguments' ) )
error( newline( 'Usage:', usage ) )
else:
op_name = args[0]
node_names = args[1:]
nodes = []
for name in node_names:
if name not in cli.mn:
error( newline( 'Node', '"' + name + '"', 'does not exist' ) )
return
else:
node = cli.mn[ name ]
if not isinstance( node, CustomMixin ):
error( newline( 'Node', node, 'cannot be controlled in this manner' ) )
else:
nodes.append( node )
ops = []
for node in nodes:
op = getattr( node, op_name ) if hasattr( node, op_name ) else None
if not is_some( op ) or not callable( op ):
error( newline( 'Unrecognized method', '"' + op_name + '"', \
'on node', node ) )
return
else:
ops.append( op )
# Run all the methods once we know they're safe
for op in ops:
op( cli=cli )
def _get_link_qos( cli, line ):
""" Returns the bandwidth and Netem configurations currently assigned to the provided unidirectional link.
Usage: getqos <src_node> <dst_node>
"""
usage = 'getqos <src_node> <dst_node>'
args = line.split()
if len( args ) != 2:
error( newline( 'Invalid number of arguments' ) )
error( newline( 'Usage:', usage ) )
else:
try:
src = special( cli.mn, args[0] )
dst = special( cli.mn, args[1] )
band, netem = getqos( cli.mn, src, dst )
output( newline( 'Bandwidth: {}'.format( band ) ) )
output( newline( 'Netem : {}'.format( netem ) ) )
except ( ValueError, RuntimeError ) as e:
error( newline( e ) )
def _set_link_qos( cli, line ):
""" Sets up bandwidth and Netem configurations on the provided unidirectional link.
Usage: setqos <src_node> <dst_node> <bandwidth> [<netem_params...>]
"""
usage = 'setqos <src_node> <dst_node> <bandwidth> [<netem_params...>]'
args = line.split()
if len( args ) < 3:
error( newline( 'Invalid number of arguments' ) )
error( newline( 'Usage:', usage ) )
else:
try:
src = special( cli.mn, args[0] )
dst = special( cli.mn, args[1] )
band = args[2]
if len( args ) > 3:
netem = ' '.join( args[3:] )
else:
netem = None
if setqos( cli.mn, src, dst, band, netem ):
output( newline( '<done>' ) )
else:
output( newline( '<operation failed>' ) )
except ( ValueError, RuntimeError ) as e:
error( newline( e ) )
def _del_link_qos( cli, line ):
""" Removes bandwidth and Netem configurations from the provided unidirectional link.
Usage: delqos <src_node> <dst_node>
"""
usage = 'delqos <src_node> <dst_node>'
args = line.split()
if len( args ) != 2:
error( newline( 'Invalid number of arguments' ) )
error( newline( 'Usage:', usage ) )
else:
try:
src = special( cli.mn, args[0] )
dst = special( cli.mn, args[1] )
if delqos( cli.mn, src, dst ):
output( newline( '<done>' ) )
else:
output( newline( '<operation failed>' ) )
except ( ValueError, RuntimeError ) as e:
error( newline( e ) )
def _reset_qos( _cli, line ):
""" Removes all bandwidth and Netem configurations in the local machine.
Usage: resetqos
"""
usage = 'resetqos'
args = line.split()
if len( args ) != 0:
error( newline( 'Invalid number of arguments' ) )
error( newline( 'Usage:', usage ) )
else:
if resetqos():
output( newline( '<done>' ) )
else:
output( newline( '<operation failed>' ) )
def _ovs_dump_flows( cli, line ):
""" Runs ovs-ofctl in one or more OvS switches to dump the flows of each switch.
Usage: flows <switch>[, <switch>]*
"""
usage = 'flows <switch>[, <switch>]*'
args = line.split()
if len( args ) < 1:
error( newline( 'Invalid number of arguments' ) )
error( newline( 'Usage:', usage ) )
else:
sw_names = args
switches = []
for name in sw_names:
if name not in cli.mn:
error( newline( 'Switch', '"' + name + '"', 'does not exist' ) )
return
else:
sw = cli.mn[ name ]
if not isinstance( sw, OVSSwitch ):
error( newline( '"' + name + '"', 'is not an OvS switch' ) )
return
else:
switches.append( sw )
for sw in switches:
intro = '{} flows'.format( sw )
output( newline() )
output( newline( '=' * 80 ) )
output( newline( intro.center( 80 ) ) )
output( newline( '=' * 80 ) )
ofver = some( sw.protocols, name='sw.protocols' )
cmdline = '{0} ovs-ofctl -O {1} dump-flows \'{0}\''.format( sw, ofver )
cli.onecmd( cmdline )
def _run_nuttcp( cli, line ):
""" Runs nuttcp between a pair of client/server hosts with TCP traffic for 60 minutes.
Usage: nuttcp client_host server_host [rate_limit]
"""
usage = 'nuttcp client_host server_host [rate_limit]'
try:
client, server, rate = __parse_nuttcp_args( cli, line, usage )
__run_nuttcp( cli, client, server, rate=rate )
except ValueError:
pass
def _run_nutudp( cli, line ):
""" Runs nuttcp between a pair of client/server hosts with UDP traffic for 60 minutes.
Usage: nutudp client_host server_host [rate_limit]
"""
usage = 'nutudp client_host server_host [rate_limit]'
try:
client, server, rate = __parse_nuttcp_args( cli, line, usage )
__run_nuttcp( cli, client, server, rate=rate, udp=True )
except ValueError:
pass
def __parse_nuttcp_args( cli, line, usage ):
args = line.split()
if len( args ) < 2 or len( args ) > 3:
error( newline( 'Invalid number of arguments' ) )
error( newline( 'Usage:', usage ) )
raise ValueError()
else:
cname = args[0]
sname = args[1]
rate = args[2] if len( args ) == 3 else None
if cname not in cli.mn:
error( newline( 'Host', '"' + cname + '"', 'does not exist' ) )
raise ValueError()
elif sname not in cli.mn:
error( newline( 'Host', '"' + sname + '"', 'does not exist' ) )
raise ValueError()
else:
client = cli.mn[ cname ]
server = cli.mn[ sname ]
if not isinstance( client, Host ):
error( newline( '"' + cname + '"', 'is not a Host' ) )
raise ValueError()
elif not isinstance( server, Host ):
error( newline( '"' + sname + '"', 'is not a Host' ) )
raise ValueError()
else:
return ( client, server, rate )
def __run_nuttcp( cli, client, server, rate=None, udp=False ):
traffic = 'UDP traffic' if as_bool( udp ) is True else 'TCP traffic'
if is_somestr( rate ):
traffic = '{} ({})'.format( traffic, rate )
intro = 'Running nuttcp between ' + str( client ) + ' and ' + str( server )\
+ ' with ' + traffic + ' for 60 minutes'
output( newline() )
output( newline( '=' * 80 ) )
output( newline( intro.center( 80 ) ) )
output( newline( '=' * 80 ) )
sstartline = '{} nuttcp -S'.format( server )
sstopline = '{} pkill -f \'nuttcp -S\''.format( server )
copts = ''
if as_bool( udp ) is True:
copts += ' -u'
if is_somestr( rate ):
copts += ' -Ri{}'.format( rate )
cline = '{} nuttcp -ib -T60m{} {}'.format( client, copts, server.IP() )
output( newline( '- Starting nuttcp server at', server ) )
debug( newline( 'Running command: ', sstartline ) )
cli.onecmd( sstartline )
output( newline( '- Running nuttcp client at', client ) )
debug( newline( 'Running command: ', cline ) )
cli.onecmd( cline )
output( newline( '- Stopping nuttcp server at', server ) )
debug( newline( 'Running command: ', sstopline ) )
cli.onecmd( sstopline )
|
py
|
1a5cfeda90697a788af0d026640da94cd1bc127a
|
# Automatically generated
# pylint: disable=all
get = [{'SupportedArchitectures': ['i386', 'x86_64'], 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 3788, 'TotalSizeInGB': 410, 'Disks': [{'SizeInGB': 410, 'Count': 1, 'Type': 'hdd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 'm1.medium', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64']}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 3788}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 410, 'Disks': [{'SizeInGB': 410, 'Count': 1, 'Type': 'hdd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'unsupported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 1740, 'TotalSizeInGB': 160, 'Disks': [{'SizeInGB': 160, 'Count': 1, 'Type': 'hdd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': 'Low', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 'm1.small', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64']}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 1740}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 160, 'Disks': 
[{'SizeInGB': 160, 'Count': 1, 'Type': 'hdd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'unsupported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Low', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 627, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'unsupported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Very Low', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Very Low', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't1.micro', 'CurrentGeneration': False, 'FreeTierEligible': True, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64']}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 627}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'unsupported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Very Low', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Very Low', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 1024, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 
'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.micro', 'CurrentGeneration': True, 'FreeTierEligible': True, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 1024}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.4, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 512, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.nano', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.4}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 512}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 2, 'Ipv6AddressesPerInterface': 2, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.5, 'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 2048, 'EbsOptimizedSupport': 
'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.small', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.5}, 'VCpuInfo': {'DefaultVCpus': 1, 'DefaultCores': 1, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 2048}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 4, 'Ipv6AddressesPerInterface': 4, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 1740, 'TotalSizeInGB': 350, 'Disks': [{'SizeInGB': 350, 'Count': 1, 'Type': 'hdd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 'c1.medium', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64']}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 1740}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 350, 'Disks': [{'SizeInGB': 350, 'Count': 1, 'Type': 'hdd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'unsupported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': 
[{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 2}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 0, 'Ipv6Supported': False, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': False, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': False, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.8, 'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2], 'SizeInMiB': 3840, 'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 16, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'supported', 'EbsOptimizedSupport': 'unsupported', 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['cluster', 'partition', 'spread'], 'InstanceType': 'c3.large', 'CurrentGeneration': False, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs', 'instance-store'], 'SupportedVirtualizationTypes': ['hvm', 'paravirtual'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.8}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 1, 'DefaultThreadsPerCore': 2, 'ValidCores': [1], 'ValidThreadsPerCore': [1, 2]}, 'MemoryInfo': {'SizeInMiB': 3840}, 'InstanceStorageSupported': True, 'InstanceStorageInfo': {'TotalSizeInGB': 32, 'Disks': [{'SizeInGB': 16, 'Count': 2, 'Type': 'ssd'}], 'NvmeSupport': 'unsupported', 'EncryptionSupport': 'unsupported'}, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 10, 'Ipv6AddressesPerInterface': 10, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['cluster', 'partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': False, 'DedicatedHostsSupported': True, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}, {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.3, 'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1, 'SizeInMiB': 4096, 'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported', 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 
'EfaSupported': False, 'EncryptionInTransitSupported': False, 'SupportedStrategies': ['partition', 'spread'], 'InstanceType': 't2.medium', 'CurrentGeneration': True, 'FreeTierEligible': False, 'SupportedUsageClasses': ['on-demand', 'spot'], 'SupportedRootDeviceTypes': ['ebs'], 'SupportedVirtualizationTypes': ['hvm'], 'BareMetal': False, 'Hypervisor': 'xen', 'ProcessorInfo': {'SupportedArchitectures': ['i386', 'x86_64'], 'SustainedClockSpeedInGhz': 2.3}, 'VCpuInfo': {'DefaultVCpus': 2, 'DefaultCores': 2, 'DefaultThreadsPerCore': 1}, 'MemoryInfo': {'SizeInMiB': 4096}, 'InstanceStorageSupported': False, 'EbsInfo': {'EbsOptimizedSupport': 'unsupported', 'EncryptionSupport': 'supported', 'NvmeSupport': 'unsupported'}, 'NetworkInfo': {'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3, 'MaximumNetworkCards': 1, 'DefaultNetworkCardIndex': 0, 'NetworkCards': [{'NetworkCardIndex': 0, 'NetworkPerformance': 'Low to Moderate', 'MaximumNetworkInterfaces': 3}], 'Ipv4AddressesPerInterface': 6, 'Ipv6AddressesPerInterface': 6, 'Ipv6Supported': True, 'EnaSupport': 'unsupported', 'EfaSupported': False, 'EncryptionInTransitSupported': False}, 'PlacementGroupInfo': {'SupportedStrategies': ['partition', 'spread']}, 'HibernationSupported': True, 'BurstablePerformanceSupported': True, 'DedicatedHostsSupported': False, 'AutoRecoverySupported': True, 'SupportedBootModes': ['legacy-bios']}] # noqa: E501
def get_instances_list() -> list:
    '''Return the list of EC2 instance type descriptions whose SupportedArchitectures include i386.'''
# pylint: disable=all
return get
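

# --- Hedged usage sketch (added for illustration; the helper below is hypothetical
# and not part of the original snippet). It shows how the hard-coded descriptions
# returned by get_instances_list() can be filtered, e.g. to list free-tier types. ---
def _example_free_tier_types() -> list:
    return [i['InstanceType'] for i in get_instances_list() if i['FreeTierEligible']]


if __name__ == "__main__":
    # For the data above this prints ['t1.micro', 't2.micro'].
    print(_example_free_tier_types())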
|
py
|
1a5cff9890461889581a37994fd5a2c01d4f1759
|
"""Support for FRITZ!Box routers."""
from __future__ import annotations
import datetime
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN as DEVICE_TRACKER_DOMAIN,
PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA,
SOURCE_TYPE_ROUTER,
)
from homeassistant.components.device_tracker.config_entry import ScannerEntity
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import ConfigType
from .common import (
FritzBoxTools,
FritzData,
FritzDevice,
FritzDeviceBase,
device_filter_out_from_trackers,
)
from .const import DATA_FRITZ, DOMAIN
_LOGGER = logging.getLogger(__name__)
YAML_DEFAULT_HOST = "169.254.1.1"
YAML_DEFAULT_USERNAME = "admin"
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_HOST),
cv.deprecated(CONF_USERNAME),
cv.deprecated(CONF_PASSWORD),
PARENT_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=YAML_DEFAULT_HOST): cv.string,
vol.Optional(CONF_USERNAME, default=YAML_DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
}
),
)
async def async_get_scanner(hass: HomeAssistant, config: ConfigType) -> None:
"""Import legacy FRITZ!Box configuration."""
_LOGGER.debug("Import legacy FRITZ!Box configuration from YAML")
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=config[DEVICE_TRACKER_DOMAIN],
)
)
_LOGGER.warning(
"Your Fritz configuration has been imported into the UI, "
"please remove it from configuration.yaml. "
"Loading Fritz via scanner setup is now deprecated"
)
return None
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up device tracker for FRITZ!Box component."""
_LOGGER.debug("Starting FRITZ!Box device tracker")
router: FritzBoxTools = hass.data[DOMAIN][entry.entry_id]
data_fritz: FritzData = hass.data[DATA_FRITZ]
@callback
def update_router() -> None:
"""Update the values of the router."""
_async_add_entities(router, async_add_entities, data_fritz)
entry.async_on_unload(
async_dispatcher_connect(hass, router.signal_device_new, update_router)
)
update_router()
@callback
def _async_add_entities(
router: FritzBoxTools,
async_add_entities: AddEntitiesCallback,
data_fritz: FritzData,
) -> None:
"""Add new tracker entities from the router."""
new_tracked = []
if router.unique_id not in data_fritz.tracked:
data_fritz.tracked[router.unique_id] = set()
for mac, device in router.devices.items():
if device_filter_out_from_trackers(mac, device, data_fritz.tracked.values()):
continue
new_tracked.append(FritzBoxTracker(router, device))
data_fritz.tracked[router.unique_id].add(mac)
if new_tracked:
async_add_entities(new_tracked)
class FritzBoxTracker(FritzDeviceBase, ScannerEntity):
"""This class queries a FRITZ!Box router."""
def __init__(self, router: FritzBoxTools, device: FritzDevice) -> None:
"""Initialize a FRITZ!Box device."""
super().__init__(router, device)
self._last_activity: datetime.datetime | None = device.last_activity
@property
def is_connected(self) -> bool:
"""Return device status."""
return self._router.devices[self._mac].is_connected
@property
def unique_id(self) -> str:
"""Return device unique id."""
return f"{self._mac}_tracker"
@property
def mac_address(self) -> str:
"""Return mac_address."""
return self._mac
@property
def icon(self) -> str:
"""Return device icon."""
if self.is_connected:
return "mdi:lan-connect"
return "mdi:lan-disconnect"
@property
def extra_state_attributes(self) -> dict[str, str]:
"""Return the attributes."""
attrs: dict[str, str] = {}
device = self._router.devices[self._mac]
self._last_activity = device.last_activity
if self._last_activity is not None:
attrs["last_time_reachable"] = self._last_activity.isoformat(
timespec="seconds"
)
if device.connected_to:
attrs["connected_to"] = device.connected_to
if device.connection_type:
attrs["connection_type"] = device.connection_type
if device.ssid:
attrs["ssid"] = device.ssid
return attrs
@property
def source_type(self) -> str:
"""Return tracker source type."""
return SOURCE_TYPE_ROUTER
|
py
|
1a5cffb57e854ca544058e0cfc7cd3ad10947d9d
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .deploy_stage_execution_progress import DeployStageExecutionProgress
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ComputeInstanceGroupCanaryDeployStageExecutionProgress(DeployStageExecutionProgress):
"""
Specifies the Instance Group Canary deployment stage.
"""
def __init__(self, **kwargs):
"""
Initializes a new ComputeInstanceGroupCanaryDeployStageExecutionProgress object with values from keyword arguments. The default value of the :py:attr:`~oci.devops.models.ComputeInstanceGroupCanaryDeployStageExecutionProgress.deploy_stage_type` attribute
of this class is ``COMPUTE_INSTANCE_GROUP_CANARY_DEPLOYMENT`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param deploy_stage_display_name:
The value to assign to the deploy_stage_display_name property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type deploy_stage_display_name: str
:param deploy_stage_type:
The value to assign to the deploy_stage_type property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type deploy_stage_type: str
:param deploy_stage_id:
The value to assign to the deploy_stage_id property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type deploy_stage_id: str
:param time_started:
The value to assign to the time_started property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type time_started: datetime
:param time_finished:
The value to assign to the time_finished property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type time_finished: datetime
:param status:
The value to assign to the status property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "CANCELING", "CANCELED", "ROLLBACK_IN_PROGRESS", "ROLLBACK_SUCCEEDED", "ROLLBACK_FAILED"
:type status: str
:param deploy_stage_predecessors:
The value to assign to the deploy_stage_predecessors property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type deploy_stage_predecessors: oci.devops.models.DeployStagePredecessorCollection
:param deploy_stage_execution_progress_details:
The value to assign to the deploy_stage_execution_progress_details property of this ComputeInstanceGroupCanaryDeployStageExecutionProgress.
:type deploy_stage_execution_progress_details: list[oci.devops.models.DeployStageExecutionProgressDetails]
"""
self.swagger_types = {
'deploy_stage_display_name': 'str',
'deploy_stage_type': 'str',
'deploy_stage_id': 'str',
'time_started': 'datetime',
'time_finished': 'datetime',
'status': 'str',
'deploy_stage_predecessors': 'DeployStagePredecessorCollection',
'deploy_stage_execution_progress_details': 'list[DeployStageExecutionProgressDetails]'
}
self.attribute_map = {
'deploy_stage_display_name': 'deployStageDisplayName',
'deploy_stage_type': 'deployStageType',
'deploy_stage_id': 'deployStageId',
'time_started': 'timeStarted',
'time_finished': 'timeFinished',
'status': 'status',
'deploy_stage_predecessors': 'deployStagePredecessors',
'deploy_stage_execution_progress_details': 'deployStageExecutionProgressDetails'
}
self._deploy_stage_display_name = None
self._deploy_stage_type = None
self._deploy_stage_id = None
self._time_started = None
self._time_finished = None
self._status = None
self._deploy_stage_predecessors = None
self._deploy_stage_execution_progress_details = None
self._deploy_stage_type = 'COMPUTE_INSTANCE_GROUP_CANARY_DEPLOYMENT'
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
py
|
1a5d00145023c34bedcdcd12a893f0bc577134e3
|
from replit import clear
from art import logo
print(logo)
bids = {}
bidding_finished = False
def find_highest_bidder(bidding_record):
maximum = max(bidding_record, key=bidding_record.get)
print(f"The winner is {maximum} with a bid of ${bidding_record[maximum]}")
while True:
name = input("What is your name?: ")
price = int(input("What is your bid?: $"))
bids[name] = price
should_continue = input(
"Are there any other bidders? Type 'yes or 'no'.\n")
if should_continue == "no":
break
elif should_continue == "yes":
clear()
find_highest_bidder(bids)
|
py
|
1a5d0020132cb059d462af946dad573d02edd7a6
|
import torch
import torch.nn as nn
from torch.nn.functional import relu
from torch.nn.utils.rnn import pad_sequence
class Regressor():
"""A PyTorch MLP model consisting of an MLP for each module type.
    The model is trained on single modules only.
    The model takes as input the input power and the metadata of the
    corresponding cascade. To predict the output power, the model
    simply cascades the different MLPs matching the input module cascade."""
def __init__(self):
super().__init__()
        # Since the model needs metadata that is only present in the data,
        # the PyTorch model is only instantiated when calling the fit function
self.Model = PyTorchModel # PyTorch model class
self.model = None # PyTorch model instance
self.mod_id = None # Module IDs
def fit(self, X, y):
# Retrieve some information about the modules from the data
all_mods = set(
[(("type", mod[0]), ("nb_feat", len(mod[1]))) for seq, _, _ in X
for mod in seq])
mod_info = [dict(m) for m in all_mods]
self.mod_id = {mod["type"]: i for i, mod in enumerate(mod_info)}
# Instantiate the PyTorch model
self.model = self.Model(mod_info)
# Turn on training mode
self.model.train()
# Get data and create train data loaders
data_list = [{"mod_id_seq": torch.tensor(
[self.mod_id[mod] for mod, _ in mod_seq]),
"mod_feat_seq_list": [torch.tensor(feat).float() for
_, feat in mod_seq],
"input_power": torch.tensor(p_in).float(),
"output_power": torch.tensor(p_out).float()} for
(mod_seq, p_in, campaign_id), p_out in zip(X, y)]
train_loader = torch.utils.data.DataLoader(data_list, batch_size=128,
collate_fn=collate_fn)
# Instantiate criterion and optimizer
crit = torch.nn.MSELoss()
opt = torch.optim.Adam(self.model.parameters(), lr=0.0001)
# Training loop
for e in range(100):
for data in train_loader:
(mod_id_seq, mod_feat_seq, p_in), p_out = data
opt.zero_grad()
preds = self.model(mod_id_seq, mod_feat_seq, p_in)
                # Since the evaluation is only done for on-channels, it
                # helps the optimization to only backpropagate through them.
on_chan = p_in != 0
on_preds = torch.mul(on_chan, preds)
on_p_out = torch.mul(on_chan, p_out)
loss = crit(on_preds, on_p_out)
                # Since we are only looking at single modules, the loss may not
                # require gradients for some batches; skip the update in that case.
if loss.requires_grad:
loss.backward()
opt.step()
def predict(self, X):
# Turn on evaluation mode
self.model.eval()
# No ground truth when predicting, format input arguments
# Input powers
p_in = torch.stack([torch.tensor(p_in).float() for _, p_in, _ in X])
# Module features
mod_feat_seq = [[torch.tensor(feat).float() for _, feat in mod_seq]
for mod_seq, _, _ in X]
# Module IDs
mod_id_seq = [torch.tensor([self.mod_id[mod] for mod, _ in mod_seq])
for mod_seq, _, _ in X]
mod_id_seq = pad_sequence(mod_id_seq, batch_first=True,
padding_value=-1)
# Model prediction
preds = self.model(mod_id_seq, mod_feat_seq, p_in).detach().numpy()
return preds
class PyTorchModel(torch.nn.Module):
def __init__(self, mod_info):
super(PyTorchModel, self).__init__()
self.mod_info = mod_info
# Construct as many MLPs as modules present in the data
self.MLPs = torch.nn.ModuleList(
[MLP(m["nb_feat"]) for m in self.mod_info])
def forward(self, mod_id_seq, mod_feat_seq, p_in):
seq_len = torch.tensor(list(map(len, mod_feat_seq)))
p_out = p_in
max_nb_mod = max(seq_len)
for n in range(max_nb_mod):
for i, mlp in enumerate(self.MLPs):
msk = torch.mul(mod_id_seq[:, n] == i, seq_len > n)
if msk.any():
feats = torch.stack(
[f[n] for i, f in enumerate(mod_feat_seq) if msk[i]])
p_out[msk] = mlp(torch.cat([p_out[msk], feats], dim=-1))
# Return positive values when evaluating the model
return p_out if self.training else relu(p_out)
class MLP(torch.nn.Module):
"""A simple two layer MLP taking as input the
input powers and the features of the module"""
def __init__(self, feat_size):
super(MLP, self).__init__()
self.drop_layer = nn.Dropout(p=0.5)
# Definition of the modules of the model
        # Four fully connected layers
self.fc0 = torch.nn.Linear(32 + feat_size, 256)
self.fc1 = torch.nn.Linear(256, 256)
self.fc2 = torch.nn.Linear(256, 256)
self.fc3 = torch.nn.Linear(256, 32)
def forward(self, x):
        # Compute the output of the model using ReLU activations and dropout
p_out = self.drop_layer(self.fc1(relu(self.fc0(x))))
p_out = self.fc3(relu(self.fc2(p_out)))
return p_out
def collate_fn(batch):
# Power output
p_out = torch.stack([sample["output_power"] for sample in batch])
# Power input
p_in = torch.stack([sample["input_power"] for sample in batch])
# Module id
l_id_seq = [sample["mod_id_seq"] for sample in batch]
mod_id_seq = pad_sequence(l_id_seq, batch_first=True, padding_value=-1)
# Module features
mod_feat_seq = [sample["mod_feat_seq_list"] for sample in batch]
return (mod_id_seq, mod_feat_seq, p_in), p_out
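

# --- Hedged smoke test (added for illustration; the module type "edfa", its
# 3-element feature vector and the 32-channel power vectors are made-up
# placeholders chosen to match the MLP input size above, not a documented data
# format). ---
if __name__ == "__main__":
    import numpy as np

    rng = np.random.default_rng(0)
    # Each sample: ((module_sequence, input_power, campaign_id), output_power).
    X = [([("edfa", rng.random(3).tolist())], rng.random(32), 0) for _ in range(8)]
    y = [rng.random(32) for _ in range(8)]
    reg = Regressor()
    reg.fit(X, y)
    print(reg.predict(X).shape)  # expected: (8, 32)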
|
py
|
1a5d00fb1cd5e3ea4cd1fae97c3fb6af6c26d2a7
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
import struct
# Reference: draft-krovetz-vmac-01.txt
# This only implements VMAC with AES as cipher and 64 bit tags.
BLOCKSIZE = 16 # block size of AES in bytes
L1KEYSIZE = 128 # size of the L1 key in bytes
MASK_POLY = 0x1FFFFFFF1FFFFFFF1FFFFFFF1FFFFFFF
P127 = 2 ** 127 - 1
P64 = 2 ** 64 - 257
PP = 2 ** 64 - 2 ** 32
def nh(k, m):
mask64 = 0xffffffffffffffff
res = 0
for i in range(0, len(m), 2):
res += ((m[i] + k[i]) & mask64) * ((m[i+1] + k[i+1]) & mask64)
return res % 2**126
class Vmac64:
def __init__(self, key: bytes):
self.cipher = self.create_cipher(key)
self.l1_keys = self.kdf_int(128, 0, L1KEYSIZE // 8)
self.l2_keys = self.kdf_int(192, 0, 2)
idx = 1
while True:
k0, k1 = self.kdf_int(224, 2 * (idx - 1), 2 * idx)
if (k0 < P64) and (k1 < P64):
self.l3_keys = k0, k1
break
idx += 1
def create_cipher(self, key):
if isinstance(key, bytearray):
key = bytes(key)
assert isinstance(key, bytes) and len(key) in (16, 24, 32)
return Cipher(algorithms.AES(key), modes.ECB(), default_backend())
def encrypt_block(self, ba) -> bytes:
encryptor = self.cipher.encryptor()
assert len(ba) == 16
if isinstance(ba, bytearray):
ba = bytes(ba)
return encryptor.update(ba) + encryptor.finalize()
def kdf(self, index: int, size: int) -> bytes:
if size % BLOCKSIZE > 0:
return self.kdf(index, size + (-size % BLOCKSIZE))[:size]
res = bytearray(size)
for i in range(size // BLOCKSIZE):
inp = bytes([index] + [0] * 14 + [i])
res[BLOCKSIZE * i : BLOCKSIZE * (i+1)] = self.encrypt_block(inp)
return bytes(res)
def kdf_int(self, index: int, start: int, stop: int):
ba = self.kdf(index, 8 * stop)
return struct.unpack('>%dQ' % (stop - start), ba[8 * start: 8 * stop])
def pdf(self, nonce: bytes) -> bytes:
index = nonce[-1] % 2
block = bytearray(BLOCKSIZE - len(nonce)) + nonce
block[-1] -= index
enc = self.encrypt_block(bytes(block))
return enc[8 * index : 8 * (index + 1)]
def l1_hash(self, m: bytes):
k = self.l1_keys
blocks = (len(m) + L1KEYSIZE - 1) // L1KEYSIZE
fullblocks = len(m) // L1KEYSIZE
y = [None] * blocks
cnt = L1KEYSIZE // 8
fmt = '<%dQ' % cnt
for i in range(fullblocks):
pos = i * L1KEYSIZE
hstr = struct.unpack_from(fmt, m, pos)
y[i] = nh(k, hstr)
if blocks > fullblocks:
pos = fullblocks * L1KEYSIZE
ba = m[pos : pos + L1KEYSIZE]
ba += bytes(-len(ba) % 16)
cnt = len(ba) // 8
hstr = struct.unpack('<%dQ' % cnt, ba)
y[fullblocks] = nh(k, hstr)
return y
def l2_hash(self, m: bytes, bitlength: int) -> int:
t0, t1 = self.l2_keys
k = ((t0 & MASK_POLY) << 64) | (t1 & MASK_POLY)
if len(m) == 0:
y = k
else:
y = 1
for v in m:
y = (y * k + v) % P127
return (y + ((bitlength % (L1KEYSIZE * 8)) << 64)) % P127
def l3_hash(self, m: int) -> int:
k0, k1 = self.l3_keys
m0, m1 = divmod(m, PP)
return ((k0 + m0) * (k1 + m1)) % P64
def vhash(self, m: bytes) -> int:
t1 = self.l1_hash(m)
t2 = self.l2_hash(t1, 8 * len(m))
return self.l3_hash(t2)
def mac(self, m: bytes, nonce: bytes):
if len(nonce) > 16:
raise ValueError("Nonce too long")
elif len(nonce) == 16 and nonce[0] >= 128:
raise ValueError("Nonce must be smaller than 128-bits")
v = self.vhash(m)
m = struct.unpack('>Q', self.pdf(nonce))[0]
tag = (m + v) % 2 ** 64
return struct.pack('>Q', tag)
    def tag(self, m: bytes, nonce: bytes) -> bytes:
return self.mac(m, nonce)
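

# --- Hedged usage sketch (added for illustration; the key and nonce below are
# arbitrary test values, not vectors from the VMAC draft). ---
if __name__ == "__main__":
    vmac = Vmac64(bytes(range(16)))              # 128-bit AES key
    tag = vmac.tag(b"hello world", b"\x00" * 8)  # 8-byte nonce, 64-bit tag
    print(tag.hex())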
|
py
|
1a5d00fcc0747bc18428074c4271a45c2ae7f1e5
|
import re
from collections import defaultdict
with open('input.txt') as file:
data = file.read()
ALLERGENS = defaultdict(list)
FOOD = []
KNOWN = []
for line in data.splitlines():
if m := re.search(r'(.+) \(contains (.+)\)', line):
food = m[1].split(' ')
allergens = m[2].split(', ')
FOOD.append(food)
for a in allergens:
ALLERGENS[a].append(food)
else:
        assert False, "bad regex?"
def learn(alg):
# generate common ingredients
common = set.intersection(*map(set, ALLERGENS[alg]))
# remove known ingredients
    for known_ing, _ in KNOWN:
        common.discard(known_ing)
# if 1 left, add to known
if len(common) == 1:
KNOWN.append((*common, alg))
while len(KNOWN) < len(ALLERGENS):
for alg in ALLERGENS:
learn(alg)
known_ing = set(ing for ing, alg in KNOWN)
cnt = sum(len(set(food) - known_ing) for food in FOOD)
print(f"Part 1: {cnt}")
result = ','.join(ing for ing, _ in sorted(KNOWN, key=lambda x: x[1]))
print(f"Part 2: {result}")
|
py
|
1a5d01371670795aa20a1e2f102226f0c752833a
|
import _plotly_utils.basevalidators
class HoverlabelValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(self, plotly_name="hoverlabel", parent_name="scattergl", **kwargs):
super(HoverlabelValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Hoverlabel"),
data_docs=kwargs.pop(
"data_docs",
"""
align
Sets the horizontal alignment of the text
                content within hover label box. Has an effect
                only if the hover label text spans two or more
                lines
alignsrc
Sets the source reference on Chart Studio Cloud
for align .
bgcolor
Sets the background color of the hover labels
for this trace
bgcolorsrc
Sets the source reference on Chart Studio Cloud
for bgcolor .
bordercolor
Sets the border color of the hover labels for
this trace.
bordercolorsrc
Sets the source reference on Chart Studio Cloud
for bordercolor .
font
Sets the font used in hover labels.
namelength
Sets the default length (in number of
characters) of the trace name in the hover
labels for all traces. -1 shows the whole name
regardless of length. 0-3 shows the first 0-3
characters, and an integer >3 will show the
whole name if it is less than that many
characters, but if it is longer, will truncate
to `namelength - 3` characters and add an
ellipsis.
namelengthsrc
Sets the source reference on Chart Studio Cloud
for namelength .
""",
),
**kwargs
)
|
py
|
1a5d01a583dba01683c03337d77523515e696487
|
#!/usr/bin/env python
# encoding: utf-8
"""
@author: sherlock
@contact: [email protected]
"""
import logging
import os
import sys
sys.path.append('.')
from fastreid.config import get_cfg
from fastreid.engine import DefaultTrainer, default_argument_parser, default_setup, launch
from fastreid.utils.checkpoint import Checkpointer
from fastreid.engine import hooks
from partialreid import *
class Trainer(DefaultTrainer):
@classmethod
def build_evaluator(cls, cfg, num_query, output_folder=None):
if output_folder is None:
output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
return DsrEvaluator(cfg, num_query)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_partialreid_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
if args.eval_only:
logger = logging.getLogger("fastreid.trainer")
cfg.defrost()
cfg.MODEL.BACKBONE.PRETRAIN = False
model = Trainer.build_model(cfg)
Checkpointer(model).load(cfg.MODEL.WEIGHTS) # load trained model
if cfg.TEST.PRECISE_BN.ENABLED and hooks.get_bn_modules(model):
prebn_cfg = cfg.clone()
prebn_cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
prebn_cfg.DATASETS.NAMES = tuple([cfg.TEST.PRECISE_BN.DATASET]) # set dataset name for PreciseBN
logger.info("Prepare precise BN dataset")
hooks.PreciseBN(
# Run at the same freq as (but before) evaluation.
model,
# Build a new data loader to not affect training
Trainer.build_train_loader(prebn_cfg),
cfg.TEST.PRECISE_BN.NUM_ITER,
).update_stats()
res = Trainer.test(cfg, model)
return res
trainer = Trainer(cfg)
trainer.resume_or_load(resume=args.resume)
return trainer.train()
if __name__ == "__main__":
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
py
|
1a5d02bad22c896218791f5f817082905a0a8796
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_xen_is_installed(host):
f = host.file("/etc/xen")
assert f.exists
assert f.is_directory
|
py
|
1a5d02e39711d812b18f5ff2ae732447485ee568
|
# !/usr/bin/env python
# Created by "Thieu" at 21:18, 17/03/2020 ----------%
# Email: [email protected] %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
import numpy as np
from copy import deepcopy
from mealpy.optimizer import Optimizer
class BaseTWO(Optimizer):
"""
The original version of: Tug of War Optimization (TWO)
Links:
1. https://www.researchgate.net/publication/332088054_Tug_of_War_Optimization_Algorithm
Examples
~~~~~~~~
>>> import numpy as np
>>> from mealpy.physics_based.TWO import BaseTWO
>>>
>>> def fitness_function(solution):
>>> return np.sum(solution**2)
>>>
>>> problem_dict1 = {
>>> "fit_func": fitness_function,
>>> "lb": [-10, -15, -4, -2, -8],
>>> "ub": [10, 15, 12, 8, 20],
>>> "minmax": "min",
>>> }
>>>
>>> epoch = 1000
>>> pop_size = 50
>>> model = BaseTWO(problem_dict1, epoch, pop_size)
>>> best_position, best_fitness = model.solve()
>>> print(f"Solution: {best_position}, Fitness: {best_fitness}")
References
~~~~~~~~~~
[1] Kaveh, A., 2017. Tug of war optimization. In Advances in metaheuristic algorithms for
optimal design of structures (pp. 451-487). Springer, Cham.
"""
ID_POS = 0
ID_TAR = 1
ID_WEIGHT = 2
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
problem (dict): The problem dictionary
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
"""
super().__init__(problem, kwargs)
self.epoch = self.validator.check_int("epoch", epoch, [1, 100000])
self.pop_size = self.validator.check_int("pop_size", pop_size, [10, 10000])
self.nfe_per_epoch = self.pop_size
self.sort_flag = False
self.muy_s = 1
self.muy_k = 1
self.delta_t = 1
self.alpha = 0.99
self.beta = 0.1
def create_solution(self, lb=None, ub=None):
"""
To get the position, fitness wrapper, target and obj list
+ A[self.ID_POS] --> Return: position
+ A[self.ID_TAR] --> Return: [target, [obj1, obj2, ...]]
+ A[self.ID_TAR][self.ID_FIT] --> Return: target
+ A[self.ID_TAR][self.ID_OBJ] --> Return: [obj1, obj2, ...]
Returns:
list: wrapper of solution with format [position, target, weight]
"""
position = self.generate_position(lb, ub)
position = self.amend_position(position, lb, ub)
target = self.get_target_wrapper(position)
weight = 0.0
return [position, target, weight]
def _update_weight(self, teams):
_, best, worst = self.get_special_solutions(teams, best=1, worst=1)
best_fit = best[0][self.ID_TAR][self.ID_FIT]
worst_fit = worst[0][self.ID_TAR][self.ID_FIT]
if best_fit == worst_fit:
for i in range(self.pop_size):
teams[i][self.ID_WEIGHT] = np.random.uniform(0.5, 1.5)
else:
for i in range(self.pop_size):
teams[i][self.ID_WEIGHT] = (teams[i][self.ID_TAR][self.ID_FIT] - worst_fit) / (best_fit - worst_fit + self.EPSILON) + 1
return teams
def initialization(self):
self.pop = self.create_population(self.pop_size)
_, self.g_best = self.get_global_best_solution(self.pop)
self.pop = self._update_weight(self.pop)
def evolve(self, epoch):
"""
The main operations (equations) of algorithm. Inherit from Optimizer class
Args:
epoch (int): The current iteration
"""
pop_new = deepcopy(self.pop)
for i in range(self.pop_size):
pos_new = pop_new[i][self.ID_POS].astype(float)
for j in range(self.pop_size):
if self.pop[i][self.ID_WEIGHT] < self.pop[j][self.ID_WEIGHT]:
force = max(self.pop[i][self.ID_WEIGHT] * self.muy_s, self.pop[j][self.ID_WEIGHT] * self.muy_s)
resultant_force = force - self.pop[i][self.ID_WEIGHT] * self.muy_k
g = self.pop[j][self.ID_POS] - self.pop[i][self.ID_POS]
acceleration = resultant_force * g / (self.pop[i][self.ID_WEIGHT] * self.muy_k)
delta_x = 1 / 2 * acceleration + np.power(self.alpha, epoch + 1) * self.beta * \
(self.problem.ub - self.problem.lb) * np.random.randn(self.problem.n_dims)
pos_new += delta_x
pop_new[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
for i in range(self.pop_size):
pos_new = pop_new[i][self.ID_POS].astype(float)
for j in range(self.problem.n_dims):
if pos_new[j] < self.problem.lb[j] or pos_new[j] > self.problem.ub[j]:
if np.random.random() <= 0.5:
pos_new[j] = self.g_best[self.ID_POS][j] + np.random.randn() / (epoch + 1) * \
(self.g_best[self.ID_POS][j] - pos_new[j])
if pos_new[j] < self.problem.lb[j] or pos_new[j] > self.problem.ub[j]:
pos_new[j] = self.pop[i][self.ID_POS][j]
else:
if pos_new[j] < self.problem.lb[j]:
pos_new[j] = self.problem.lb[j]
if pos_new[j] > self.problem.ub[j]:
pos_new[j] = self.problem.ub[j]
pop_new[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
pop_new = self.update_target_wrapper_population(pop_new)
self.pop = self.greedy_selection_population(self.pop, pop_new)
self.pop = self._update_weight(pop_new)
class OppoTWO(BaseTWO):
"""
    The opposition-based learning version of: Tug of War Optimization (OTWO)
Notes
~~~~~
+ Applied the idea of Opposition-based learning technique
Examples
~~~~~~~~
>>> import numpy as np
>>> from mealpy.physics_based.TWO import OppoTWO
>>>
>>> def fitness_function(solution):
>>> return np.sum(solution**2)
>>>
>>> problem_dict1 = {
>>> "fit_func": fitness_function,
>>> "lb": [-10, -15, -4, -2, -8],
>>> "ub": [10, 15, 12, 8, 20],
>>> "minmax": "min",
>>> }
>>>
>>> epoch = 1000
>>> pop_size = 50
>>> model = OppoTWO(problem_dict1, epoch, pop_size)
>>> best_position, best_fitness = model.solve()
>>> print(f"Solution: {best_position}, Fitness: {best_fitness}")
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
problem (dict): The problem dictionary
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
"""
super().__init__(problem, epoch, pop_size, **kwargs)
self.nfe_per_epoch = self.pop_size
self.sort_flag = False
def initialization(self):
pop_temp = self.create_population(int(self.pop_size / 2))
pop_oppo = []
for i in range(len(pop_temp)):
pos_opposite = self.problem.ub + self.problem.lb - pop_temp[i][self.ID_POS]
pos_opposite = self.amend_position(pos_opposite, self.problem.lb, self.problem.ub)
pop_oppo.append([pos_opposite, None, 0.0])
pop_oppo = self.update_target_wrapper_population(pop_oppo)
self.pop = pop_temp + pop_oppo
self.pop = self._update_weight(self.pop)
_, self.g_best = self.get_global_best_solution(self.pop)
def evolve(self, epoch):
"""
The main operations (equations) of algorithm. Inherit from Optimizer class
Args:
epoch (int): The current iteration
"""
## Apply force of others solution on each individual solution
pop_new = deepcopy(self.pop)
for i in range(self.pop_size):
pos_new = pop_new[i][self.ID_POS].astype(float)
for j in range(self.pop_size):
if self.pop[i][self.ID_WEIGHT] < self.pop[j][self.ID_WEIGHT]:
force = max(self.pop[i][self.ID_WEIGHT] * self.muy_s, self.pop[j][self.ID_WEIGHT] * self.muy_s)
resultant_force = force - self.pop[i][self.ID_WEIGHT] * self.muy_k
g = self.pop[j][self.ID_POS] - self.pop[i][self.ID_POS]
acceleration = resultant_force * g / (self.pop[i][self.ID_WEIGHT] * self.muy_k)
delta_x = 1 / 2 * acceleration + np.power(self.alpha, epoch + 1) * self.beta * \
(self.problem.ub - self.problem.lb) * np.random.randn(self.problem.n_dims)
pos_new += delta_x
self.pop[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
## Amend solution and update fitness value
for i in range(self.pop_size):
pos_new = self.g_best[self.ID_POS] + np.random.normal(0, 1, self.problem.n_dims) / (epoch + 1) * \
(self.g_best[self.ID_POS] - pop_new[i][self.ID_POS])
conditions = np.logical_or(pop_new[i][self.ID_POS] < self.problem.lb, pop_new[i][self.ID_POS] > self.problem.ub)
conditions = np.logical_and(conditions, np.random.uniform(0, 1, self.problem.n_dims) < 0.5)
pos_new = np.where(conditions, pos_new, self.pop[i][self.ID_POS])
pop_new[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
pop_new = self.update_target_wrapper_population(pop_new)
## Opposition-based here
for i in range(self.pop_size):
if self.compare_agent(pop_new[i], self.pop[i]):
self.pop[i] = deepcopy(pop_new[i])
else:
C_op = self.create_opposition_position(self.pop[i][self.ID_POS], self.g_best[self.ID_POS])
C_op = self.amend_position(C_op, self.problem.lb, self.problem.ub)
target_op = self.get_target_wrapper(C_op)
if self.compare_agent([C_op, target_op], self.pop[i]):
self.pop[i] = [C_op, target_op, 0.0]
self.pop = self._update_weight(self.pop)
class LevyTWO(BaseTWO):
"""
The Levy-flight version of: Tug of War Optimization (LTWO)
Notes
~~~~~
+ Applied the idea of Levy-flight technique
Examples
~~~~~~~~
>>> import numpy as np
>>> from mealpy.physics_based.TWO import LevyTWO
>>>
>>> def fitness_function(solution):
>>> return np.sum(solution**2)
>>>
>>> problem_dict1 = {
>>> "fit_func": fitness_function,
>>> "lb": [-10, -15, -4, -2, -8],
>>> "ub": [10, 15, 12, 8, 20],
>>> "minmax": "min",
>>> }
>>>
>>> epoch = 1000
>>> pop_size = 50
>>> model = LevyTWO(problem_dict1, epoch, pop_size)
>>> best_position, best_fitness = model.solve()
>>> print(f"Solution: {best_position}, Fitness: {best_fitness}")
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
problem (dict): The problem dictionary
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
"""
super().__init__(problem, epoch, pop_size, **kwargs)
self.nfe_per_epoch = self.pop_size
self.sort_flag = False
def evolve(self, epoch):
"""
The main operations (equations) of algorithm. Inherit from Optimizer class
Args:
epoch (int): The current iteration
"""
pop_new = deepcopy(self.pop)
for i in range(self.pop_size):
pos_new = self.pop[i][self.ID_POS].astype(float)
for k in range(self.pop_size):
if self.pop[i][self.ID_WEIGHT] < self.pop[k][self.ID_WEIGHT]:
force = max(self.pop[i][self.ID_WEIGHT] * self.muy_s, self.pop[k][self.ID_WEIGHT] * self.muy_s)
resultant_force = force - self.pop[i][self.ID_WEIGHT] * self.muy_k
g = self.pop[k][self.ID_POS] - self.pop[i][self.ID_POS]
acceleration = resultant_force * g / (self.pop[i][self.ID_WEIGHT] * self.muy_k)
delta_x = 1 / 2 * acceleration + np.power(self.alpha, epoch + 1) * self.beta * \
(self.problem.ub - self.problem.lb) * np.random.randn(self.problem.n_dims)
                    pos_new += delta_x
pop_new[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
for i in range(self.pop_size):
pos_new = self.pop[i][self.ID_POS].astype(float)
for j in range(self.problem.n_dims):
if pos_new[j] < self.problem.lb[j] or pos_new[j] > self.problem.ub[j]:
if np.random.random() <= 0.5:
pos_new[j] = self.g_best[self.ID_POS][j] + np.random.randn() / (epoch + 1) * \
(self.g_best[self.ID_POS][j] - pos_new[j])
if pos_new[j] < self.problem.lb[j] or pos_new[j] > self.problem.ub[j]:
pos_new[j] = self.pop[i][self.ID_POS][j]
else:
if pos_new[j] < self.problem.lb[j]:
pos_new[j] = self.problem.lb[j]
if pos_new[j] > self.problem.ub[j]:
pos_new[j] = self.problem.ub[j]
pop_new[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
pop_new = self.update_target_wrapper_population(pop_new)
self.pop = self.greedy_selection_population(self.pop, pop_new)
self.pop = self._update_weight(pop_new)
### Apply levy-flight here
for i in range(self.pop_size):
if self.compare_agent(pop_new[i], self.pop[i]):
self.pop[i] = deepcopy(pop_new[i])
else:
levy_step = self.get_levy_flight_step(beta=1.0, multiplier=10, case=-1)
pos_new = pop_new[i][self.ID_POS] + np.sign(np.random.random() - 0.5) * levy_step
pos_new = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
target = self.get_target_wrapper(pos_new)
self.pop[i] = [pos_new, target, 0.0]
self.pop = self._update_weight(pop_new)
class EnhancedTWO(OppoTWO, LevyTWO):
"""
    The original version of: Enhanced Tug of War Optimization (ETWO)
Links:
1. https://doi.org/10.1016/j.procs.2020.03.063
Examples
~~~~~~~~
>>> import numpy as np
>>> from mealpy.physics_based.TWO import EnhancedTWO
>>>
>>> def fitness_function(solution):
>>> return np.sum(solution**2)
>>>
>>> problem_dict1 = {
>>> "fit_func": fitness_function,
>>> "lb": [-10, -15, -4, -2, -8],
>>> "ub": [10, 15, 12, 8, 20],
>>> "minmax": "min",
>>> }
>>>
>>> epoch = 1000
>>> pop_size = 50
>>> r_rate = 0.3
>>> ps_rate = 0.85
>>> p_field = 0.1
>>> n_field = 0.45
>>> model = EnhancedTWO(problem_dict1, epoch, pop_size, r_rate, ps_rate, p_field, n_field)
>>> best_position, best_fitness = model.solve()
>>> print(f"Solution: {best_position}, Fitness: {best_fitness}")
References
~~~~~~~~~~
[1] Nguyen, T., Hoang, B., Nguyen, G. and Nguyen, B.M., 2020. A new workload prediction model using
extreme learning machine and enhanced tug of war optimization. Procedia Computer Science, 170, pp.362-369.
"""
def __init__(self, problem, epoch=10000, pop_size=100, **kwargs):
"""
Args:
problem (dict): The problem dictionary
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
"""
super().__init__(problem, epoch, pop_size, **kwargs)
self.nfe_per_epoch = self.pop_size
self.sort_flag = False
def initialization(self):
pop_temp = self.create_population(self.pop_size)
pop_oppo = deepcopy(pop_temp)
for i in range(self.pop_size):
pos_opposite = self.problem.ub + self.problem.lb - pop_temp[i][self.ID_POS]
pop_oppo[i][self.ID_POS] = self.amend_position(pos_opposite, self.problem.lb, self.problem.ub)
pop_oppo = self.update_target_wrapper_population(pop_oppo)
self.pop = self.get_sorted_strim_population(pop_temp + pop_oppo, self.pop_size)
self.pop = self._update_weight(self.pop)
self.g_best = deepcopy(self.pop[0])
def evolve(self, epoch):
"""
The main operations (equations) of algorithm. Inherit from Optimizer class
Args:
epoch (int): The current iteration
"""
pop_new = deepcopy(self.pop)
for i in range(self.pop_size):
pos_new = self.pop[i][self.ID_POS].astype(float)
for k in range(self.pop_size):
if self.pop[i][self.ID_WEIGHT] < self.pop[k][self.ID_WEIGHT]:
force = max(self.pop[i][self.ID_WEIGHT] * self.muy_s, self.pop[k][self.ID_WEIGHT] * self.muy_s)
resultant_force = force - self.pop[i][self.ID_WEIGHT] * self.muy_k
g = self.pop[k][self.ID_POS] - self.pop[i][self.ID_POS]
acceleration = resultant_force * g / (self.pop[i][self.ID_WEIGHT] * self.muy_k)
delta_x = 1 / 2 * acceleration + np.power(self.alpha, epoch + 1) * self.beta * \
(self.problem.ub - self.problem.lb) * np.random.randn(self.problem.n_dims)
pos_new += delta_x
pop_new[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
for i in range(self.pop_size):
pos_new = self.pop[i][self.ID_POS].astype(float)
for j in range(self.problem.n_dims):
if pos_new[j] < self.problem.lb[j] or pos_new[j] > self.problem.ub[j]:
if np.random.random() <= 0.5:
pos_new[j] = self.g_best[self.ID_POS][j] + np.random.randn() / (epoch + 1) * \
(self.g_best[self.ID_POS][j] - pos_new[j])
if pos_new[j] < self.problem.lb[j] or pos_new[j] > self.problem.ub[j]:
pos_new[j] = self.pop[i][self.ID_POS][j]
else:
if pos_new[j] < self.problem.lb[j]:
pos_new[j] = self.problem.lb[j]
if pos_new[j] > self.problem.ub[j]:
pos_new[j] = self.problem.ub[j]
pop_new[i][self.ID_POS] = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
pop_new = self.update_target_wrapper_population(pop_new)
self.pop = self.greedy_selection_population(self.pop, pop_new)
self.pop = self._update_weight(pop_new)
for i in range(self.pop_size):
if self.compare_agent(pop_new[i], self.pop[i]):
self.pop[i] = deepcopy(pop_new[i])
else:
C_op = self.create_opposition_position(self.pop[i][self.ID_POS], self.g_best[self.ID_POS])
C_op = self.amend_position(C_op, self.problem.lb, self.problem.ub)
target_op = self.get_target_wrapper(C_op)
if self.compare_agent([C_op, target_op], self.pop[i]):
self.pop[i] = [C_op, target_op, 0.0]
else:
levy_step = self.get_levy_flight_step(beta=1.0, multiplier=0.001, case=-1)
pos_new = pop_new[i][self.ID_POS] + 1.0 / np.sqrt(epoch + 1) * np.sign(np.random.random() - 0.5) * levy_step
pos_new = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
target = self.get_target_wrapper(pos_new)
self.pop[i] = [pos_new, target, 0.0]
self.pop = self._update_weight(pop_new)
|
py
|
1a5d0319df59b34af6ed380ac4de232dbb5296e4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import boilerplate
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = boilerplate.__version__
if sys.argv[-1] == 'publish':
os.system('cd docs && make html')
os.system('python setup.py sdist upload')
print("You probably want to also tag the version now:")
print(" git tag -a %s -m 'version %s'" % (version, version))
print(" git push --tags")
sys.exit()
if sys.argv[-1] == 'test':
print("Running tests only on current environment.")
print("Use `tox` for testing multiple environments.")
os.system('python manage.py test')
sys.exit()
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
setup(
name='django-boilerplate',
version=version,
description="""What Django is missing""",
long_description=readme + '\n\n' + history,
author='Irving Kcam',
author_email='[email protected]',
url='https://github.com/cubope/django-boilerplate',
packages=[
'boilerplate',
],
include_package_data=True,
install_requires=['Pillow', 'six'],
license="Apache License 2.0",
zip_safe=False,
keywords='django-boilerplate',
classifiers=[
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries",
"Topic :: Utilities",
"Environment :: Web Environment",
"Framework :: Django",
],
)
|
py
|
1a5d05094e53cfecd9476d7d87f023e8a02d7458
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.reverse_sequence_op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.compiler.tests.xla_test import XLATestCase
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class ReverseSequenceTest(XLATestCase):
def _testReverseSequence(self,
x,
batch_axis,
seq_axis,
seq_lengths,
truth,
expected_err_re=None):
with self.test_session():
p = array_ops.placeholder(dtypes.as_dtype(x.dtype))
lengths = array_ops.placeholder(dtypes.as_dtype(seq_lengths.dtype))
with self.test_scope():
ans = array_ops.reverse_sequence(
p, batch_axis=batch_axis, seq_axis=seq_axis, seq_lengths=lengths)
if expected_err_re is None:
tf_ans = ans.eval(feed_dict={p: x, lengths: seq_lengths})
self.assertAllClose(tf_ans, truth, atol=1e-10)
else:
with self.assertRaisesOpError(expected_err_re):
ans.eval(feed_dict={p: x, lengths: seq_lengths})
def testSimple(self):
x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.int32)
expected = np.array([[1, 2, 3], [6, 5, 4], [8, 7, 9]], dtype=np.int32)
self._testReverseSequence(
x,
batch_axis=0,
seq_axis=1,
seq_lengths=np.array([1, 3, 2], np.int32),
truth=expected)
def _testBasic(self, dtype, len_dtype):
x = np.asarray(
[[[1, 2, 3, 4], [5, 6, 7, 8]], [[9, 10, 11, 12], [13, 14, 15, 16]],
[[17, 18, 19, 20], [21, 22, 23, 24]]],
dtype=dtype)
x = x.reshape(3, 2, 4, 1, 1)
x = x.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
# reverse dim 2 up to (0:3, none, 0:4) along dim=0
seq_lengths = np.asarray([3, 0, 4], dtype=len_dtype)
truth_orig = np.asarray(
[
[[3, 2, 1, 4], [7, 6, 5, 8]], # reverse 0:3
[[9, 10, 11, 12], [13, 14, 15, 16]], # reverse none
[[20, 19, 18, 17], [24, 23, 22, 21]]
], # reverse 0:4 (all)
dtype=dtype)
truth_orig = truth_orig.reshape(3, 2, 4, 1, 1)
truth = truth_orig.transpose([2, 1, 0, 3, 4]) # permute axes 0 <=> 2
seq_axis = 0 # permute seq_axis and batch_axis (originally 2 and 0, resp.)
batch_axis = 2
self._testReverseSequence(x, batch_axis, seq_axis, seq_lengths, truth)
def testSeqLength(self):
for dtype in self.all_types:
for seq_dtype in self.int_types:
self._testBasic(dtype, seq_dtype)
if __name__ == "__main__":
test.main()
|
py
|
1a5d062fc660f00463c1131bb2b1cdb5b3759d52
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Parametrizable families of optimizers.
Caution
-------
This module and its available classes are experimental and may change quickly in the near future.
"""
from .optimizerlib import ParametrizedOnePlusOne
from .optimizerlib import ParametrizedBO
from .optimizerlib import Chaining
from .differentialevolution import DifferentialEvolution
from .recastlib import ScipyOptimizer
from .oneshot import RandomSearchMaker
from .oneshot import SamplingSearch
__all__ = ["ParametrizedOnePlusOne", "ParametrizedBO", "DifferentialEvolution",
"ScipyOptimizer", "RandomSearchMaker", "SamplingSearch", "Chaining"]
|
py
|
1a5d067f750d01bd8999feeed5baee9b78d131e9
|
from lib.utilities import get_truncated_normal, get_truncated_exponential
from lib import static
class DistributionsManager:
def __init__(self, yaml_parser):
self.favorite_subjects_per_user_distribution = self.build_favorite_subjects_distribution(yaml_parser)
self.subjects_distribution = self.build_subjects_distribution(yaml_parser)
self.age_distribution = self.build_age_distribution(yaml_parser)
self.likes_distribution = self.build_likes_distribution(yaml_parser)
self.followers_distribution = self.build_followers_distribution(yaml_parser)
self.salary_distribution_per_occupation = self.build_compensations_distribution(yaml_parser)
@staticmethod
def build_favorite_subjects_distribution(yaml_parser):
distribution_type = yaml_parser.favorite_subjects_per_user_distribution
        if distribution_type == static.NORMAL:
try:
s = get_truncated_normal(yaml_parser.favorite_subjects_per_user_mean,
yaml_parser.favorite_subjects_per_user_sd,
yaml_parser.favorite_subjects_per_user_lower_bound,
yaml_parser.favorite_subjects_per_user_upper_bound)
            except Exception:
print('Incorrect parameters for that kind of distribution. Using default value.')
s = get_truncated_normal(static.NORMAL_DISTRIBUTION[1],
static.NORMAL_DISTRIBUTION[2],
static.NORMAL_DISTRIBUTION[3],
static.NORMAL_DISTRIBUTION[4])
return s
else:
try:
s = get_truncated_exponential(yaml_parser.favorite_subjects_per_user_upper_bound,
yaml_parser.favorite_subjects_per_user_lower_bound,
yaml_parser.favorite_subjects_scale)
            except Exception:
print('Incorrect parameters for that kind of distribution. Using default value.')
s = get_truncated_exponential(static.EXPONENTIAL_DISTRIBUTION[4],
static.EXPONENTIAL_DISTRIBUTION[3],
static.EXPONENTIAL_DISTRIBUTION[5])
return s
@staticmethod
def build_subjects_distribution(yaml_parser):
distribution_type = yaml_parser.subjects_distribution
        if distribution_type == static.NORMAL:
try:
s = get_truncated_normal(yaml_parser.subjects_mean,
yaml_parser.subjects_sd,
yaml_parser.subjects_lower_bound,
yaml_parser.subjects_upper_bound)
            except Exception:
print('Incorrect parameters for that kind of distribution. Using default value.')
s = get_truncated_normal(static.NORMAL_DISTRIBUTION[1],
static.NORMAL_DISTRIBUTION[2],
static.NORMAL_DISTRIBUTION[3],
static.NORMAL_DISTRIBUTION[4])
return s
else:
try:
s = get_truncated_exponential(yaml_parser.subjects_upper_bound,
yaml_parser.subjects_lower_bound,
yaml_parser.subjects_scale)
            except Exception:
print('Incorrect parameters for that kind of distribution. Using default value.')
s = get_truncated_exponential(static.EXPONENTIAL_DISTRIBUTION[4],
static.EXPONENTIAL_DISTRIBUTION[3],
static.EXPONENTIAL_DISTRIBUTION[5])
return s
@staticmethod
def build_age_distribution(yaml_parser):
return get_truncated_normal(yaml_parser.age_mean,
yaml_parser.age_sd,
yaml_parser.age_lower_bound,
yaml_parser.age_upper_bound)
@staticmethod
def build_compensations_distribution(yaml_parser):
salary_distribution_per_occupation = {}
for occupation in yaml_parser.compensations_distribution:
prob = yaml_parser.compensations_distribution[occupation]
salary_distribution_per_occupation[occupation] = get_truncated_normal(prob[0], prob[1], prob[2], prob[3])
return salary_distribution_per_occupation
@staticmethod
def build_likes_distribution(yaml_parser):
return get_truncated_exponential(yaml_parser.likes_upper_bound, yaml_parser.likes_lower_bound,
yaml_parser.likes_scale)
@staticmethod
def build_followers_distribution(yaml_parser):
return get_truncated_exponential(yaml_parser.followers_upper_bound, yaml_parser.followers_lower_bound,
yaml_parser.followers_scale)
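# Hypothetical usage sketch (not part of the original module): build a single
# distribution from a stand-in parser object. The attribute names mirror those
# read by build_age_distribution above; whether the returned object exposes an
# rvs() sampling method depends on lib.utilities.get_truncated_normal, so the
# final line is an assumption.
if __name__ == "__main__":
    from types import SimpleNamespace

    demo_parser = SimpleNamespace(age_mean=30, age_sd=10,
                                  age_lower_bound=18, age_upper_bound=80)
    age_distribution = DistributionsManager.build_age_distribution(demo_parser)
    print(age_distribution.rvs(5))  # sample five ages, if rvs() is available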
|
py
|
1a5d0721ac226679d46aa3e40020864f7909eb6f
|
import numpy as np
import scipy.io.wavfile
import sys
from aubio import source, pitch
win_s = 4096
hop_s = 512
your_file = "StarWars60.wav"
samplerate, _ = scipy.io.wavfile.read(your_file, mmap=False)  # only the sample rate is needed here
s = source(your_file, samplerate, hop_s)
samplerate = s.samplerate
tolerance = 0.8
pitch_o = pitch("yin", win_s, hop_s, samplerate)
pitch_o.set_unit("midi")
pitch_o.set_tolerance(tolerance)
pitches = []
confidences = []
total_frames = 0
while True:
samples, read = s()
    pitch_value = pitch_o(samples)[0]  # renamed to avoid shadowing the imported aubio pitch
    pitches += [pitch_value]
confidence = pitch_o.get_confidence()
confidences += [confidence]
total_frames += read
if read < hop_s: break
print("Average frequency = " + str(np.array(pitches).mean()) + " hz")
|
py
|
1a5d079b00fed40bae25e4f9094bdee22c4cde2f
|
import logging
from typing import Dict, List, Optional, Tuple, Any
import aiosqlite
import zstd
from chia.consensus.block_record import BlockRecord
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.types.full_block import FullBlock
from chia.types.weight_proof import SubEpochChallengeSegment, SubEpochSegments
from chia.util.db_wrapper import DBWrapper
from chia.util.ints import uint32
from chia.util.lru_cache import LRUCache
log = logging.getLogger(__name__)
class BlockStore:
db: aiosqlite.Connection
block_cache: LRUCache
db_wrapper: DBWrapper
ses_challenge_cache: LRUCache
@classmethod
async def create(cls, db_wrapper: DBWrapper):
self = cls()
# All full blocks which have been added to the blockchain. Header_hash -> block
self.db_wrapper = db_wrapper
self.db = db_wrapper.db
if self.db_wrapper.db_version == 2:
# TODO: most data in block is duplicated in block_record. The only
# reason for this is that our parsing of a FullBlock is so slow,
# it's faster to store duplicate data to parse less when we just
# need the BlockRecord. Once we fix the parsing (and data structure)
# of FullBlock, this can use less space
await self.db.execute(
"CREATE TABLE IF NOT EXISTS full_blocks("
"header_hash blob PRIMARY KEY,"
"prev_hash blob,"
"height bigint,"
"sub_epoch_summary blob,"
"is_fully_compactified tinyint,"
"in_main_chain tinyint,"
"block blob,"
"block_record blob)"
)
# This is a single-row table containing the hash of the current
# peak. The "key" field is there to make update statements simple
await self.db.execute("CREATE TABLE IF NOT EXISTS current_peak(key int PRIMARY KEY, hash blob)")
await self.db.execute("CREATE INDEX IF NOT EXISTS height on full_blocks(height)")
# Sub epoch segments for weight proofs
await self.db.execute(
"CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3("
"ses_block_hash blob PRIMARY KEY,"
"challenge_segments blob)"
)
await self.db.execute(
"CREATE INDEX IF NOT EXISTS is_fully_compactified ON"
" full_blocks(is_fully_compactified, in_main_chain) WHERE in_main_chain=1"
)
await self.db.execute(
"CREATE INDEX IF NOT EXISTS main_chain ON full_blocks(height, in_main_chain) WHERE in_main_chain=1"
)
else:
await self.db.execute(
"CREATE TABLE IF NOT EXISTS full_blocks(header_hash text PRIMARY KEY, height bigint,"
" is_block tinyint, is_fully_compactified tinyint, block blob)"
)
# Block records
await self.db.execute(
"CREATE TABLE IF NOT EXISTS block_records(header_hash "
"text PRIMARY KEY, prev_hash text, height bigint,"
"block blob, sub_epoch_summary blob, is_peak tinyint, is_block tinyint)"
)
# Sub epoch segments for weight proofs
await self.db.execute(
"CREATE TABLE IF NOT EXISTS sub_epoch_segments_v3(ses_block_hash text PRIMARY KEY,"
"challenge_segments blob)"
)
# Height index so we can look up in order of height for sync purposes
await self.db.execute("CREATE INDEX IF NOT EXISTS full_block_height on full_blocks(height)")
await self.db.execute(
"CREATE INDEX IF NOT EXISTS is_fully_compactified on full_blocks(is_fully_compactified)"
)
await self.db.execute("CREATE INDEX IF NOT EXISTS height on block_records(height)")
if self.db_wrapper.allow_upgrades:
await self.db.execute("DROP INDEX IF EXISTS hh")
await self.db.execute("DROP INDEX IF EXISTS is_block")
await self.db.execute("DROP INDEX IF EXISTS peak")
await self.db.execute(
"CREATE INDEX IF NOT EXISTS is_peak_eq_1_idx on block_records(is_peak) where is_peak = 1"
)
else:
await self.db.execute("CREATE INDEX IF NOT EXISTS peak on block_records(is_peak) where is_peak = 1")
await self.db.commit()
self.block_cache = LRUCache(1000)
self.ses_challenge_cache = LRUCache(50)
return self
def maybe_from_hex(self, field: Any) -> bytes:
if self.db_wrapper.db_version == 2:
return field
else:
return bytes.fromhex(field)
def maybe_to_hex(self, field: bytes) -> Any:
if self.db_wrapper.db_version == 2:
return field
else:
return field.hex()
def compress(self, block: FullBlock) -> bytes:
return zstd.compress(bytes(block))
def maybe_decompress(self, block_bytes: bytes) -> FullBlock:
if self.db_wrapper.db_version == 2:
return FullBlock.from_bytes(zstd.decompress(block_bytes))
else:
return FullBlock.from_bytes(block_bytes)
async def rollback(self, height: int) -> None:
if self.db_wrapper.db_version == 2:
await self.db.execute(
"UPDATE OR FAIL full_blocks SET in_main_chain=0 WHERE height>? AND in_main_chain=1", (height,)
)
async def set_in_chain(self, header_hashes: List[Tuple[bytes32]]) -> None:
if self.db_wrapper.db_version == 2:
await self.db.executemany(
"UPDATE OR FAIL full_blocks SET in_main_chain=1 WHERE header_hash=?", header_hashes
)
async def add_full_block(self, header_hash: bytes32, block: FullBlock, block_record: BlockRecord) -> None:
self.block_cache.put(header_hash, block)
if self.db_wrapper.db_version == 2:
ses: Optional[bytes] = (
None
if block_record.sub_epoch_summary_included is None
else bytes(block_record.sub_epoch_summary_included)
)
await self.db.execute(
"INSERT OR REPLACE INTO full_blocks VALUES(?, ?, ?, ?, ?, ?, ?, ?)",
(
header_hash,
block.prev_header_hash,
block.height,
ses,
int(block.is_fully_compactified()),
0, # in_main_chain
self.compress(block),
bytes(block_record),
),
)
else:
await self.db.execute(
"INSERT OR REPLACE INTO full_blocks VALUES(?, ?, ?, ?, ?)",
(
header_hash.hex(),
block.height,
int(block.is_transaction_block()),
int(block.is_fully_compactified()),
bytes(block),
),
)
await self.db.execute(
"INSERT OR REPLACE INTO block_records VALUES(?, ?, ?, ?,?, ?, ?)",
(
header_hash.hex(),
block.prev_header_hash.hex(),
block.height,
bytes(block_record),
None
if block_record.sub_epoch_summary_included is None
else bytes(block_record.sub_epoch_summary_included),
False,
block.is_transaction_block(),
),
)
async def persist_sub_epoch_challenge_segments(
self, ses_block_hash: bytes32, segments: List[SubEpochChallengeSegment]
) -> None:
async with self.db_wrapper.lock:
await self.db.execute(
"INSERT OR REPLACE INTO sub_epoch_segments_v3 VALUES(?, ?)",
(self.maybe_to_hex(ses_block_hash), bytes(SubEpochSegments(segments))),
)
await self.db.commit()
async def get_sub_epoch_challenge_segments(
self,
ses_block_hash: bytes32,
) -> Optional[List[SubEpochChallengeSegment]]:
cached = self.ses_challenge_cache.get(ses_block_hash)
if cached is not None:
return cached
async with self.db.execute(
"SELECT challenge_segments from sub_epoch_segments_v3 WHERE ses_block_hash=?",
(self.maybe_to_hex(ses_block_hash),),
) as cursor:
row = await cursor.fetchone()
if row is not None:
challenge_segments = SubEpochSegments.from_bytes(row[0]).challenge_segments
self.ses_challenge_cache.put(ses_block_hash, challenge_segments)
return challenge_segments
return None
def rollback_cache_block(self, header_hash: bytes32):
try:
self.block_cache.remove(header_hash)
except KeyError:
# this is best effort. When rolling back, we may not have added the
# block to the cache yet
pass
async def get_full_block(self, header_hash: bytes32) -> Optional[FullBlock]:
cached = self.block_cache.get(header_hash)
if cached is not None:
log.debug(f"cache hit for block {header_hash.hex()}")
return cached
log.debug(f"cache miss for block {header_hash.hex()}")
async with self.db.execute(
"SELECT block from full_blocks WHERE header_hash=?", (self.maybe_to_hex(header_hash),)
) as cursor:
row = await cursor.fetchone()
if row is not None:
block = self.maybe_decompress(row[0])
self.block_cache.put(header_hash, block)
return block
return None
async def get_full_block_bytes(self, header_hash: bytes32) -> Optional[bytes]:
cached = self.block_cache.get(header_hash)
if cached is not None:
log.debug(f"cache hit for block {header_hash.hex()}")
return bytes(cached)
log.debug(f"cache miss for block {header_hash.hex()}")
async with self.db.execute(
"SELECT block from full_blocks WHERE header_hash=?", (self.maybe_to_hex(header_hash),)
) as cursor:
row = await cursor.fetchone()
if row is not None:
if self.db_wrapper.db_version == 2:
return zstd.decompress(row[0])
else:
return row[0]
return None
async def get_full_blocks_at(self, heights: List[uint32]) -> List[FullBlock]:
if len(heights) == 0:
return []
heights_db = tuple(heights)
formatted_str = f'SELECT block from full_blocks WHERE height in ({"?," * (len(heights_db) - 1)}?)'
async with self.db.execute(formatted_str, heights_db) as cursor:
ret: List[FullBlock] = []
for row in await cursor.fetchall():
ret.append(self.maybe_decompress(row[0]))
return ret
async def get_block_records_by_hash(self, header_hashes: List[bytes32]):
"""
Returns a list of Block Records, ordered by the same order in which header_hashes are passed in.
Throws an exception if the blocks are not present
"""
if len(header_hashes) == 0:
return []
all_blocks: Dict[bytes32, BlockRecord] = {}
if self.db_wrapper.db_version == 2:
async with self.db.execute(
"SELECT header_hash,block_record FROM full_blocks "
f'WHERE header_hash in ({"?," * (len(header_hashes) - 1)}?)',
tuple(header_hashes),
) as cursor:
for row in await cursor.fetchall():
header_hash = bytes32(row[0])
all_blocks[header_hash] = BlockRecord.from_bytes(row[1])
else:
formatted_str = f'SELECT block from block_records WHERE header_hash in ({"?," * (len(header_hashes) - 1)}?)'
async with self.db.execute(formatted_str, tuple([hh.hex() for hh in header_hashes])) as cursor:
for row in await cursor.fetchall():
block_rec: BlockRecord = BlockRecord.from_bytes(row[0])
all_blocks[block_rec.header_hash] = block_rec
ret: List[BlockRecord] = []
for hh in header_hashes:
if hh not in all_blocks:
raise ValueError(f"Header hash {hh} not in the blockchain")
ret.append(all_blocks[hh])
return ret
async def get_blocks_by_hash(self, header_hashes: List[bytes32]) -> List[FullBlock]:
"""
Returns a list of Full Blocks blocks, ordered by the same order in which header_hashes are passed in.
Throws an exception if the blocks are not present
"""
if len(header_hashes) == 0:
return []
header_hashes_db: Tuple[Any, ...]
if self.db_wrapper.db_version == 2:
header_hashes_db = tuple(header_hashes)
else:
header_hashes_db = tuple([hh.hex() for hh in header_hashes])
formatted_str = (
f'SELECT header_hash, block from full_blocks WHERE header_hash in ({"?," * (len(header_hashes_db) - 1)}?)'
)
all_blocks: Dict[bytes32, FullBlock] = {}
async with self.db.execute(formatted_str, header_hashes_db) as cursor:
for row in await cursor.fetchall():
header_hash = self.maybe_from_hex(row[0])
full_block: FullBlock = self.maybe_decompress(row[1])
# TODO: address hint error and remove ignore
# error: Invalid index type "bytes" for "Dict[bytes32, FullBlock]";
# expected type "bytes32" [index]
all_blocks[header_hash] = full_block # type: ignore[index]
self.block_cache.put(header_hash, full_block)
ret: List[FullBlock] = []
for hh in header_hashes:
if hh not in all_blocks:
raise ValueError(f"Header hash {hh} not in the blockchain")
ret.append(all_blocks[hh])
return ret
async def get_block_record(self, header_hash: bytes32) -> Optional[BlockRecord]:
if self.db_wrapper.db_version == 2:
async with self.db.execute(
"SELECT block_record FROM full_blocks WHERE header_hash=?",
(header_hash,),
) as cursor:
row = await cursor.fetchone()
if row is not None:
return BlockRecord.from_bytes(row[0])
else:
async with self.db.execute(
"SELECT block from block_records WHERE header_hash=?",
(header_hash.hex(),),
) as cursor:
row = await cursor.fetchone()
if row is not None:
return BlockRecord.from_bytes(row[0])
return None
async def get_block_records_in_range(
self,
start: int,
stop: int,
) -> Dict[bytes32, BlockRecord]:
"""
Returns a dictionary with all blocks in range between start and stop
if present.
"""
ret: Dict[bytes32, BlockRecord] = {}
if self.db_wrapper.db_version == 2:
async with self.db.execute(
"SELECT header_hash, block_record FROM full_blocks WHERE height >= ? AND height <= ?",
(start, stop),
) as cursor:
for row in await cursor.fetchall():
header_hash = bytes32(row[0])
ret[header_hash] = BlockRecord.from_bytes(row[1])
else:
formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {start} and height <= {stop}"
async with await self.db.execute(formatted_str) as cursor:
for row in await cursor.fetchall():
header_hash = bytes32(self.maybe_from_hex(row[0]))
ret[header_hash] = BlockRecord.from_bytes(row[1])
return ret
async def get_peak(self) -> Optional[Tuple[bytes32, uint32]]:
if self.db_wrapper.db_version == 2:
async with self.db.execute("SELECT hash FROM current_peak WHERE key = 0") as cursor:
peak_row = await cursor.fetchone()
if peak_row is None:
return None
async with self.db.execute("SELECT height FROM full_blocks WHERE header_hash=?", (peak_row[0],)) as cursor:
peak_height = await cursor.fetchone()
if peak_height is None:
return None
return bytes32(peak_row[0]), uint32(peak_height[0])
else:
async with self.db.execute("SELECT header_hash, height from block_records WHERE is_peak = 1") as cursor:
peak_row = await cursor.fetchone()
if peak_row is None:
return None
return bytes32(bytes.fromhex(peak_row[0])), uint32(peak_row[1])
async def get_block_records_close_to_peak(
self, blocks_n: int
) -> Tuple[Dict[bytes32, BlockRecord], Optional[bytes32]]:
"""
Returns a dictionary with all blocks that have height >= peak height - blocks_n, as well as the
peak header hash.
"""
peak = await self.get_peak()
if peak is None:
return {}, None
ret: Dict[bytes32, BlockRecord] = {}
if self.db_wrapper.db_version == 2:
async with self.db.execute(
"SELECT header_hash, block_record FROM full_blocks WHERE height >= ?",
(peak[1] - blocks_n,),
) as cursor:
for row in await cursor.fetchall():
header_hash = bytes32(row[0])
ret[header_hash] = BlockRecord.from_bytes(row[1])
else:
formatted_str = f"SELECT header_hash, block from block_records WHERE height >= {peak[1] - blocks_n}"
async with self.db.execute(formatted_str) as cursor:
for row in await cursor.fetchall():
header_hash = bytes32(self.maybe_from_hex(row[0]))
ret[header_hash] = BlockRecord.from_bytes(row[1])
return ret, peak[0]
async def set_peak(self, header_hash: bytes32) -> None:
# We need to be in a sqlite transaction here.
# Note: we do not commit this to the database yet, as we need to also change the coin store
if self.db_wrapper.db_version == 2:
# Note: we use the key field as 0 just to ensure all inserts replace the existing row
await self.db.execute("INSERT OR REPLACE INTO current_peak VALUES(?, ?)", (0, header_hash))
else:
await self.db.execute("UPDATE block_records SET is_peak=0 WHERE is_peak=1")
await self.db.execute(
"UPDATE block_records SET is_peak=1 WHERE header_hash=?",
(self.maybe_to_hex(header_hash),),
)
async def is_fully_compactified(self, header_hash: bytes32) -> Optional[bool]:
async with self.db.execute(
"SELECT is_fully_compactified from full_blocks WHERE header_hash=?", (self.maybe_to_hex(header_hash),)
) as cursor:
row = await cursor.fetchone()
if row is None:
return None
return bool(row[0])
async def get_random_not_compactified(self, number: int) -> List[int]:
if self.db_wrapper.db_version == 2:
async with self.db.execute(
f"SELECT height FROM full_blocks WHERE in_main_chain=1 AND is_fully_compactified=0 "
f"ORDER BY RANDOM() LIMIT {number}"
) as cursor:
rows = await cursor.fetchall()
else:
# Since orphan blocks do not get compactified, we need to check whether all blocks with a
# certain height are not compact. And if we do have compact orphan blocks, then all that
# happens is that the occasional chain block stays uncompact - not ideal, but harmless.
async with self.db.execute(
f"SELECT height FROM full_blocks GROUP BY height HAVING sum(is_fully_compactified)=0 "
f"ORDER BY RANDOM() LIMIT {number}"
) as cursor:
rows = await cursor.fetchall()
heights = [int(row[0]) for row in rows]
return heights
|
py
|
1a5d0a7cf816e727661e9701ec189b9fd373e1cd
|
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
# --------------------------------------------------------------------------
"""
ONNX Runtime is a performance-focused scoring engine for Open Neural Network Exchange (ONNX) models.
For more information on ONNX Runtime, please see `aka.ms/onnxruntime <https://aka.ms/onnxruntime/>`_
or the `Github project <https://github.com/microsoft/onnxruntime/>`_.
"""
__version__ = "1.12.0"
__author__ = "Microsoft"
# We need to do device version validation (for example, to check the CUDA version for an
# onnxruntime-training package). Knowing whether the onnxruntime package is a training build
# requires importing onnxruntime.training.ortmodule first, and onnxruntime.capi._pybind_state is
# required before importing onnxruntime.training.ortmodule. However, importing
# onnxruntime.capi._pybind_state will already raise an exception if a required CUDA version is
# not found. So we save that exception, continue with the device version validation in order to
# post meaningful messages to the user, and re-raise the saved exception after validation.
try:
from onnxruntime.capi._pybind_state import get_all_providers, get_available_providers, get_device, set_seed, \
RunOptions, SessionOptions, set_default_logger_severity, enable_telemetry_events, disable_telemetry_events, \
NodeArg, ModelMetadata, GraphOptimizationLevel, ExecutionMode, ExecutionOrder, SessionIOBinding, \
OrtAllocatorType, OrtMemType, OrtArenaCfg, OrtMemoryInfo, create_and_register_allocator, OrtSparseFormat, \
set_default_logger_verbosity
import_capi_exception = None
except Exception as e:
import_capi_exception = e
from onnxruntime.capi import onnxruntime_validation
if import_capi_exception:
raise import_capi_exception
from onnxruntime.capi.onnxruntime_inference_collection import InferenceSession, IOBinding, OrtValue, SparseTensor, \
OrtDevice
from onnxruntime.capi.training import * # noqa: F403
# TODO: thiagofc: Temporary experimental namespace for new PyTorch front-end
try:
from . import experimental
except ImportError:
pass
from onnxruntime.capi.onnxruntime_validation import package_name, version, cuda_version
if version:
__version__ = version
onnxruntime_validation.check_distro_info()
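# Minimal usage sketch (illustrative; "model.onnx" and the input name "input"
# are placeholders for a real exported model and its declared input):
#
#     import numpy as np
#     import onnxruntime as ort
#     session = ort.InferenceSession("model.onnx", providers=ort.get_available_providers())
#     outputs = session.run(None, {"input": np.zeros((1, 3, 224, 224), dtype=np.float32)})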
|
py
|
1a5d0a8b686df1ebdf166587e99efe5babfff546
|
import numpy as np
import sys, os
import yaml
import trimesh
parent_dir = os.path.dirname(os.getcwd())
pykin_path = parent_dir + "/../../"
sys.path.append(pykin_path)
from pykin.robots.single_arm import SingleArm
from pykin.planners.rrt_star_planner import RRTStarPlanner
from pykin.collision.collision_manager import CollisionManager
from pykin.kinematics.transform import Transform
from pykin.utils.object_utils import ObjectManager
from pykin.utils import plot_utils as plt
fig, ax = plt.init_3d_figure(figsize=(10,6), dpi= 100)
file_path = '../../../asset/urdf/sawyer/sawyer.urdf'
mesh_path = pykin_path+"/asset/urdf/sawyer/"
yaml_path = '../../../asset/config/sawyer_init_params.yaml'
with open(yaml_path) as f:
controller_config = yaml.safe_load(f)
robot = SingleArm(file_path, Transform(rot=[0.0, 0.0, 0], pos=[0, 0, 0]))
robot.setup_link_name("sawyer_base", "sawyer_right_hand")
##################################################################
init_qpos = controller_config["init_qpos"]
init_fk = robot.forward_kin(np.concatenate((np.zeros(1), init_qpos)))
init_eef_pose = robot.get_eef_pose(init_fk)
goal_eef_pose = controller_config["goal_pose"]
c_manager = CollisionManager(mesh_path)
c_manager.setup_robot_collision(robot, init_fk)
milk_path = pykin_path+"/asset/objects/meshes/milk.stl"
milk_mesh = trimesh.load_mesh(milk_path)
obs = ObjectManager()
o_manager = CollisionManager(milk_path)
for i in range(9):
name = "miik_" + str(i)
if i < 3:
obs_pos = [0.3, -0.5 + i * 0.5, 0.3]
elif 3 <= i < 6:
obs_pos = [0.3, -0.5 + (i-3) * 0.5, 0.9]
else:
obs_pos = [0.3, -0.5 + (i-6) * 0.5, -0.3]
o_manager.add_object(name, gtype="mesh", gparam=milk_mesh, transform=Transform(pos=obs_pos).h_mat)
obs(name=name, gtype="mesh", gparam=milk_mesh, transform=Transform(pos=obs_pos).h_mat)
##################################################################
planner = RRTStarPlanner(
robot=robot,
self_collision_manager=c_manager,
object_collision_manager=o_manager,
delta_distance=0.1,
epsilon=0.2,
max_iter=300,
gamma_RRT_star=0.2,
dimension=7,
n_step=1
)
joint_path, _ = planner.get_path_in_joinst_space(cur_q=init_qpos, goal_pose=goal_eef_pose)
if joint_path is None:
    print("Cannot visualize path")
    exit()
joint_trajectory = []
eef_poses = []
for step, joint in enumerate(joint_path):
transformations = robot.forward_kin(np.concatenate((np.zeros(1),joint)))
joint_trajectory.append(transformations)
eef_poses.append(transformations[robot.eef_name].pos)
plt.plot_animation(
robot,
joint_trajectory,
fig,
ax,
eef_poses=eef_poses,
objects=obs,
visible_objects=True,
visible_collision=True,
interval=1,
repeat=True)
|
py
|
1a5d0a919f8078599bf637b188f8d03241e11494
|
#!/usr/bin/env python
from __future__ import unicode_literals
from mptt import VERSION
requires = ()
try:
from setuptools import setup
kwargs = {str('install_requires'): requires}
except ImportError:
from distutils.core import setup
kwargs = {str('requires'): requires}
# Dynamically calculate the version based on mptt.VERSION
version_tuple = VERSION
version = ".".join([str(v) for v in version_tuple])
# on py3, all these are text strings
# on py2, they're all byte strings.
# ... and that's how setuptools likes it.
setup(
name=str('django-mptt'),
description=str('''Utilities for implementing Modified Preorder Tree Traversal
with your Django Models and working with trees of Model instances.'''),
version=version,
author=str('Craig de Stigter'),
author_email=str('[email protected]'),
url=str('http://github.com/django-mptt/django-mptt'),
packages=[str('mptt'), str('mptt.templatetags')],
package_data={str('mptt'): [str('templates/admin/*'), str('locale/*/*/*.*')]},
classifiers=[
str('Development Status :: 4 - Beta'),
str('Environment :: Web Environment'),
str('Framework :: Django'),
str('Intended Audience :: Developers'),
str('License :: OSI Approved :: MIT License'),
str('Operating System :: OS Independent'),
str('Programming Language :: Python'),
str('Programming Language :: Python :: 2'),
str('Programming Language :: Python :: 3'),
str('Topic :: Utilities'),
],
**kwargs
)
|
py
|
1a5d0b2f04a1b290e25e5da299cf7d0b4f5d8f3e
|
"""Test parse log."""
from friendly_parakeet.parse_log import parse_logs
test_input = """## LOG FILE
Rule Apple:
Line 1 message
Line 2 message
ERROR: Input is wrong
Line 3 message
ERROR: Another wrong input
Rule Peach:
Line 1 message
Line 2 message
Rule Kiwi:
ERROR: Fatal Error
Rule Apple:
ERROR: Input is still wrong
"""
expected_out = """Rule Apple: 3 ERRORS
Rule Peach: 0 ERRORS
Rule Kiwi: 1 ERROR"""
def test_log_parse():
assert parse_logs(test_input) == expected_out
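# Reference sketch (an assumption, not friendly_parakeet's actual implementation):
# one way parse_logs could satisfy the expectation above is to count ERROR lines
# per "Rule <name>:" section, accumulating counts for repeated rule headers.
def _parse_logs_sketch(text):
    counts = {}
    current = None
    for line in text.splitlines():
        stripped = line.strip()
        if stripped.startswith("Rule ") and stripped.endswith(":"):
            current = stripped[:-1]
            counts.setdefault(current, 0)
        elif stripped.startswith("ERROR:") and current is not None:
            counts[current] += 1
    lines = ["{}: {} {}".format(rule, n, "ERROR" if n == 1 else "ERRORS")
             for rule, n in counts.items()]
    return "\n".join(lines)


def test_log_parse_sketch():
    """The sketch reproduces the same expected output as parse_logs."""
    assert _parse_logs_sketch(test_input) == expected_out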
|
py
|
1a5d0b89617802ae0b4572e4b017cce590df086a
|
# Read and print the contents of file.txt, closing the handle automatically.
with open('file.txt') as file:
    print(file.read())
|
py
|
1a5d0bc61dc50ef287d290738abf5cddc2a283c4
|
import matplotlib
matplotlib.use('Agg')
#matplotlib.use("gtk")
#matplotlib.use('Qt5Agg')
from table_functions import *
import pickle
import os
import pandas as pd
import matplotlib.pyplot as plt
import sys
import numpy as np
from scipy import stats
from matplotlib.pyplot import figure
import glob
from hist_functions import *
import scipy.stats
from pathlib import Path
import ipdb  # used for interactive debugging via ipdb.set_trace()
from scatter_plot_functions import *
from rectify_vars_and_wald_functions import *
from checkpickle_EFFECT_new import parse_dir
SMALL_SIZE = 13
MEDIUM_SIZE = 10
BIGGER_SIZE = 14
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=8.5) # fontsize of the tick labels
plt.rc('ytick', labelsize=10) # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE) # fontsize of the figure title
PROP_EPS_KEY = "prop_exploring_ppd_cuml"
def plot_hist_and_table(df_for_num_steps_eg0pt1, df_for_num_steps_eg0pt3, df_for_num_steps_ts, df_for_num_steps_unif, num_steps, epsilon, n):
fig_h, ax_h = plt.subplots()
proportions_unif = df_for_num_steps_unif['sample_size_1'] / num_steps
proportions_eg0pt1 = df_for_num_steps_eg0pt1['sample_size_1'] / num_steps
proportions_eg0pt3 = df_for_num_steps_eg0pt3['sample_size_1'] / num_steps
proportions_ts = df_for_num_steps_ts['sample_size_1'] / num_steps
ax_h.hist(proportions_eg0pt1, alpha = 0.5, label = "Epsilon Greedy 0.1")
ax_h.hist(proportions_eg0pt3, alpha = 0.5, label = "Epsilon Greedy 0.3")
ax_h.hist(proportions_unif, alpha = 0.5, label = "Uniform Random")
ax_h.hist(proportions_ts, alpha = 0.5, label = "Thompson Sampling")
ax_h.legend()
fig_h.suptitle("Histogram of Proportion of {} Participants Assigned to Condition 1 Across 500 Simulations".format(num_steps))
# rows = ["Areferg"]
# columns = ["Berger"]
# cell_text = ["ergerg"]
# the_table = ax_h.table(cellText=cell_text,
# rowLabels=rows,
# colLabels=columns,
# loc='right')
# fig_h.subplots_adjust(left=0.2, wspace=0.4)
data = np.random.uniform(0, 1, 80).reshape(20, 4)
mean_ts = np.mean(proportions_ts)
var_ts = np.var(proportions_ts)
mean_eg0pt1 = np.mean(proportions_eg0pt1)
mean_eg0pt3 = np.mean(proportions_eg0pt3)
var_eg0pt1 = np.var(proportions_eg0pt1)
var_eg0pt3 = np.var(proportions_eg0pt3)
prop_lt_25_eg0pt1 = np.sum(proportions_eg0pt1 < 0.25) / len(proportions_eg0pt1)
prop_lt_25_eg0pt3 = np.sum(proportions_eg0pt3 < 0.25) / len(proportions_eg0pt3)
prop_lt_25_ts = np.sum(proportions_ts < 0.25) / len(proportions_ts)
# prop_gt_25_lt_5_eg = np.sum(> proportions > 0.25) / len(proportions)
# prop_gt_25_lt_5_ts = np.sum(> proportions_ts > 0.25) / len(proportions_ts)
data = [[mean_ts, var_ts, prop_lt_25_ts],\
[mean_eg0pt1, var_eg0pt1, prop_lt_25_eg0pt1],\
[mean_eg0pt3, var_eg0pt3, prop_lt_25_eg0pt3]]
final_data = [['%.3f' % j for j in i] for i in data] #<0.25, 0.25< & <0.5, <0.5 & <0.75, <0.75 & <1.0
#table.auto_set_font_size(False)
# table.set_fontsize(7)
# table.auto_set_column_width((-1, 0, 1, 2, 3))
table = ax_h.table(cellText=final_data, colLabels=['Mean', 'Variance', 'prop < 0.25'], rowLabels = ["Thompson Sampling", "Epsilon Greedy 0.1", "Epsilon Greedy 0.3"], loc='bottom', cellLoc='center', bbox=[0.25, -0.5, 0.5, 0.3])
table.auto_set_font_size(False)
table.set_fontsize(7)
table.auto_set_column_width((-1, 0, 1, 2, 3))
# Adjust layout to make room for the table:
#ax_h.tick_params(axis='x', pad=20)
#fig_h.subplots_adjust(left=0.2, bottom=0.5)
#fig_h.tight_layout()
save_dir = "../simulation_analysis_saves/histograms/ExploreAndExploit/N={}".format(n)
Path(save_dir).mkdir(parents=True, exist_ok=True)
fig_h.savefig(save_dir + "/condition_prop_n={}.png".format(num_steps), bbox_inches = 'tight')
fig_h.clf()
def stacked_bar_plot_with_cutoff(df_ts = None, df_eg0pt1 = None, df_eg0pt3 = None, df_unif = None, df_tsppd = None, n = None, num_sims = None, df_ets = None, \
title = None, bs_prop = 0.0,\
ax = None, ax_idx = None, epsilon = None, es=None):
step_sizes = df_unif['num_steps'].unique()
size_vars = ["n/2", "n", "2*n", "4*n"]
t1_list_eg0pt1 = []
t1_list_eg0pt3 = []
t1_list_unif = []
    t1_wald_list_unif = []  # holds reward, not Type I error; TODO: rename these variables
var_list = []
t1_list_ts = []
t1_list_tsppd = []
t1_list_ets = []
for num_steps in step_sizes:
df_for_num_steps_eg0pt1 = df_eg0pt1[df_eg0pt1['num_steps'] == num_steps].dropna()
df_for_num_steps_eg0pt3 = df_eg0pt3[df_eg0pt3['num_steps'] == num_steps].dropna()
df_for_num_steps_unif = df_unif[df_unif['num_steps'] == num_steps].dropna()
df_for_num_steps_ts = df_ts[df_ts['num_steps'] == num_steps].dropna()
df_for_num_steps_tsppd = df_tsppd[df_tsppd['num_steps'] == num_steps].dropna()
df_for_num_steps_ets = df_ets[df_ets['num_steps'] == num_steps].dropna()
#df_for_num_steps_unif = df_for_num_steps_unif.dropna()
# bins = np.arange(0, 1.01, .025)
unif_reward_mean = (df_for_num_steps_unif['total_reward']/num_steps).mean()
ts_reward_mean = (df_for_num_steps_ts['total_reward']/num_steps).mean()
eps_greedy_reward_mean_0pt1 = (df_for_num_steps_eg0pt1['total_reward']/num_steps).mean()
eps_greedy_reward_mean_0pt3 = (df_for_num_steps_eg0pt3['total_reward']/num_steps).mean()
tsppd_reward_mean = (df_for_num_steps_tsppd['total_reward']/num_steps).mean()
ets_reward_mean = (df_for_num_steps_ets['total_reward']/num_steps).mean()
t1_list_unif.append(unif_reward_mean)
t1_list_ts.append(ts_reward_mean)
t1_list_eg0pt1.append(eps_greedy_reward_mean_0pt1)
t1_list_eg0pt3.append(eps_greedy_reward_mean_0pt3)
t1_list_tsppd.append(tsppd_reward_mean)
t1_list_ets.append(ets_reward_mean)
t1_list_ts = np.array(t1_list_ts)
t1_list_tsppd = np.array(t1_list_tsppd)
t1_list_ets = np.array(t1_list_ets)
ind = np.arange(3*len(step_sizes), step=3)
# print(ind)
# print(step_sizes)
ax.set_xticks(ind)
ax.set_xticklabels(step_sizes)
print("var", var_list)
width = 0.44
capsize = width*4
width_total = 2*width
t1_list_eg0pt1 = np.array(t1_list_eg0pt1)
t1_list_eg0pt3 = np.array(t1_list_eg0pt3)
t1_list_unif = np.array(t1_list_unif)
num_sims_RL4RL = 5000
t1_eg0pt1_se = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_eg0pt1*(1-t1_list_eg0pt1)/num_sims_RL4RL) #95 CI for Proportion
t1_eg0pt3_se = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_eg0pt3*(1-t1_list_eg0pt3)/num_sims_RL4RL) #95 CI for Proportion
t1_se_unif = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_unif*(1-t1_list_unif)/num_sims_RL4RL)
t1_se_ts = stats.t.ppf(1-0.025, num_sims)*np.sqrt(t1_list_ts*(1-t1_list_ts)/num_sims_RL4RL)
num_sims_ppd = num_sims
t1_se_tsppd = stats.t.ppf(1-0.025, num_sims_ppd)*np.sqrt(t1_list_tsppd*(1-t1_list_tsppd)/num_sims_ppd)
num_sims_ets = num_sims
t1_se_ets = stats.t.ppf(1-0.025, num_sims_ppd)*np.sqrt(t1_list_ets*(1-t1_list_ets)/num_sims_ets)
print(t1_se_unif) #note that power goes to 1.0 for unif, thus error bars
#print(t1_se_unif)
p1 = ax.bar(ind, t1_list_eg0pt1, width = width, yerr = t1_eg0pt1_se, \
ecolor='black', capsize=capsize, color = 'yellow', edgecolor='black')
p3 = ax.bar(ind+width, t1_list_eg0pt3, width = width, yerr = t1_eg0pt3_se, \
ecolor='black', capsize=capsize, color = 'green', edgecolor='black')
p4 = ax.bar(ind+2*width, t1_list_ts, width = width, yerr = t1_se_ts,
ecolor='black', capsize=capsize, color = 'blue', edgecolor='black')
p5 = ax.bar(ind+3*width, t1_list_tsppd, width = width, yerr = t1_se_tsppd,
ecolor='black', capsize=capsize, color = 'purple', edgecolor='black')
p6 = ax.bar(ind+4*width, t1_list_ets, width = width, yerr = t1_se_ets,
ecolor='black', capsize=capsize, color = 'brown', edgecolor='black')
p2 = ax.bar(ind-width, t1_list_unif, width = width,\
yerr = t1_se_unif, ecolor='black', \
capsize=capsize, color = 'red', \
edgecolor='black')
if ax_idx == 2:
# leg1 = ax.legend((p1[0], p2[0], p3[0], p4[0]), ('Epsilon Greedy Chi Squared 0.1', "Uniform Chi Squared", "Epsilon Greedy Chi Squared 0.3", "Thompson Sampling Chi Squared"), bbox_to_anchor=(1.0, 1.76))
leg1 = ax.legend((p2[0], p1[0], p3[0], p4[0], p5[0], p6[0]), ("Uniform Wald", 'Epsilon Greedy 0.1 Wald', "Epsilon Greedy 0.3 Wald", "Thompson Sampling Wald", "PPD c 0.1 Thompson Sampling Wald", "Epsilon 0.1 Thompson Sampling Wald"), bbox_to_anchor=(1.0, 1.76))
#leg1 = ax.legend((p2[0], p1[0], p3[0], p4[0]), ("Uniform Chi Squared", 'Epsilon Greedy Chi Squared 0.1', "Epsilon Greedy Chi Squared 0.3", "Thompson Sampling Chi Squared"), bbox_to_anchor=(1.0, 1.76))
#leg2 = ax.legend(loc = 2)
ax.add_artist(leg1)
# plt.tight_layout()
# plt.title(title)
# if ax_idx == 6 or ax_idx == 7 or ax_idx == 8:
ax.set_xlabel("number of participants = \n n/2, n, 2*n, 4*n")
ax.set_ylim(0.40, 0.8)
x = es / 2
optimal_arm = 0.5 + x
ax.axhline(y=optimal_arm, linestyle='--')
return [t1_list_unif, t1_list_eg0pt1, t1_list_ts] #returns [UR Eps_Greedy, TS], in this case, need to return for each step size, but only plotting for one bs, so save step size by model (4x2)
def parse_dir_old(root, root_cutoffs, num_sims):
arm_prob= 0.5
arm_prob_list = [0.2, 0.5, 0.8]
es_list = [0.5, 0.3, 0.1]
n_list = [32, 88, 785]
epsilon = 0.1
#EpsilonGreedyIsEffect/num_sims=5armProb=0.5/es=0.3epsilon=0.1/
root_dir = root + "/num_sims={}armProb={}".format(num_sims, arm_prob)
fig, ax = plt.subplots(1,3, figsize = (12,5))
#fig.set_size_inches(17.5, 13.5)
ax = ax.ravel()
i = 0
# ipdb.set_trace()
c = 0.1
num_sims_secb = 5000
root_ts = "../../../RL4RLSectionB/simulation_saves/IsEffect_fixedbs_RL4RLMay8/num_sims={}armProb=0.5".format(num_sims_secb)
root_eg = "../../../RL4RLSectionB/simulation_saves/EpsilonGreedyIsEffect/num_sims={}armProb=0.5".format(num_sims_secb)
root_unif = "../../../RL4RLSectionB/simulation_saves/UniformIsEffect/num_sims={}armProb=0.5".format(num_sims_secb)
root_ets = "../simulation_saves/EpsilonTSIsEffect/num_sims={}armProb=0.5".format(5000)
for n in n_list:
es = es_list[i]
bs = 1
es_dir_0pt1 = root_eg + "/es={}epsilon={}/".format(es, 0.1)
es_dir_0pt3 = root_eg + "/es={}epsilon={}/".format(es, 0.3)
ts_dir = root_ts + "/es={}/".format(es)
tsppd_dir = root_dir + "/es={}c={}/".format(es, c)
ets_dir = root_ets + "/es={}epsilon={}/".format(es, 0.1)
unif_dir = root_unif + "/es={}/".format(es)
to_check_eg0pt1 = glob.glob(es_dir_0pt1 + "/*Prior*{}*{}Df.pkl".format(bs,es))[0] #Has eg, 34 in 348!!
assert(len(glob.glob(es_dir_0pt1 + "/*Prior*{}*{}Df.pkl".format(bs,es))) == 1)
to_check_eg0pt3 = glob.glob(es_dir_0pt3 + "/*Prior*{}*{}Df.pkl".format(bs,es))[0] #Has eg, 34 in 348!!
assert(len(glob.glob(es_dir_0pt3 + "/*Prior*{}*{}Df.pkl".format(bs,es))) == 1)
to_check_unif = glob.glob(unif_dir + "/*Uniform*{}*{}Df.pkl".format(bs, es))[0]
assert(len(glob.glob(unif_dir + "/*Uniform*{}*{}Df.pkl".format(bs, es))) == 1)
to_check_ts = glob.glob(ts_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))[0] #Has eg, 34 in 348!!
assert(len(glob.glob(ts_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))) == 1)
to_check_tsppd = glob.glob(tsppd_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))[0] #Has eg, 34 in 348!!
assert(len(glob.glob(tsppd_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))) == 1)
to_check_ets = glob.glob(ets_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))[0] #Has eg, 34 in 348!!
assert(len(glob.glob(ets_dir + "/*Prior*{}*{}Df.pkl".format(bs,es))) == 1)
#------hists, tables etc
with open(to_check_eg0pt1, 'rb') as f:
df_eg0pt1 = pickle.load(f)
with open(to_check_eg0pt3, 'rb') as f:
df_eg0pt3 = pickle.load(f)
with open(to_check_unif, 'rb') as f:
df_unif = pickle.load(f)
if to_check_ts != None:
with open(to_check_ts, 'rb') as t:
df_ts = pickle.load(t)
with open(to_check_tsppd, 'rb') as f:
df_tsppd = pickle.load(f)
with open(to_check_ets, 'rb') as f:
df_ets = pickle.load(f)
# ipdb.set_trace()
rect_key = "TS"
rect_key = "Drop NA"
rectify_vars_noNa(df_eg0pt1, alg_key = rect_key)
rectify_vars_noNa(df_eg0pt3, alg_key = rect_key)
rectify_vars_noNa(df_ts, alg_key = rect_key)
rectify_vars_noNa(df_unif, alg_key = rect_key)
assert np.sum(df_eg0pt1["wald_type_stat"].isna()) == 0
assert np.sum(df_eg0pt1["wald_pval"].isna()) == 0
next_df = stacked_bar_plot_with_cutoff(df_eg0pt1 = df_eg0pt1, df_eg0pt3 = df_eg0pt3,\
df_unif = df_unif, df_ts = df_ts, df_tsppd = df_tsppd, df_ets = df_ets,\
n = n, es=es,num_sims = num_sims,
ax = ax[i], ax_idx = i, epsilon = epsilon)
#
ax[i].set_title("es = {}, n = {}".format(es, n))
ax[i].set_ylabel("Reward")
i += 1
df = pd.DataFrame(next_df, columns = ["n/2","n","2n","4n"])
df.index = ["Uniform Random Chi Squared","Epsilon Greedy Chi Squared", "Thompson Sampling Chi Squared"]
save_dir = "../simulation_analysis_saves/histograms/ExploreAndExploit/N={}".format(n)
Path(save_dir).mkdir(parents=True, exist_ok=True)
df.to_csv(save_dir + "/Reward={}_numsims={}.csv".format(n, num_sims))
title = "Average Reward \n Across {} Simulations".format(num_sims)
#ax[i].set_title(title, fontsize = 55)
#i +=1
#fig.suptitle("Type One Error Rates Across {} Simulations".format(num_sims))
fig.suptitle(title)
#fig.tight_layout(rect=[0, 0.03, 1, 0.95])
#handles, labels = ax[i-1].get_legend_handles_labels()
#fig.legend(handles, labels, loc='upper right', prop={'size': 50})
#fig.tight_layout()
save_dir = "../simulation_analysis_saves/power_t1_plots"
if not os.path.isdir(save_dir):
os.mkdir(save_dir)
# print("saving to ", "plots/{}.png".format(title))
#fig.set_tight_layout(True)
fig.tight_layout()
fig.subplots_adjust(top=.8)
fig.savefig(save_dir + "/{}.svg".format(title), bbox_inches = 'tight')
# plt.show()
fig.clf()
plt.close(fig)
if __name__ == "__main__":
root = "../simulation_saves/TSPPDIsEffect"
#parse_dir(root, root_cutoffs)
num_sims = 500
num_sims = 5000
num_sims = 10000
title = "Mean Reward \n Averaged Across {} Simulations".format(num_sims)
# parse_dir_old(root, root, num_sims)
parse_dir(root, root, num_sims, title = title, metric = "Reward", ylabel = "Reward", ymax = 0.80, num_es = 3)
title = "Proportion of Optimal Allocations \n Averaged Across {} Simulations".format(num_sims)
parse_dir(root, root, num_sims, title = title, metric = "PropOpt", ylabel = "Proportion Optimal Allocation", ymax = 1.0, num_es = 3)
parse_dir(root, root, num_sims, title = title, metric = "PropEps", ylabel = "Proportion Eps", ymax = 1.0, num_es = 3)
|
py
|
1a5d0c30a041f7de1937022ad209715969b87db1
|
from warnings import simplefilter
import numpy as np
from sklearn import model_selection
import wandb
from wandb.sklearn import utils
# ignore all future warnings
simplefilter(action="ignore", category=FutureWarning)
def learning_curve(
model,
X,
y,
cv=None,
shuffle=False,
random_state=None,
train_sizes=None,
n_jobs=1,
scoring=None,
):
"""Trains model on datasets of varying size and generates plot of score vs size.
Called by plot_learning_curve to visualize learning curve. Please use the function
plot_learning_curve() if you wish to visualize your learning curves.
"""
train_sizes, train_scores, test_scores = model_selection.learning_curve(
model,
X,
y,
cv=cv,
n_jobs=n_jobs,
train_sizes=train_sizes,
scoring=scoring,
shuffle=shuffle,
random_state=random_state,
)
train_scores_mean = np.mean(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
table = make_table(train_scores_mean, test_scores_mean, train_sizes)
chart = wandb.visualize("wandb/learning_curve/v1", table)
return chart
def make_table(train, test, train_sizes):
data = []
for i in range(len(train)):
if utils.check_against_limit(
i,
"learning_curve",
utils.chart_limit / 2,
):
break
train_set = ["train", utils.round_2(train[i]), train_sizes[i]]
test_set = ["test", utils.round_2(test[i]), train_sizes[i]]
data.append(train_set)
data.append(test_set)
table = wandb.Table(columns=["dataset", "score", "train_size"], data=data)
return table
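# Hypothetical usage sketch (not part of the wandb module): log a learning curve
# for a small classifier. The dataset, model, and project name are illustrative.
if __name__ == "__main__":
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression

    X, y = load_iris(return_X_y=True)
    wandb.init(project="learning-curve-demo")
    chart = learning_curve(
        LogisticRegression(max_iter=200),
        X,
        y,
        cv=3,
        train_sizes=np.linspace(0.1, 1.0, 5),
    )
    wandb.log({"learning_curve": chart})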
|
py
|
1a5d0c6ba7d901b412587e0c9dc12aedbf0b4da3
|
import time
import math
import numpy as np
import tensorflow as tf
import ops
from config import config
from mac_cell import MACCell
'''
The MAC network model. It performs reasoning processes to answer a question over a
knowledge base (the image) by decomposing the task into attention-based computational steps,
each performed by a recurrent MAC cell.
The network has three main components.
Input unit: processes the network inputs (raw question strings and the image) into
distributional representations.
The MAC network: calls the MAC cell (mac_cell.py) config.netLength times,
to perform the reasoning process over the question and image.
The output unit: a classifier that receives the question and final state of the MAC
network and uses them to compute log-likelihood over the possible one-word answers.
'''
class MACnet(object):
'''Initialize the class.
Args:
embeddingsInit: initialization for word embeddings (random / glove).
answerDict: answers dictionary (mapping between integer id and symbol).
'''
def __init__(self, embeddingsInit, answerDict, questionDict, nextElement = None):
self.input = nextElement
self.embeddingsInit = embeddingsInit
self.answerDict = answerDict
self.questionDict = questionDict
self.build()
'''
Initializes placeholders.
questionIndicesAll: integer ids of question words.
[batchSize, questionLength]
questionLengthsAll: length of each question.
[batchSize]
imagesPlaceholder: image features.
[batchSize, channels, height, width]
(converted internally to [batchSize, height, width, channels])
answerIndicesAll: integer ids of answer words.
[batchSize]
lr: learning rate (tensor scalar)
train: train / evaluation (tensor boolean)
dropout values dictionary (tensor scalars)
'''
# change to H x W x C?
def addPlaceholders(self):
with tf.variable_scope("Placeholders"):
## data
# questions
self.questionIndicesAll = tf.placeholder(tf.int32, shape = (None, None))
self.questionLengthsAll = tf.placeholder(tf.int32, shape = (None, ))
# images
# put image known dimension as last dim?
if config.imageObjects:
self.imagesAll = tf.placeholder(tf.float32, shape = (None, None, None))
self.imagesObjectNumAll = tf.placeholder(tf.int32, shape = (None, ))
else:
self.imagesPlaceholder = tf.placeholder(tf.float32, shape = (None, None, None, None))
self.imagesAll = tf.transpose(self.imagesPlaceholder, (0, 2, 3, 1))
# answers
self.answerIndicesAll = tf.placeholder(tf.int32, shape = (None, ))
if config.dataset == "VQA":
self.answerFreqListsAll = tf.placeholder(tf.int32, shape = (None, None))
self.answerFreqNumsAll = tf.placeholder(tf.int32, shape = (None, ))
if config.ansFormat == "mc":
self.choicesIndicesAll = tf.placeholder(tf.int32, shape = (None, None))
self.choicesNumsAll = tf.placeholder(tf.int32, shape = (None, ))
# in general could consolidate that with mc and make it more general if i did choicesIndices all of them
# in case of open ended
## optimization
self.lr = tf.placeholder(tf.float32, shape = ())
self.train = tf.placeholder(tf.bool, shape = ())
self.batchSizeAll = tf.shape(self.questionIndicesAll)[0]
## dropouts
# TODO: change dropouts to be 1 - current
self.dropouts = {
"encInput": tf.placeholder(tf.float32, shape = ()),
"encState": tf.placeholder(tf.float32, shape = ()),
"stem": tf.placeholder(tf.float32, shape = ()),
"question": tf.placeholder(tf.float32, shape = ()),
"read": tf.placeholder(tf.float32, shape = ()),
"write": tf.placeholder(tf.float32, shape = ()),
"memory": tf.placeholder(tf.float32, shape = ()),
"output": tf.placeholder(tf.float32, shape = ()),
"controlPre": tf.placeholder(tf.float32, shape = ()),
"controlPost": tf.placeholder(tf.float32, shape = ()),
"wordEmb": tf.placeholder(tf.float32, shape = ()),
"word": tf.placeholder(tf.float32, shape = ()),
"vocab": tf.placeholder(tf.float32, shape = ()),
"object": tf.placeholder(tf.float32, shape = ()),
"wordStandard": tf.placeholder(tf.float32, shape = ())
}
# batch norm params
self.batchNorm = {"decay": config.bnDecay, "train": self.train}
self.imageInDim = config.imageDims[-1]
if not config.imageObjects:
self.H, self.W, self.imageInDim = 7, 7, 2048# config.imageDims
if config.dataset == "CLEVR":
self.H, self.W, self.imageInDim = 14, 14, 1024
# Feeds data into placeholders. See addPlaceholders method for further details.
def createFeedDict(self, data, images, train):
feedDict = {
self.questionIndicesAll: data["questions"],
self.questionLengthsAll: data["questionLengths"],
self.answerIndicesAll: data["answers"],
self.dropouts["encInput"]: config.encInputDropout if train else 1.0,
self.dropouts["encState"]: config.encStateDropout if train else 1.0,
self.dropouts["stem"]: config.stemDropout if train else 1.0,
self.dropouts["question"]: config.qDropout if train else 1.0, #_
self.dropouts["memory"]: config.memoryDropout if train else 1.0,
self.dropouts["read"]: config.readDropout if train else 1.0, #_
self.dropouts["write"]: config.writeDropout if train else 1.0,
self.dropouts["output"]: config.outputDropout if train else 1.0,
self.dropouts["controlPre"]: config.controlPreDropout if train else 1.0,
self.dropouts["controlPost"]: config.controlPostDropout if train else 1.0,
self.dropouts["wordEmb"]: config.wordEmbDropout if train else 1.0,
self.dropouts["word"]: config.wordDp if train else 1.0,
self.dropouts["vocab"]: config.vocabDp if train else 1.0,
self.dropouts["object"]: config.objectDp if train else 1.0,
self.dropouts["wordStandard"]: config.wordStandardDp if train else 1.0,
self.lr: config.lr,
self.train: train
}
if config.imageObjects:
feedDict.update({
self.imagesAll: images["images"],
self.imagesObjectNumAll: data["objectsNums"],
})
else:
feedDict.update({
self.imagesPlaceholder: images["images"]
})
if config.dataset == "VQA":
feedDict.update({
self.answerFreqListsAll: data["answerFreqs"],
self.answerFreqNumsAll: data["answerFreqNums"]
})
if config.ansFormat == "mc":
feedDict.update({
self.choicesIndicesAll: data["choices"],
self.choicesNumsAll: data["choicesNums"]
})
return feedDict
# Splits data to a specific GPU (tower) for parallelization
def initTowerBatch(self, towerI, towersNum, dataSize):
towerBatchSize = tf.floordiv(dataSize, towersNum)
start = towerI * towerBatchSize
end = (towerI + 1) * towerBatchSize if towerI < towersNum - 1 else dataSize
self.questionIndices = self.questionIndicesAll[start:end]
self.questionLengths = self.questionLengthsAll[start:end]
self.images = self.imagesAll[start:end]
self.imagesObjectNum = None
if config.imageObjects:
self.imagesObjectNum = self.imagesObjectNumAll[start:end]
self.answerIndices = self.answerIndicesAll[start:end]
self.answerFreqs = self.answerFreqNums = None
if config.dataset == "VQA":
self.answerFreqLists = self.answerFreqListsAll[start:end]
self.answerFreqNums = self.answerFreqNumsAll[start:end]
self.choicesIndices = self.choicesNums = None
if config.ansFormat == "mc":
self.choicesIndices = self.choicesIndicesAll[start:end]
self.choicesNums = self.choicesNumsAll[start:end]
self.batchSize = end - start
'''
    The Image Input Unit (stem). Passes the image features through a CNN network.
    Optionally adds positional encoding (not used in the default behavior).
    Flattens the image into a Height * Width "knowledge base" array.
Args:
images: image input. [batchSize, height, width, inDim]
inDim: input image dimension
outDim: image out dimension
addLoc: if not None, adds positional encoding to the image
Returns preprocessed images.
[batchSize, height * width, outDim]
'''
def stem(self, images, inDim, outDim, addLoc = None):
with tf.variable_scope("stem"):
if config.stemNormalize:
images = tf.nn.l2_normalize(images, dim = -1)
if config.imageObjects: # VQA ??? or config.useBaseline:
features, dim = images, inDim
if config.stemLinear:
features = ops.linear(images, inDim, outDim, dropout = self.dropouts["stem"])
dim = outDim
elif config.stemDeep:
dims = [inDim] + config.stemDims + [outDim]
features = ops.FCLayer(features, dims, dropout = self.dropouts["stem"])
if config.stemAct != "NON":
features = ops.actF(config.stemAct)(features)
return features, dim
if addLoc is None:
addLoc = config.locationAware
if config.stemLinear:
features = ops.linear(images, inDim, outDim)
else:
if config.stemNumLayers == 0:
outDim = inDim
else:
dims = [inDim] + ([config.stemDim] * (config.stemNumLayers - 1)) + [outDim]
if addLoc:
images, inDim = ops.addLocation(images, inDim, config.locationDim,
h = self.H, w = self.W, locType = config.locationType)
dims[0] = inDim
features = ops.CNNLayer(images, dims,
batchNorm = self.batchNorm if config.stemBN else None,
dropout = self.dropouts["stem"],
kernelSizes = config.stemKernelSizes,
strides = config.stemStrideSizes)
if config.stemGridRnn:
                features = ops.multigridRNNLayer(features, self.H, self.W, outDim)
if config.baselineNew or (not config.useBaseline):
features = tf.reshape(features, (self.batchSize, -1, outDim))
return features, outDim
# Embed question using parametrized word embeddings.
    # The embeddings are initialized to the values supplied at class initialization.
def qEmbeddingsOp(self, qIndices, embInit):
with tf.variable_scope("qEmbeddings"):
embInit = tf.to_float(embInit)
embeddingsVar = tf.get_variable("emb", initializer = embInit,
dtype = tf.float32, trainable = (not config.wrdEmbQFixed))
embeddings = tf.concat([tf.zeros((1, config.wrdQEmbDim)), embeddingsVar], axis = 0)
questions = tf.nn.embedding_lookup(embeddings, qIndices)
return questions, embeddings
# Embed answer words
def aEmbeddingsOp(self, aIndices, embInit):
with tf.variable_scope("aEmbeddings"):
if embInit is None:
return None
embInit = tf.to_float(embInit)
embeddings = tf.get_variable("emb", initializer = embInit,
dtype = tf.float32, trainable = (not config.wrdEmbAFixed))
if config.ansFormat == "mc":
answers = tf.nn.embedding_lookup(embeddings, aIndices)
else:
answers = embeddings
return answers
def vocabEmbeddings(self, embInit, name):
with tf.variable_scope("vocabEmbeddings" + name):
embInit = tf.to_float(embInit)
embeddings = tf.get_variable("emb", initializer = embInit,
dtype = tf.float32, trainable = (not config.semanticFixEmbs))
return embeddings
# Embed question and answer words with tied embeddings
def qaEmbeddingsOp(self, qIndices, aIndices, embInit):
questions, embeddings = self.qEmbeddingsOp(qIndices, embInit)
answers = tf.nn.embedding_lookup(embeddings, aIndices)
return questions, answers, embeddings
'''
Embed question (and optionally answer) using parametrized word embeddings.
    The embeddings are initialized to the values supplied at class initialization.
'''
def embeddingsOp(self, qIndices, aIndices, embInit):
# nullWord = tf.tile(tf.expand_dims(nullWord, axis = 0), [self.batchSize, 1, 1])
if config.ansEmbMod == "SHARED":
if config.ansFormat == "oe":
#if aIndices is None:
aIndices = embInit["oeAnswers"]
questions, answers, qaEmbeddings = self.qaEmbeddingsOp(qIndices, aIndices, embInit["qa"])
else:
questions, qEmbeddings = self.qEmbeddingsOp(qIndices, embInit["q"])
answers = self.aEmbeddingsOp(aIndices, embInit["a"])
if config.ansFormat == "oe" and config.ansEmbMod != "NON":
answers = tf.tile(tf.expand_dims(answers, axis = 0), [self.batchSize, 1, 1])
return questions, answers # , embeddings
'''
The Question Input Unit embeds the questions to randomly-initialized word vectors,
and runs a recurrent bidirectional encoder (RNN/LSTM etc.) that gives back
vector representations for each question (the RNN final hidden state), and
representations for each of the question words (the RNN outputs for each word).
The method uses bidirectional LSTM, by default.
Optionally projects the outputs of the LSTM (with linear projection /
optionally with some activation).
Args:
questions: question word embeddings
[batchSize, questionLength, wordEmbDim]
questionLengths: the question lengths.
[batchSize]
projWords: True to apply projection on RNN outputs.
projQuestion: True to apply projection on final RNN state.
projDim: projection dimension in case projection is applied.
Returns:
Contextual Words: RNN outputs for the words.
[batchSize, questionLength, ctrlDim]
Vectorized Question: Final hidden state representing the whole question.
[batchSize, ctrlDim]
'''
def encoder(self, questions, questionLengths, projWords = False,
projQuestion = False, projDim = None):
with tf.variable_scope("encoder"):
# variational dropout option
varDp = None
if config.encVariationalDropout:
varDp = {"stateDp": self.dropouts["stateInput"],
"inputDp": self.dropouts["encInput"],
"inputSize": config.wrdQEmbDim}
# rnns
for i in range(config.encNumLayers):
questionCntxWords, vecQuestions = ops.RNNLayer(questions, questionLengths,
config.encDim, bi = config.encBi, cellType = config.encType,
dropout = self.dropouts["encInput"], varDp = varDp, name = "rnn%d" % i)
# dropout for the question vector
vecQuestions = tf.nn.dropout(vecQuestions, self.dropouts["question"])
# projection of encoder outputs
if projWords:
questionCntxWords = ops.linear(questionCntxWords, config.encDim, projDim,
name = "projCW")
if projQuestion:
vecQuestions = ops.linear(vecQuestions, config.encDim, projDim,
act = config.encProjQAct, name = "projQ")
return questionCntxWords, vecQuestions
'''
Stacked Attention Layer for baseline. Computes interaction between images
and the previous memory, and casts it back to compute attention over the
image, which in turn is summed up with the previous memory to result in the
new one.
Args:
images: input image.
[batchSize, H * W, inDim]
memory: previous memory value
[batchSize, inDim]
inDim: inputs dimension
hDim: hidden dimension to compute interactions between image and memory
Returns the new memory value.
'''
def baselineAttLayer(self, images, memory, inDim, hDim, name = "", reuse = None):
with tf.variable_scope("attLayer" + name, reuse = reuse):
# projImages = ops.linear(images, inDim, hDim, name = "projImage")
# projMemory = tf.expand_dims(ops.linear(memory, inDim, hDim, name = "projMemory"), axis = -2)
# if config.saMultiplicative:
# interactions = projImages * projMemory
# else:
# interactions = tf.tanh(projImages + projMemory)
interactions, hDim = ops.mul(images, memory, inDim, proj = {"dim": hDim, "shared": False},
interMod = config.baselineAttType)
attention = ops.inter2att(interactions, hDim, mask = self.imagesObjectNum)
summary = ops.att2Smry(attention, images)
newMemory = memory + summary
return newMemory
'''
Baseline approach:
If baselineAtt is True, applies several layers (baselineAttNumLayers)
    of stacked attention to the image and memory, where the memory is initialized
    to the question vector. See baselineAttLayer for further details.
    Otherwise, computes the output features based on the image representation
    (baselineCNN), the question (baselineLSTM), or both.
Args:
vecQuestions: question vector representation
[batchSize, questionDim]
questionDim: dimension of question vectors
images: (flattened) image representation
[batchSize, imageDim]
imageDim: dimension of image representations.
hDim: hidden dimension to compute interactions between image and memory
(for attention-based baseline).
Returns final features to use in later classifier.
[batchSize, outDim] (out dimension depends on baseline method)
'''
def baseline(self, vecQuestions, questionDim, images, imageDim, hDim):
with tf.variable_scope("baseline"):
if config.baselineAtt:
memory = ops.linear(vecQuestions, questionDim, hDim, name = "qProj")
images = ops.linear(images, imageDim, hDim, name = "iProj")
for i in range(config.baselineAttNumLayers):
memory = self.baselineAttLayer(images, memory, hDim, hDim,
name = "baseline%d" % i)
memDim = hDim
else:
if config.imageObjects:
cff = tf.get_variable("cff", shape = (imageDim, ), initializer = tf.random_normal_initializer())
interactions, hDim = ops.mul(images, cff, imageDim)
attention = ops.inter2att(interactions, hDim, mask = self.imagesObjectNum)
images = ops.att2Smry(attention, images)
else:
images, imageDim = ops.linearizeFeatures(images, self.H, self.W,
imageDim, projDim = config.baselineProjDim)
if config.baselineLSTM and config.baselineCNN:
memory = tf.concat([vecQuestions, images], axis = -1)
memDim = questionDim + imageDim
elif config.baselineLSTM:
memory = vecQuestions
memDim = questionDim
else: # config.baselineCNN
memory = images
memDim = imageDim
return memory, memDim
'''
Runs the MAC recurrent network to perform the reasoning process.
Initializes a MAC cell and runs netLength iterations.
Currently it passes the question and knowledge base to the cell during
    its creation, so that it doesn't need to interact with it through
inputs / outputs while running. The recurrent computation happens
by working iteratively over the hidden (control, memory) states.
Args:
images: flattened image features. Used as the "Knowledge Base".
(Received by default model behavior from the Image Input Units).
[batchSize, H * W, memDim]
vecQuestions: vector questions representations.
(Received by default model behavior from the Question Input Units
as the final RNN state).
[batchSize, ctrlDim]
questionWords: question word embeddings.
[batchSize, questionLength, ctrlDim]
questionCntxWords: question contextual words.
(Received by default model behavior from the Question Input Units
as the series of RNN output states).
[batchSize, questionLength, ctrlDim]
questionLengths: question lengths.
[batchSize]
Returns the final control state and memory state resulted from the network.
    ([batchSize, ctrlDim], [batchSize, memDim])
'''
def MACnetwork(self, images, vecQuestions, questionWords, questionCntxWords,
questionLengths, name = "", reuse = None):
with tf.variable_scope("MACnetwork" + name, reuse = reuse):
self.macCell = MACCell(
vecQuestions = vecQuestions,
questionWords = questionWords,
questionCntxWords = questionCntxWords,
questionLengths = questionLengths,
knowledgeBase = images,
kbSize = self.imagesObjectNum,
memoryDropout = self.dropouts["memory"],
readDropout = self.dropouts["read"],
writeDropout = self.dropouts["write"],
controlDropoutPre = self.dropouts["controlPre"],
controlDropoutPost = self.dropouts["controlPost"],
wordDropout = self.dropouts["word"],
vocabDropout = self.dropouts["vocab"],
objectDropout = self.dropouts["object"],
# qDropoutMAC = self.qDropoutMAC,
batchSize = self.batchSize,
train = self.train,
reuse = reuse)
state = self.macCell.zero_state(self.batchSize, tf.float32)
none = tf.zeros((self.batchSize, 1), dtype = tf.float32)
for i in range(config.netLength):
self.macCell.iteration = i
_, state = self.macCell(none, state)
finalControl = state.control
finalMemory = state.memory
return finalControl, finalMemory
'''
Output Unit (step 1): chooses the inputs to the output classifier.
    By default the classifier input will be the final memory state of the MAC network.
If outQuestion is True, concatenate the question representation to that.
If outImage is True, concatenate the image flattened representation.
Args:
memory: (final) memory state of the MAC network.
[batchSize, memDim]
vecQuestions: question vector representation.
[batchSize, ctrlDim]
images: image features.
[batchSize, H, W, imageInDim]
imageInDim: images dimension.
    Returns the resulting features and their dimension.
'''
def outputOp(self, memory, control, vecQuestions, images, imageInDim):
with tf.variable_scope("outputUnit"):
features = memory
dim = config.memDim
if config.outQuestion:
q = vecQuestions
eQ = ops.linear(q, config.ctrlDim, config.memDim, name = "outQuestion")
features, dim = ops.concat(features, eQ, config.memDim, mul = config.outQuestionMul)
# assumes imageObjects False
if config.outImage:
images, imagesDim = ops.linearizeFeatures(images, self.H, self.W, self.imageInDim,
outputDim = config.outImageDim)
images = ops.linear(images, config.memDim, config.outImageDim, name = "outImage")
features = tf.concat([features, images], axis = -1)
dim += config.outImageDim
return features, dim
'''
Output Unit (step 2): Computes the logits for the answers. Passes the features
through fully-connected network to get the logits over the possible answers.
Optionally uses answer word embeddings in computing the logits (by default, it doesn't).
Args:
features: features used to compute logits
[batchSize, inDim]
inDim: features dimension
aEmbedding: supported word embeddings for answer words in case answerMod is not NON.
Optionally computes logits by computing dot-product with answer embeddings.
Returns: the computed logits.
[batchSize, answerWordsNum]
'''
    # for multiple choice (mc), config.answerMod has to be set (not NON)
def classifier(self, features, inDim, choices = None, choicesNums = None):
with tf.variable_scope("classifier"):
outDim = config.answerWordsNum
dims = [inDim] + config.outClassifierDims + [outDim]
if config.answerMod != "NON":
dims[-1] = config.wrdAEmbDim
logits = ops.FCLayer(features, dims,
batchNorm = self.batchNorm if config.outputBN else None,
dropout = self.dropouts["output"])
if config.answerMod != "NON":
logits = ops.gatedAct(config.outAct, gate = config.outGate)(logits)
logits = tf.nn.dropout(logits, self.dropouts["output"])
concat = {"x": config.answerBias}
interactions, interDim = ops.mul(choices, logits, dims[-1], interMod = config.answerMod, concat = concat)
logits = ops.inter2logits(interactions, interDim, sumMod = config.answerSumMod)
if config.ansFormat == "oe":
logits += ops.getBias((outDim, ), "ans")
else:
logits = ops.expMask(logits, choicesNums)
return logits
def aggregateFreqs(self, answerFreqs, answerFreqNums):
if answerFreqs is None:
return None
answerFreqs = tf.one_hot(answerFreqs, config.answerWordsNum) # , axis = -1
mask = tf.sequence_mask(answerFreqNums, maxlen = config.AnswerFreqMaxNum)
mask = tf.expand_dims(tf.to_float(mask), axis = -1)
answerFreqs *= mask
answerFreqs = tf.reduce_sum(answerFreqs, axis = 1)
return answerFreqs
# Computes mean cross entropy loss between logits and answers.
def addAnswerLossOp(self, logits, answers, answerFreqs, answerFreqNums):
if config.lossType == "softmax": # or config.ansFormat == "mc":
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = answers, logits = logits)
elif config.lossType == "svm":
answers = tf.one_hot(answers, config.answerWordsNum) # , axis = -1
losses = ops.hingeLoss(labels = answers, logits = logits)
elif config.lossType == "probSoftmax":
answerFreqs = tf.to_float(answerFreqs)
answerDist = answerFreqs / tf.expand_dims(tf.to_float(answerFreqNums), axis = -1)
losses = tf.nn.softmax_cross_entropy_with_logits_v2(labels = answerDist, logits = logits)
if config.weightedSoftmax:
weights = tf.to_float(answerFreqNums) / float(config.AnswerFreqMaxNum)
losses *= weights
elif config.lossType == "sigmoid":
if config.dataset == "VQA":
answerFreqs = tf.to_float(answerFreqs)
answerDist = answerFreqs / float(config.AnswerFreqMaxNum)
else:
answerDist = tf.one_hot(answers, config.answerWordsNum)
if config.lossWeight == 1:
losses = tf.nn.sigmoid_cross_entropy_with_logits(labels = answerDist, logits = logits)
else:
print("weighted sigmoid")
losses = tf.nn.weighted_cross_entropy_with_logits(targets = answerDist, logits = logits,
pos_weight = config.lossWeight)
if config.ansWeighting or config.ansWeightingRoot:
losses *= self.answerDict.weights
losses = tf.reduce_sum(losses, axis = -1)
else:
print("non-identified loss")
loss = tf.reduce_mean(losses)
self.answerLossList.append(loss)
return loss, losses
# Computes predictions (by finding maximal logit value, corresponding to highest probability)
# and mean accuracy between predictions and answers.
def addPredOp(self, logits, answers): # , answerFreqs
with tf.variable_scope("pred"):
if config.ansFormat == "oe":# and config.ansAddUnk:
mask = tf.to_float(tf.sequence_mask([2], config.answerWordsNum)) * (-1e30) # 1 or 2?
logits += mask
preds = tf.to_int32(tf.argmax(logits, axis = -1)) # tf.nn.softmax(
if config.dataset == "VQA" and config.ansFormat == "oe":
agreeing = tf.reduce_sum(tf.one_hot(preds, config.answerWordsNum) * self.answerFreqs, axis = -1)
corrects = tf.minimum(agreeing * 0.3, 1.0) # /3 ?
else:
corrects = tf.to_float(tf.equal(preds, answers))
correctNum = tf.reduce_sum(corrects)
acc = tf.reduce_mean(corrects)
self.correctNumList.append(correctNum)
self.answerAccList.append(acc)
return preds, corrects, correctNum
# Creates optimizer (adam)
def addOptimizerOp(self):
with tf.variable_scope("trainAddOptimizer"):
self.globalStep = tf.Variable(0, dtype = tf.int32, trainable = False, name = "globalStep") # init to 0 every run?
optimizer = tf.train.AdamOptimizer(learning_rate = self.lr)
if config.subsetOpt:
self.subsetOptimizer = tf.train.AdamOptimizer(learning_rate = self.lr * config.subsetOptMult)
return optimizer
'''
    Computes gradients for all variables or a subset of them, based on the provided loss,
    using the optimizer.
'''
def computeGradients(self, optimizer, loss, trainableVars = None): # tf.trainable_variables()
with tf.variable_scope("computeGradients"):
if config.trainSubset:
trainableVars = []
allVars = tf.trainable_variables()
for var in allVars:
if any((s in var.name) for s in config.varSubset):
trainableVars.append(var)
if config.subsetOpt:
trainableVars = []
subsetVars = []
allVars = tf.trainable_variables()
for var in allVars:
if any((s in var.name) for s in config.varSubset):
subsetVars.append(var)
else:
trainableVars.append(var)
gradients_vars = optimizer.compute_gradients(loss, trainableVars)
if config.subsetOpt:
self.subset_gradients_vars = self.subsetOptimizer.compute_gradients(loss, subsetVars)
self.subset_gradientVarsList.append(self.subset_gradients_vars)
return gradients_vars
'''
Apply gradients. Optionally clip them, and update exponential moving averages
for parameters.
'''
def addTrainingOp(self, optimizer, gradients_vars):
with tf.variable_scope("train"):
gradients, variables = zip(*gradients_vars)
norm = tf.global_norm(gradients)
# gradient clipping
if config.clipGradients:
clippedGradients, _ = tf.clip_by_global_norm(gradients, config.gradMaxNorm, use_norm = norm)
gradients_vars = zip(clippedGradients, variables)
# updates ops (for batch norm) and train op
updateOps = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(updateOps):
train = optimizer.apply_gradients(gradients_vars, global_step = self.globalStep)
if config.subsetOpt:
subsetTrain = self.subsetOptimizer.apply_gradients(self.subset_gradientVarsAll)
train = tf.group(train, subsetTrain)
# exponential moving average
if config.useEMA:
ema = tf.train.ExponentialMovingAverage(decay = config.emaDecayRate)
maintainAveragesOp = ema.apply(tf.trainable_variables())
with tf.control_dependencies([train]):
trainAndUpdateOp = tf.group(maintainAveragesOp)
train = trainAndUpdateOp
self.emaDict = ema.variables_to_restore()
return train, norm
def averageAcrossTowers(self, gpusNum):
if gpusNum == 1:
self.lossAll = self.lossList[0]
self.answerLossAll = self.answerLossList[0]
self.answerAccAll = self.answerAccList[0]
self.correctNumAll = self.correctNumList[0]
self.predsAll = self.predsList[0]
self.gradientVarsAll = self.gradientVarsList[0]
if config.subsetOpt:
self.subset_gradientVarsAll = self.subset_gradientVarsList[0]
else:
self.lossAll = tf.reduce_mean(tf.stack(self.lossList, axis = 0), axis = 0)
self.answerLossAll = tf.reduce_mean(tf.stack(self.answerLossList, axis = 0), axis = 0)
self.answerAccAll = tf.reduce_mean(tf.stack(self.answerAccList, axis = 0), axis = 0)
self.correctNumAll = tf.reduce_sum(tf.stack(self.correctNumList, axis = 0), axis = 0)
self.predsAll = tf.concat(self.predsList, axis = 0)
self.gradientVarsAll = []
for grads_var in zip(*self.gradientVarsList):
gradients, variables = zip(*grads_var)
if gradients[0] != None:
avgGradient = tf.reduce_mean(tf.stack(gradients, axis = 0), axis = 0)
else:
avgGradient = None
var = variables[0]
grad_var = (avgGradient, var)
self.gradientVarsAll.append(grad_var)
if config.subsetOpt:
self.subset_gradientVarsAll = []
for grads_var in zip(*self.subset_gradientVarsList):
gradients, variables = zip(*grads_var)
if gradients[0] != None:
avgGradient = tf.reduce_mean(tf.stack(gradients, axis = 0), axis = 0)
else:
avgGradient = None
var = variables[0]
grad_var = (avgGradient, var)
self.subset_gradientVarsAll.append(grad_var)
def trim2DVectors(self, vectors, vectorsLengths):
maxLength = np.max(vectorsLengths)
return vectors[:,:maxLength]
def trimData(self, data):
data["questions"] = self.trim2DVectors(data["questions"], data["questionLengths"])
return data
'''
    Builds the predictions JSON by adding the model's predictions and attention maps
back to the original data JSON.
'''
def buildPredsList(self, data, predictions, attentionMaps):
predsList = []
for i, instance in enumerate(data["instances"]):
if predictions is not None:
if config.ansFormat == "oe":
pred = self.answerDict.decodeId(predictions[i])
else:
pred = instance["choices"][predictions[i]]
instance["prediction"] = pred
# aggregate np attentions of instance i in the batch into 2d list
attMapToList = lambda attMap: [step[i].tolist() for step in attMap]
if attentionMaps is not None:
attentions = {k: attMapToList(attentionMaps[k]) for k in attentionMaps}
instance["attentions"] = attentions
predsList.append(instance)
return predsList
'''
Processes a batch of data with the model.
Args:
sess: TF session
data: Data batch. Dictionary that contains numpy array for:
questions, questionLengths, answers.
    See preprocess.py for further information on the batch structure.
images: batch of image features, as numpy array. images["images"] contains
[batchSize, channels, h, w]
train: True to run batch for training.
getAtt: True to return attention maps for question and image (and optionally
self-attention and gate values).
Returns results: e.g. loss, accuracy, running time.
'''
def runBatch(self, sess, data, images, train, getPreds = False, getAtt = False, allData = None):
batchSizeOp = self.batchSizeAll
indicesOp = self.noOp
trainOp = self.trainOp if train else self.noOp
gradNormOp = self.gradNorm if train else self.noOp
predsOp = (self.predsAll, self.correctNumAll, self.answerAccAll)
attOp = self.macCell.attentions if not config.useBaseline else (self.attentions if config.baselineNew else self.noOp)
time0 = time.time()
feed = self.createFeedDict(data, images, train)
time1 = time.time()
batchSize, indices, _, loss, predsInfo, gradNorm, attentionMaps = sess.run(
[batchSizeOp, indicesOp, trainOp, self.lossAll, predsOp, gradNormOp, attOp],
feed_dict = feed)
time2 = time.time()
predsList = []
if getPreds:
if data is None:
data = [allData["instances"][i] for i in indices]
predsList = self.buildPredsList(data, predsInfo[0], attentionMaps if getAtt else None)
return {"loss": loss,
"correctNum": predsInfo[1],
"acc": predsInfo[2],
"preds": predsList,
"gradNorm": gradNorm if train else -1,
"readTime": time1 - time0,
"trainTime": time2 - time1,
"batchSize": batchSize}
def build(self):
self.addPlaceholders()
self.optimizer = self.addOptimizerOp()
self.gradientVarsList = []
if config.subsetOpt:
self.subset_gradientVarsList = []
self.lossList = []
self.answerLossList = []
self.correctNumList = []
self.answerAccList = []
self.predsList = []
with tf.variable_scope("macModel"):
for i in range(config.gpusNum):
with tf.device("/gpu:{}".format(i)):
with tf.name_scope("tower{}".format(i)) as scope:
self.initTowerBatch(i, config.gpusNum, self.batchSizeAll)
self.loss = tf.constant(0.0)
# embed questions words (and optionally answer words)
questionWords, choices = self.embeddingsOp(self.questionIndices,
self.choicesIndices, self.embeddingsInit)
projWords = projQuestion = ((config.encDim != config.ctrlDim) or config.encProj)
questionCntxWords, vecQuestions = self.encoder(questionWords,
self.questionLengths, projWords, projQuestion, config.ctrlDim)
# Image Input Unit (stem)
imageFeatures, imageDim = self.stem(self.images, self.imageInDim, config.memDim)
# baseline model
if config.useBaseline:
# inpImg = imageFeatures if config.baselineNew else self.images
# inpDim = imageDim if config.baselineNew else self.imageInDim
output, dim = self.baseline(vecQuestions, config.ctrlDim,
imageFeatures, imageDim, config.attDim) # self.images
# MAC model
else:
finalControl, finalMemory = self.MACnetwork(imageFeatures, vecQuestions,
questionWords, questionCntxWords, self.questionLengths)
# Output Unit - step 1 (preparing classifier inputs)
output, dim = self.outputOp(finalMemory, finalControl, vecQuestions,
self.images, self.imageInDim)
# Output Unit - step 2 (classifier)
logits = self.classifier(output, dim, choices, self.choicesNums)
# compute loss, predictions, accuracy
if config.dataset == "VQA":
self.answerFreqs = self.aggregateFreqs(self.answerFreqLists, self.answerFreqNums)
else:
self.answerFreqs = None
self.answerFreqNums = None
answerLoss, self.losses = self.addAnswerLossOp(logits, self.answerIndices,
self.answerFreqs, self.answerFreqNums)
self.preds, self.corrects, self.correctNum = self.addPredOp(logits, self.answerIndices) # ,self.answerFreqs
self.loss += answerLoss
self.predsList.append(self.preds)
self.lossList.append(self.loss)
# compute gradients
gradient_vars = self.computeGradients(self.optimizer, self.loss, trainableVars = None)
self.gradientVarsList.append(gradient_vars)
# reuse variables in next towers
tf.get_variable_scope().reuse_variables()
self.averageAcrossTowers(config.gpusNum)
self.trainOp, self.gradNorm = self.addTrainingOp(self.optimizer, self.gradientVarsAll)
self.noOp = tf.no_op()
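# ----------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original model): how the soft
# VQA accuracy used in addPredOp behaves. The 0.3 factor approximates the
# usual min(#agreeing annotators / 3, 1) VQA metric; the counts below are
# hypothetical.
if __name__ == "__main__":
    for agreeing in [0, 1, 2, 3, 4]:
        softAcc = min(agreeing * 0.3, 1.0)
        print("agreeing annotators: %d -> soft accuracy: %.1f" % (agreeing, softAcc))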
|
py
|
1a5d0d468c5fb88ebb94ecf679924d8450603691
|
# Copyright 2017 Netflix
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module: security_monkey.watchers.github.team
:platform: Unix
:synopsis: Watcher for GitHub Organization Teams.
.. version:: $$VERSION$$
.. moduleauthor:: Mike Grima <[email protected]>
"""
from security_monkey import app
from security_monkey.common.github.util import get_github_creds, iter_org, strip_url_fields
from security_monkey.datastore import Account
from security_monkey.decorators import record_exception
from security_monkey.exceptions import InvalidResponseCodeFromGitHubError
from security_monkey.watcher import Watcher, ChangeItem
import requests
GITHUB_URL = "https://api.github.com/"
class GitHubTeam(Watcher):
index = 'team'
i_am_singular = 'team'
i_am_plural = 'teams'
account_type = 'GitHub'
def __init__(self, accounts=None, debug=False):
super(GitHubTeam, self).__init__(accounts=accounts, debug=debug)
self.honor_ephemerals = True
self.ephemeral_paths = []
self.github_creds = get_github_creds(self.accounts)
def slurp(self):
@record_exception(source="{index}-watcher".format(index=self.index))
def fetch_org_teams(**kwargs):
account = Account.query.filter(Account.name == kwargs["account_name"]).first()
item_list = []
# Fetch teams:
app.logger.debug("Fetching organization teams for: {}".format(account.identifier))
teams = strip_url_fields(self.list_org_teams(account.identifier))
for team in teams:
item_list.append(GitHubTeamItem(
account=account.name,
name=team["name"],
arn="{}/team/{}".format(account.identifier, team["slug"]),
config=team,
source_watcher=self
))
return item_list, kwargs["exception_map"]
@iter_org(orgs=self.accounts)
def slurp_items(**kwargs):
# Are we skipping this org?
if self.check_ignore_list(kwargs["account_name"]):
app.logger.debug("Skipping ignored account: {}".format(kwargs["account_name"]))
return [], kwargs["exception_map"]
# Exception handling complexities...
results = fetch_org_teams(**kwargs)
if not results:
return [], kwargs["exception_map"]
return results
items, exc = slurp_items(index=self.index)
return items, exc
def list_org_teams(self, org):
headers = {
'Authorization': 'token {}'.format(self.github_creds[org])
}
params = {
"page": 1,
}
done = False
teams = []
while not done:
url = "{}orgs/{}/teams".format(GITHUB_URL, org)
result = requests.get(url, headers=headers, params=params)
if result.status_code != 200:
raise InvalidResponseCodeFromGitHubError(org, result.status_code)
if not result.links.get("last"):
done = True
else:
params["page"] += 1
result_json = result.json()
teams += result_json
return teams
class GitHubTeamItem(ChangeItem):
def __init__(self, account=None, name=None, arn=None, config=None, source_watcher=None):
super(GitHubTeamItem, self).__init__(index=GitHubTeam.index,
region="universal",
account=account,
name=name,
arn=arn,
new_config=config if config else {},
source_watcher=source_watcher)
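# ----------------------------------------------------------------------------
# Minimal sketch (added; not part of the watcher) of the page-based pagination
# used in GitHubTeam.list_org_teams above: keep requesting ?page=N until the
# response no longer advertises a "last" page in its Link header (requests
# exposes the parsed header via response.links). The URL and token below are
# placeholders.
def _example_paginate(url, token):
    headers = {"Authorization": "token {}".format(token)}
    params = {"page": 1}
    items = []
    while True:
        response = requests.get(url, headers=headers, params=params)
        response.raise_for_status()
        items += response.json()
        if not response.links.get("last"):
            return items
        params["page"] += 1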
|
py
|
1a5d0d52dd4d6ed2895f1557811d8a32b236f015
|
####
# MQTT Publish
#
# Reads the temperature sensor, button and RFID reader and sends the data to the MQTT broker
import time
from machine import I2C, Pin, SoftI2C, SoftSPI
from lib.sensors.bmp180 import BMP180
from lib.sensors.mfrc522 import MFRC522
from lib.config import *
from umqtt.robust import MQTTClient
import ubinascii
import machine
from lib.config import *
import micropython
import network
client_id = ubinascii.hexlify(machine.unique_id())
# Topics
topicTEMP = b"iotkit/sensor"
topicALERT = b"iotkit/alert"
topicRFID = b"iotkit/rfid"
topicSERVO = b"iotkit/servo"
# MQTT broker
hostname = "cloud.tbz.ch"
port = 1883
# Classification
cls = ( "low", "middle", "high" )
type = 0
# Temperature sensor
bus = SoftI2C(sda=Pin(DEFAULT_IOTKIT_I2C_SDA), scl=Pin(DEFAULT_IOTKIT_I2C_SCL))
bmp180 = BMP180(bus)
bmp180.oversample_sett = 2
bmp180.baseline = 101325
# RFID Reader
sck = Pin(DEFAULT_IOTKIT_SPI_SCLK)
mosi = Pin(DEFAULT_IOTKIT_SPI_MOSI)
miso = Pin(DEFAULT_IOTKIT_SPI_MISO)
spi = SoftSPI(baudrate=100000, polarity=0, phase=0, sck=sck, mosi=mosi, miso=miso)
sda = Pin(DEFAULT_IOTKIT_SPI_SS, Pin.OUT)
rdr = MFRC522(spi, sda)
# Button
button = Pin(DEFAULT_IOTKIT_BUTTON1, Pin.IN)
# MQTT Subscribe
def sub_cb(topic, msg):
print((topic, msg))
if topic == b'notification' and msg == b'received':
print('ESP received hello message')
# MQTT Login
def connect_and_subscribe():
global client_id, hostname, port, topicSERVO
client = MQTTClient(client_id, hostname, port)
client.set_callback(sub_cb)
client.connect()
client.subscribe(topicSERVO)
print('Connected to %s MQTT broker, subscribed to %s topic' % (hostname, topicSERVO))
return client
# MQTT Restart
def restart_and_reconnect():
print('Failed to connect to MQTT broker. Reconnecting...')
time.sleep(10)
machine.reset()
### Main program
counter = 1
try:
client = connect_and_subscribe()
except OSError as e:
restart_and_reconnect()
while True:
try:
client.check_msg()
        # Temperature
if counter % 3 == 1:
msg = "0xBC," + str(bmp180.temperature - 5) + "," + str(bmp180.pressure / 1000) + ",low"
if counter % 3 == 2:
msg = "0xBC," + str(bmp180.temperature) + "," + str(bmp180.pressure / 1000) + ",middle"
if counter % 3 == 0:
msg = "0xBC," + str(bmp180.temperature + 5) + "," + str(bmp180.pressure / 1000) + ",high"
client.publish(topicTEMP, msg)
print( topicTEMP, counter, msg )
counter = counter + 1
        # Button starts the BPMN process
if button.value() == 0:
client.publish(topicALERT, "alert")
print( topicALERT, counter, "alert" )
uid = ""
(stat, tag_type) = rdr.request(rdr.REQIDL)
if stat == rdr.OK:
(stat, raw_uid) = rdr.anticoll()
if stat == rdr.OK:
uid = ("0x%02x%02x%02x%02x" % (raw_uid[0], raw_uid[1], raw_uid[2], raw_uid[3]))
client.publish(topicRFID, uid )
print(topicRFID, counter, uid)
time.sleep( 1.0 )
except OSError as e:
restart_and_reconnect()
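# ----------------------------------------------------------------------------
# Note (added; not part of the original script): the payload published to
# iotkit/sensor is a CSV string of the form
#     "0xBC,<temperature>,<pressure>,<low|middle|high>"
# where, assuming the BMP180 driver reports degrees Celsius and Pascal, the
# pressure is divided by 1000 to give kPa -- e.g. "0xBC,23.4,96.5,middle"
# (hypothetical values). The RFID UID goes to iotkit/rfid as a hex string
# such as "0x1a2b3c4d" (also hypothetical).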
|
py
|
1a5d0d877d402b0124137d38f0be2f1204a73991
|
#!/usr/bin/python -u
# -*- coding: latin-1 -*-
#
# Monks and doors problem in Z3
#
# From http://user.it.uu.se/~rolandb/LP/gammal/960615_facit.ps
# """
# There is a room with four doors and eight monks. One or more of
# the doors may be exit. Each monk is either telling a lie or the truth.
#
# The monks make the following statements:
# Monk 1: Door A is the exit.
# Monk 2: At least one of the doors B and C is the exit.
# Monk 3: Monk 1 and Monk 2 are telling the truth.
# Monk 4: Doors A and B are both exits.
# Monk 5: Doors A and C are both exits.
# Monk 6: Either Monk 4 or Monk 5 is telling the truth.
# Monk 7: If Monk 3 is telling the truth, so is Monk 6.
# Monk 8: If Monk 7 and Monk 8 are telling the truth, so is Monk 1.
#
# Which door is an exit no matter who is a liar and who is telling the
# truth.
# """
#
# Answer: Door A is an exit.
# And monks 1, 7, and 8 are telling the truth.
#
# This Z3 model was written by Hakan Kjellerstrand ([email protected])
# See also my Z3 page: http://hakank.org/z3/
#
from z3_utils_hakank import *
sol = Solver()
# variables
A,B,C,D = Bools("A B C D") # Doors
doors = [A,B,C,D]
M1,M2,M3,M4,M5,M6,M7,M8 = Bools("M1 M2 M3 M4 M5 M6 M7 M8") # monks
monks = [M1,M2,M3,M4,M5,M6,M7,M8]
# constraints
# Monk 1: Door A is the exit.
sol.add(M1 == A)
# Monk 2: At least one of the doors B and C is the exit.
sol.add(M2 == (If(B,1,0) + If(C,1,0) >= 1))
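# (Added note: an equivalent boolean encoding of Monk 2's statement is sol.add(M2 == Or(B, C)).)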
# Monk 3: Monk 1 and Monk 2 are telling the truth.
sol.add(M3 == And(M1, M2))
# Monk 4: Doors A and B are both exits.
sol.add(M4 == And(A,B))
# Monk 5: Doors A and C are both exits.
sol.add(M5 == And(A, C))
# Monk 6: Either Monk 4 or Monk 5 is telling the truth.
sol.add(M6 == Or(M4,M5))
# Monk 7: If Monk 3 is telling the truth, so is Monk 6.
sol.add(M7 == Implies(M3, M6))
# Monk 8: If Monk 7 and Monk 8 are telling the truth, so is Monk 1.
sol.add(M8 == (Implies(And(M7, M8),M1)))
# Exactly one door is an exit.
sol.add(If(A,1,0) + If(B,1,0) + If(C,1,0) + If(D,1,0) == 1)
num_solutions = 0
while sol.check() == sat:
num_solutions += 1
mod = sol.model()
print("doors:", [mod.eval(D) for D in doors])
print("monks:", [mod.eval(M) for M in monks])
getDifferentSolution(sol,mod,doors,monks)
print("num_solutions:", num_solutions)
|
py
|
1a5d0da8df063d296b05f89056b810e2bc89606b
|
import random
from collections import namedtuple
from threading import RLock
import numpy as np
Transition = namedtuple("Transition", ("s", "a", "r", "s_", "done"))
class ServerBuffer:
def __init__(self, capacity, observation_shapes, action_size):
self.size = capacity
self.num_in_buffer = 0
self.stored_in_buffer = 0
self.num_parts = len(observation_shapes)
self.obs_shapes = observation_shapes
self.act_shape = (action_size,)
# initialize all np.arrays which store necessary data
self.observations = []
for part_id in range(self.num_parts):
obs = np.empty((self.size, ) + self.obs_shapes[part_id], dtype=np.float32)
self.observations.append(obs)
self.actions = np.empty((self.size, ) + self.act_shape, dtype=np.float32)
self.rewards = np.empty((self.size, ), dtype=np.float32)
self.dones = np.empty((self.size, ), dtype=np.bool)
self.td_errors = np.empty((self.size, ), dtype=np.float32)
self.pointer = 0
self._store_lock = RLock()
def push_episode(self, episode):
""" episode = [observations, actions, rewards, dones]
observations = [obs_part_1, ..., obs_part_n]
"""
with self._store_lock:
observations, actions, rewards, dones = episode
episode_len = len(actions)
self.stored_in_buffer += episode_len
self.num_in_buffer = min(self.size, self.num_in_buffer + episode_len)
indices = np.arange(self.pointer, self.pointer + episode_len) % self.size
for part_id in range(self.num_parts):
self.observations[part_id][indices] = np.array(observations[part_id])
self.actions[indices] = np.array(actions)
self.rewards[indices] = np.array(rewards)
self.dones[indices] = np.array(dones)
self.td_errors[indices] = np.ones(len(indices))
self.pointer = (self.pointer + episode_len) % self.size
def get_stored_in_buffer(self):
return self.stored_in_buffer
def get_state(self, idx, history_len=1):
""" compose the state from a number (history_len) of observations
"""
state = []
for part_id in range(self.num_parts):
start_idx = idx - history_len + 1
if (start_idx < 0 or np.any(self.dones[start_idx:idx+1])):
s = np.zeros((history_len, ) + self.obs_shapes[part_id], dtype=np.float32)
indices = [idx]
for i in range(history_len-1):
next_idx = (idx-i-1) % self.size
if next_idx >= self.num_in_buffer or self.dones[next_idx]:
break
indices.append(next_idx)
indices = indices[::-1]
s[-len(indices):] = self.observations[part_id][indices]
else:
s = self.observations[part_id][slice(start_idx, idx+1, 1)]
state.append(s)
return state
def get_transition_n_step(self, idx, history_len=1, n_step=1, gamma=0.99):
state = self.get_state(idx, history_len)
next_state = self.get_state((idx + n_step) % self.size, history_len)
cum_reward = 0
indices = np.arange(idx, idx + n_step) % self.size
for num, i in enumerate(indices):
cum_reward += self.rewards[i] * (gamma ** num)
done = self.dones[i]
if done:
break
return state, self.actions[idx], cum_reward, next_state, done, self.td_errors[idx]
def update_td_errors(self, indices, td_errors):
self.td_errors[indices] = td_errors
def get_batch(self, batch_size, history_len=1, n_step=1, gamma=0.99, indices=None):
with self._store_lock:
if indices is None:
indices = random.sample(range(self.num_in_buffer), k=batch_size)
transitions = []
for idx in indices:
transition = self.get_transition_n_step(idx, history_len, n_step, gamma)
transitions.append(transition)
states = []
for part_id in range(self.num_parts):
state = [transitions[i][0][part_id] for i in range(batch_size)]
states.append(state)
actions = [transitions[i][1] for i in range(batch_size)]
rewards = [transitions[i][2] for i in range(batch_size)]
next_states = []
for part_id in range(self.num_parts):
next_state = [transitions[i][3][part_id] for i in range(batch_size)]
next_states.append(next_state)
dones = [transitions[i][4] for i in range(batch_size)]
batch = Transition(
np.array(states, dtype=np.float32),
np.array(actions, dtype=np.float32),
np.array(rewards, dtype=np.float32),
np.array(next_states, dtype=np.float32),
np.array(dones, dtype=np.bool)
)
return batch
def get_prioritized_batch(self, batch_size, history_len=1,
n_step=1, gamma=0.99,
priority="proportional", alpha=0.6, beta=1.0):
with self._store_lock:
if priority == "proportional":
p = np.power(np.abs(self.td_errors[:self.num_in_buffer])+1e-6, alpha)
p = p / p.sum()
indices = np.random.choice(range(self.num_in_buffer), size=batch_size, p=p)
probs = p[indices]
is_weights = np.power(self.num_in_buffer * probs, -beta)
is_weights = is_weights / is_weights.max()
batch = self.get_batch(batch_size, history_len, n_step, gamma, indices)
return batch, indices, is_weights
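# ----------------------------------------------------------------------------
# Illustrative sketch (added; not part of the buffer): the n-step return
# accumulated in get_transition_n_step and the proportional priorities /
# importance-sampling weights used in get_prioritized_batch, on hypothetical
# numbers.
if __name__ == "__main__":
    rewards = [1.0, 0.0, 2.0]  # r_t, r_t+1, r_t+2
    gamma = 0.99
    cum_reward = sum(r * gamma ** k for k, r in enumerate(rewards))
    print("3-step return:", cum_reward)  # 1 + 0.99 * 0 + 0.99 ** 2 * 2 = 2.9602
    td_errors = np.array([0.5, 0.1, 2.0, 0.0])
    alpha, beta = 0.6, 1.0
    p = np.power(np.abs(td_errors) + 1e-6, alpha)
    p = p / p.sum()  # sampling probabilities, proportional to |TD error|^alpha
    is_weights = np.power(len(td_errors) * p, -beta)
    is_weights = is_weights / is_weights.max()  # normalized importance weights
    print("probabilities:", p)
    print("importance weights:", is_weights)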
|
py
|
1a5d0e32ab12ee0fed6009b1b45336b96d8df456
|
#!/usr/bin/env python
#
# Copyright (c) 2016-2020 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tbb import *
from tbb import __all__, __doc__
if __name__ == "__main__":
from tbb import _main
import sys
sys.exit(_main())
|
py
|
1a5d0eeaf695dd617e01dcda989f3b540b299112
|
import logging
from collections import OrderedDict
from dataclasses import dataclass, field
from typing import Dict, Type
import requests
from requests.exceptions import HTTPError
from datahub.configuration.common import ConfigModel
from datahub.ingestion.api.common import RecordEnvelope, WorkUnit
from datahub.ingestion.api.sink import Sink, SinkReport, WriteCallback
from datahub.metadata import ( # MLFeatureSnapshotClass,
ChartSnapshotClass,
CorpGroupSnapshotClass,
CorpUserSnapshotClass,
DashboardSnapshotClass,
DataProcessSnapshotClass,
DatasetSnapshotClass,
MLModelSnapshotClass,
)
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
logger = logging.getLogger(__name__)
resource_locator: Dict[Type[object], str] = {
ChartSnapshotClass: "charts",
DashboardSnapshotClass: "dashboards",
CorpUserSnapshotClass: "corpUsers",
CorpGroupSnapshotClass: "corpGroups",
DatasetSnapshotClass: "datasets",
DataProcessSnapshotClass: "dataProcesses",
MLModelSnapshotClass: "mlModels",
}
def _rest_li_ify(obj):
if isinstance(obj, (dict, OrderedDict)):
if len(obj.keys()) == 1:
key = list(obj.keys())[0]
value = obj[key]
if key.find("com.linkedin.pegasus2avro.") >= 0:
new_key = key.replace("com.linkedin.pegasus2avro.", "com.linkedin.")
return {new_key: _rest_li_ify(value)}
elif key == "string" or key == "array":
return value
new_obj = {}
for key, value in obj.items():
if value is not None:
new_obj[key] = _rest_li_ify(value)
return new_obj
elif isinstance(obj, list):
new_obj = [_rest_li_ify(item) for item in obj]
return new_obj
return obj
class DatahubRestSinkConfig(ConfigModel):
"""Configuration class for holding connectivity to datahub gms"""
server: str = "http://localhost:8080"
@dataclass
class DatahubRestSink(Sink):
config: DatahubRestSinkConfig
report: SinkReport = field(default_factory=SinkReport)
@classmethod
def create(cls, config_dict, ctx):
config = DatahubRestSinkConfig.parse_obj(config_dict)
return cls(ctx, config)
def get_ingest_endpoint(self, mce: MetadataChangeEvent):
snapshot_type = type(mce.proposedSnapshot)
snapshot_resource = resource_locator.get(snapshot_type, None)
if not snapshot_resource:
raise ValueError(
f"Failed to locate a snapshot resource for type {snapshot_type}"
)
return f"{self.config.server}/{snapshot_resource}?action=ingest"
def handle_work_unit_start(self, workunit: WorkUnit) -> None:
pass
def handle_work_unit_end(self, workunit: WorkUnit) -> None:
pass
def write_record_async(
self,
record_envelope: RecordEnvelope[MetadataChangeEvent],
write_callback: WriteCallback,
):
headers = {"X-RestLi-Protocol-Version": "2.0.0"}
mce = record_envelope.record
url = self.get_ingest_endpoint(mce)
raw_mce_obj = mce.proposedSnapshot.to_obj()
mce_obj = _rest_li_ify(raw_mce_obj)
snapshot = {"snapshot": mce_obj}
try:
response = requests.post(url, headers=headers, json=snapshot)
# with open('data.json', 'w') as outfile:
# json.dump(serialized_snapshot, outfile)
response.raise_for_status()
self.report.report_record_written(record_envelope)
write_callback.on_success(record_envelope, {})
except HTTPError as e:
info = response.json()
self.report.report_failure({"e": e, "info": info})
write_callback.on_failure(record_envelope, e, info)
except Exception as e:
self.report.report_failure({"e": e})
write_callback.on_failure(record_envelope, e, {})
def get_report(self) -> SinkReport:
return self.report
def close(self):
pass
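# ----------------------------------------------------------------------------
# Illustrative sketch (added; not part of the sink): what _rest_li_ify does to
# a serialized snapshot fragment -- avro union wrappers such as {"string": ...}
# are unwrapped and "com.linkedin.pegasus2avro." namespaces are rewritten to
# "com.linkedin.". The fragment below is hand-written and hypothetical.
if __name__ == "__main__":
    raw = {
        "com.linkedin.pegasus2avro.dataset.DatasetProperties": {
            "description": {"string": "hypothetical example"}
        }
    }
    print(_rest_li_ify(raw))
    # -> {'com.linkedin.dataset.DatasetProperties': {'description': 'hypothetical example'}}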
|
py
|
1a5d0f0ff274b42a3f026fe373dfae0a12f59f29
|
# Generated by Django 2.2.2 on 2019-06-18 19:01
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wells', '0099_update_well_orientation_status'),
]
operations = [
migrations.AlterField(
model_name='activitysubmission',
name='coordinate_acquisition_code',
field=models.ForeignKey(blank=True, null=True, db_column='coordinate_acquisition_code', default='H', on_delete=django.db.models.deletion.PROTECT, to='wells.CoordinateAcquisitionCode', verbose_name='Location Accuracy Code'),
),
migrations.AlterField(
model_name='well',
name='coordinate_acquisition_code',
field=models.ForeignKey(blank=True, null=True, db_column='coordinate_acquisition_code', default='H', on_delete=django.db.models.deletion.PROTECT, to='wells.CoordinateAcquisitionCode', verbose_name='Location Accuracy Code'),
),
]
|
py
|
1a5d1162de8fd462d802220bdd23ad94cbfd26bb
|
#***************************************************************
#* Name: LMS7002_GFIR3.py
#* Purpose: Class implementing LMS7002 GFIR3 functions
#* Author: Lime Microsystems ()
#* Created: 2016-11-14
#* Copyright: Lime Microsystems (limemicro.com)
#* License:
#**************************************************************
from LMS7002_base import *
from LMS7002_GFIR import *
class LMS7002_GFIR3(LMS7002_base):
__slots__ = ['CMB0a', 'CMB0b', 'CMB0c','CMB1a','CMB1b','CMB1c', 'CMB2a', 'CMB2b', 'CMB2c', 'CMB3a', 'CMB3b', 'CMB3c', 'CMB4a', 'CMB4b', 'CMB4c', 'rxtx'] # Used to generate error on typos
def __init__(self, chip, RxTx, Channel):
if RxTx not in ['RX', 'TX']:
raise ValueError("Parameter RxTx must be 'RX' or 'TX'")
if Channel not in ['A', 'B']:
raise ValueError("Parameter Channel must be 'A' or 'B'")
self.chip = chip
self.rxtx = RxTx
self.channel = Channel
self.CMB0a = LMS7002_GFIR(chip, RxTx, Channel, 3, '0a')
self.CMB1a = LMS7002_GFIR(chip, RxTx, Channel, 3, '1a')
self.CMB2a = LMS7002_GFIR(chip, RxTx, Channel, 3, '2a')
self.CMB3a = LMS7002_GFIR(chip, RxTx, Channel, 3, '3a')
self.CMB4a = LMS7002_GFIR(chip, RxTx, Channel, 3, '4a')
self.CMB0b = LMS7002_GFIR(chip, RxTx, Channel, 3, '0b')
self.CMB1b = LMS7002_GFIR(chip, RxTx, Channel, 3, '1b')
self.CMB2b = LMS7002_GFIR(chip, RxTx, Channel, 3, '2b')
self.CMB3b = LMS7002_GFIR(chip, RxTx, Channel, 3, '3b')
self.CMB4b = LMS7002_GFIR(chip, RxTx, Channel, 3, '4b')
self.CMB0c = LMS7002_GFIR(chip, RxTx, Channel, 3, '0c')
self.CMB1c = LMS7002_GFIR(chip, RxTx, Channel, 3, '1c')
self.CMB2c = LMS7002_GFIR(chip, RxTx, Channel, 3, '2c')
self.CMB3c = LMS7002_GFIR(chip, RxTx, Channel, 3, '3c')
self.CMB4c = LMS7002_GFIR(chip, RxTx, Channel, 3, '4c')
def zeroOut(self):
"""
Initialize all FIR coefficients to 0
"""
for i in range(0, 8):
self.CMB0a[i] = 0
self.CMB1a[i] = 0
self.CMB2a[i] = 0
self.CMB3a[i] = 0
self.CMB4a[i] = 0
self.CMB0b[i] = 0
self.CMB1b[i] = 0
self.CMB2b[i] = 0
self.CMB3b[i] = 0
self.CMB4b[i] = 0
self.CMB0c[i] = 0
self.CMB1c[i] = 0
self.CMB2c[i] = 0
self.CMB3c[i] = 0
self.CMB4c[i] = 0
#
# Operator overloading for easy access FIR[index]=val
#
def __getitem__(self, key):
"""
Get the FIR coefficient bank
"""
if key not in [(0,'a'), (0, 'b'), (0, 'c'),
(1,'a'), (1, 'b'), (1, 'c'),
(2,'a'), (2, 'b'), (2, 'c'),
(3,'a'), (3, 'b'), (3, 'c'),
(4,'a'), (4, 'b'), (4, 'c')]:
raise ValueError("Index must be in [(0,'a'), (0, 'b'), (0, 'c'), (1,'a'), (1, 'b'), (1, 'c'), (2,'a'), (2, 'b'), (2, 'c'), (3,'a'), (3, 'b'), (3, 'c'), (4,'a'), (4, 'b'), (4, 'c')")
if key==(0,'a'):
return self.CMB0a
elif key==(1,'a'):
return self.CMB1a
elif key==(2,'a'):
return self.CMB2a
elif key==(3,'a'):
return self.CMB3a
elif key==(4,'a'):
return self.CMB4a
elif key==(0,'b'):
return self.CMB0b
elif key==(1,'b'):
return self.CMB1b
elif key==(2,'b'):
return self.CMB2b
elif key==(3,'b'):
return self.CMB3b
elif key==(4,'b'):
return self.CMB4b
elif key==(0,'c'):
return self.CMB0c
elif key==(1,'c'):
return self.CMB1c
elif key==(2,'c'):
return self.CMB2c
elif key==(3,'c'):
return self.CMB3c
else:
return self.CMB4c
#
# Operator overloading for readable representation of FIR coefficients
#
def __str__(self):
return self.__repr__()
def __repr__(self):
ret = self.rxtx+"GFIR3 Channel "+self.channel+"\n"
for coef in [self.CMB0a, self.CMB1a, self.CMB2a, self.CMB3a, self.CMB4a,
self.CMB0b, self.CMB1b, self.CMB2b, self.CMB3b, self.CMB4b,
self.CMB0c, self.CMB1c, self.CMB2c, self.CMB3c, self.CMB4c]:
tmp = "CMB"+coef.suffix+" = ["
for i in range(0,8):
tmp += self.intToHex(coef[i])+', '
tmp = tmp[:-2] + "]\n"
ret += tmp
return ret
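# ----------------------------------------------------------------------------
# Usage note (added): coefficient banks are addressed by (bank, part) tuples,
# e.g. gfir3[(2, 'b')] returns the CMB2b LMS7002_GFIR instance, and
# gfir3[(2, 'b')][5] = 0x10 would write its sixth coefficient, mirroring the
# per-coefficient writes in zeroOut(). (gfir3 is a hypothetical instance
# created elsewhere with a chip object.)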
|
py
|
1a5d122d732a956b66d65f638317707ea310ea30
|
"""
Test output formatting for Series/DataFrame, including to_string & reprs
"""
from datetime import datetime
from io import StringIO
import itertools
from operator import methodcaller
import os
from pathlib import Path
import re
from shutil import get_terminal_size
import sys
import textwrap
import dateutil
import numpy as np
import pytest
import pytz
from pandas.compat import is_platform_32bit, is_platform_windows
import pandas as pd
from pandas import (
DataFrame,
Index,
MultiIndex,
NaT,
Series,
Timestamp,
date_range,
get_option,
option_context,
read_csv,
reset_option,
set_option,
)
import pandas._testing as tm
import pandas.io.formats.format as fmt
import pandas.io.formats.printing as printing
use_32bit_repr = is_platform_windows() or is_platform_32bit()
@pytest.fixture(params=["string", "pathlike", "buffer"])
def filepath_or_buffer_id(request):
"""
A fixture yielding test ids for filepath_or_buffer testing.
"""
return request.param
@pytest.fixture
def filepath_or_buffer(filepath_or_buffer_id, tmp_path):
"""
A fixture yielding a string representing a filepath, a path-like object
and a StringIO buffer. Also checks that buffer is not closed.
"""
if filepath_or_buffer_id == "buffer":
buf = StringIO()
yield buf
assert not buf.closed
else:
assert isinstance(tmp_path, Path)
if filepath_or_buffer_id == "pathlike":
yield tmp_path / "foo"
else:
yield str(tmp_path / "foo")
@pytest.fixture
def assert_filepath_or_buffer_equals(
filepath_or_buffer, filepath_or_buffer_id, encoding
):
"""
Assertion helper for checking filepath_or_buffer.
"""
def _assert_filepath_or_buffer_equals(expected):
if filepath_or_buffer_id == "string":
with open(filepath_or_buffer, encoding=encoding) as f:
result = f.read()
elif filepath_or_buffer_id == "pathlike":
result = filepath_or_buffer.read_text(encoding=encoding)
elif filepath_or_buffer_id == "buffer":
result = filepath_or_buffer.getvalue()
assert result == expected
return _assert_filepath_or_buffer_equals
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
return pth
def has_info_repr(df):
r = repr(df)
c1 = r.split("\n")[0].startswith("<class")
    c2 = r.split("\n")[0].startswith(r"&lt;class") # _repr_html_
return c1 or c2
def has_non_verbose_info_repr(df):
has_info = has_info_repr(df)
r = repr(df)
# 1. <class>
# 2. Index
# 3. Columns
# 4. dtype
# 5. memory usage
# 6. trailing newline
nv = len(r.split("\n")) == 6
return has_info and nv
def has_horizontally_truncated_repr(df):
try: # Check header row
fst_line = np.array(repr(df).splitlines()[0].split())
cand_col = np.where(fst_line == "...")[0][0]
except IndexError:
return False
# Make sure each row has this ... in the same place
r = repr(df)
for ix, l in enumerate(r.splitlines()):
if not r.split()[cand_col] == "...":
return False
return True
def has_vertically_truncated_repr(df):
r = repr(df)
only_dot_row = False
for row in r.splitlines():
if re.match(r"^[\.\ ]+$", row):
only_dot_row = True
return only_dot_row
def has_truncated_repr(df):
return has_horizontally_truncated_repr(df) or has_vertically_truncated_repr(df)
def has_doubly_truncated_repr(df):
return has_horizontally_truncated_repr(df) and has_vertically_truncated_repr(df)
def has_expanded_repr(df):
r = repr(df)
for line in r.split("\n"):
if line.endswith("\\"):
return True
return False
@pytest.mark.filterwarnings("ignore::FutureWarning:.*format")
class TestDataFrameFormatting:
def test_repr_embedded_ndarray(self):
arr = np.empty(10, dtype=[("err", object)])
for i in range(len(arr)):
arr["err"][i] = np.random.randn(i)
df = DataFrame(arr)
repr(df["err"])
repr(df)
df.to_string()
def test_eng_float_formatter(self, float_frame):
df = float_frame
df.loc[5] = 0
fmt.set_eng_float_format()
repr(df)
fmt.set_eng_float_format(use_eng_prefix=True)
repr(df)
fmt.set_eng_float_format(accuracy=0)
repr(df)
tm.reset_display_options()
def test_show_null_counts(self):
df = DataFrame(1, columns=range(10), index=range(10))
df.iloc[1, 1] = np.nan
def check(null_counts, result):
buf = StringIO()
df.info(buf=buf, null_counts=null_counts)
assert ("non-null" in buf.getvalue()) is result
with option_context(
"display.max_info_rows", 20, "display.max_info_columns", 20
):
check(None, True)
check(True, True)
check(False, False)
with option_context("display.max_info_rows", 5, "display.max_info_columns", 5):
check(None, False)
check(True, False)
check(False, False)
def test_repr_tuples(self):
buf = StringIO()
df = DataFrame({"tups": list(zip(range(10), range(10)))})
repr(df)
df.to_string(col_space=10, buf=buf)
def test_repr_truncation(self):
max_len = 20
with option_context("display.max_colwidth", max_len):
df = DataFrame(
{
"A": np.random.randn(10),
"B": [
tm.rands(np.random.randint(max_len - 1, max_len + 1))
for i in range(10)
],
}
)
r = repr(df)
r = r[r.find("\n") + 1 :]
adj = fmt._get_adjustment()
for line, value in zip(r.split("\n"), df["B"]):
if adj.len(value) + 1 > max_len:
assert "..." in line
else:
assert "..." not in line
with option_context("display.max_colwidth", 999999):
assert "..." not in repr(df)
with option_context("display.max_colwidth", max_len + 2):
assert "..." not in repr(df)
def test_repr_deprecation_negative_int(self):
# FIXME: remove in future version after deprecation cycle
# Non-regression test for:
# https://github.com/pandas-dev/pandas/issues/31532
width = get_option("display.max_colwidth")
with tm.assert_produces_warning(FutureWarning):
set_option("display.max_colwidth", -1)
set_option("display.max_colwidth", width)
def test_repr_chop_threshold(self):
df = DataFrame([[0.1, 0.5], [0.5, -0.1]])
pd.reset_option("display.chop_threshold") # default None
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
with option_context("display.chop_threshold", 0.2):
assert repr(df) == " 0 1\n0 0.0 0.5\n1 0.5 0.0"
with option_context("display.chop_threshold", 0.6):
assert repr(df) == " 0 1\n0 0.0 0.0\n1 0.0 0.0"
with option_context("display.chop_threshold", None):
assert repr(df) == " 0 1\n0 0.1 0.5\n1 0.5 -0.1"
def test_repr_chop_threshold_column_below(self):
# GH 6839: validation case
df = pd.DataFrame([[10, 20, 30, 40], [8e-10, -1e-11, 2e-9, -2e-11]]).T
with option_context("display.chop_threshold", 0):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 -1.000000e-11\n"
"2 30.0 2.000000e-09\n"
"3 40.0 -2.000000e-11"
)
with option_context("display.chop_threshold", 1e-8):
assert repr(df) == (
" 0 1\n"
"0 10.0 0.000000e+00\n"
"1 20.0 0.000000e+00\n"
"2 30.0 0.000000e+00\n"
"3 40.0 0.000000e+00"
)
with option_context("display.chop_threshold", 5e-11):
assert repr(df) == (
" 0 1\n"
"0 10.0 8.000000e-10\n"
"1 20.0 0.000000e+00\n"
"2 30.0 2.000000e-09\n"
"3 40.0 0.000000e+00"
)
def test_repr_obeys_max_seq_limit(self):
with option_context("display.max_seq_items", 2000):
assert len(printing.pprint_thing(list(range(1000)))) > 1000
with option_context("display.max_seq_items", 5):
assert len(printing.pprint_thing(list(range(1000)))) < 100
def test_repr_set(self):
assert printing.pprint_thing({1}) == "{1}"
def test_repr_is_valid_construction_code(self):
# for the case of Index, where the repr is traditional rather than
# stylized
idx = Index(["a", "b"])
res = eval("pd." + repr(idx))
tm.assert_series_equal(Series(res), Series(idx))
def test_repr_should_return_str(self):
# https://docs.python.org/3/reference/datamodel.html#object.__repr__
# "...The return value must be a string object."
# (str on py2.x, str (unicode) on py3)
data = [8, 5, 3, 5]
index1 = ["\u03c3", "\u03c4", "\u03c5", "\u03c6"]
cols = ["\u03c8"]
df = DataFrame(data, columns=cols, index=index1)
assert type(df.__repr__()) == str # both py2 / 3
def test_repr_no_backslash(self):
with option_context("mode.sim_interactive", True):
df = DataFrame(np.random.randn(10, 4))
assert "\\" not in repr(df)
def test_expand_frame_repr(self):
df_small = DataFrame("hello", index=[0], columns=[0])
df_wide = DataFrame("hello", index=[0], columns=range(10))
df_tall = DataFrame("hello", index=range(30), columns=range(5))
with option_context("mode.sim_interactive", True):
with option_context(
"display.max_columns",
10,
"display.width",
20,
"display.max_rows",
20,
"display.show_dimensions",
True,
):
with option_context("display.expand_frame_repr", True):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_truncated_repr(df_wide)
assert has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert has_expanded_repr(df_tall)
with option_context("display.expand_frame_repr", False):
assert not has_truncated_repr(df_small)
assert not has_expanded_repr(df_small)
assert not has_horizontally_truncated_repr(df_wide)
assert not has_expanded_repr(df_wide)
assert has_vertically_truncated_repr(df_tall)
assert not has_expanded_repr(df_tall)
def test_repr_non_interactive(self):
# in non interactive mode, there can be no dependency on the
# result of terminal auto size detection
df = DataFrame("hello", index=range(1000), columns=range(5))
with option_context(
"mode.sim_interactive", False, "display.width", 0, "display.max_rows", 5000
):
assert not has_truncated_repr(df)
assert not has_expanded_repr(df)
def test_repr_truncates_terminal_size(self, monkeypatch):
# see gh-21180
terminal_size = (118, 96)
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
index = range(5)
columns = pd.MultiIndex.from_tuples(
[
("This is a long title with > 37 chars.", "cat"),
("This is a loooooonger title with > 43 chars.", "dog"),
]
)
df = pd.DataFrame(1, index=index, columns=columns)
result = repr(df)
h1, h2 = result.split("\n")[:2]
assert "long" in h1
assert "loooooonger" in h1
assert "cat" in h2
assert "dog" in h2
# regular columns
df2 = pd.DataFrame({"A" * 41: [1, 2], "B" * 41: [1, 2]})
result = repr(df2)
assert df2.columns[0] in result.split("\n")[0]
def test_repr_truncates_terminal_size_full(self, monkeypatch):
# GH 22984 ensure entire window is filled
terminal_size = (80, 24)
df = pd.DataFrame(np.random.rand(1, 7))
monkeypatch.setattr(
"pandas.io.formats.format.get_terminal_size", lambda: terminal_size
)
assert "..." not in str(df)
def test_repr_truncation_column_size(self):
# dataframe with last column very wide -> check it is not used to
# determine size of truncation (...) column
df = pd.DataFrame(
{
"a": [108480, 30830],
"b": [12345, 12345],
"c": [12345, 12345],
"d": [12345, 12345],
"e": ["a" * 50] * 2,
}
)
assert "..." in str(df)
assert " ... " not in str(df)
def test_repr_max_columns_max_rows(self):
term_width, term_height = get_terminal_size()
if term_width < 10 or term_height < 10:
pytest.skip(f"terminal size too small, {term_width} x {term_height}")
def mkframe(n):
index = [f"{i:05d}" for i in range(n)]
return DataFrame(0, index, index)
df6 = mkframe(6)
df10 = mkframe(10)
with option_context("mode.sim_interactive", True):
with option_context("display.width", term_width * 2):
with option_context("display.max_rows", 5, "display.max_columns", 5):
assert not has_expanded_repr(mkframe(4))
assert not has_expanded_repr(mkframe(5))
assert not has_expanded_repr(df6)
assert has_doubly_truncated_repr(df6)
with option_context("display.max_rows", 20, "display.max_columns", 10):
# Out off max_columns boundary, but no extending
# since not exceeding width
assert not has_expanded_repr(df6)
assert not has_truncated_repr(df6)
with option_context("display.max_rows", 9, "display.max_columns", 10):
# out vertical bounds can not result in expanded repr
assert not has_expanded_repr(df10)
assert has_vertically_truncated_repr(df10)
# width=None in terminal, auto detection
with option_context(
"display.max_columns",
100,
"display.max_rows",
term_width * 20,
"display.width",
None,
):
df = mkframe((term_width // 7) - 2)
assert not has_expanded_repr(df)
df = mkframe((term_width // 7) + 2)
printing.pprint_thing(df._repr_fits_horizontal_())
assert has_expanded_repr(df)
def test_repr_min_rows(self):
df = pd.DataFrame({"a": range(20)})
# default setting no truncation even if above min_rows
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
df = pd.DataFrame({"a": range(61)})
# default of max_rows 60 triggers truncation if above
assert ".." in repr(df)
assert ".." in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(df)
assert "2 " not in repr(df)
assert "..." in df._repr_html_()
assert "<td>2</td>" not in df._repr_html_()
with option_context("display.max_rows", 12, "display.min_rows", None):
# when set to None, follow value of max_rows
assert "5 5" in repr(df)
assert "<td>5</td>" in df._repr_html_()
with option_context("display.max_rows", 10, "display.min_rows", 12):
# when set value higher as max_rows, use the minimum
assert "5 5" not in repr(df)
assert "<td>5</td>" not in df._repr_html_()
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(df)
assert ".." not in df._repr_html_()
def test_str_max_colwidth(self):
# GH 7856
df = pd.DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "uncomfortably long line with lots of stuff",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably long line with lots of stuff 1\n"
"1 foo bar stuff 1"
)
with option_context("max_colwidth", 20):
assert str(df) == (
" a b c d\n"
"0 foo bar uncomfortably lo... 1\n"
"1 foo bar stuff 1"
)
def test_to_string_truncate(self):
# GH 9784 - dont truncate when calling DataFrame.to_string
df = pd.DataFrame(
[
{
"a": "foo",
"b": "bar",
"c": "let's make this a very VERY long line that is longer "
"than the default 50 character limit",
"d": 1,
},
{"a": "foo", "b": "bar", "c": "stuff", "d": 1},
]
)
df.set_index(["a", "b", "c"])
assert df.to_string() == (
" a b "
" c d\n"
"0 foo bar let's make this a very VERY long line t"
"hat is longer than the default 50 character limit 1\n"
"1 foo bar "
" stuff 1"
)
with option_context("max_colwidth", 20):
# the display option has no effect on the to_string method
assert df.to_string() == (
" a b "
" c d\n"
"0 foo bar let's make this a very VERY long line t"
"hat is longer than the default 50 character limit 1\n"
"1 foo bar "
" stuff 1"
)
assert df.to_string(max_colwidth=20) == (
" a b c d\n"
"0 foo bar let's make this ... 1\n"
"1 foo bar stuff 1"
)
def test_auto_detect(self):
term_width, term_height = get_terminal_size()
fac = 1.05 # Arbitrary large factor to exceed term width
cols = range(int(term_width * fac))
index = range(10)
df = DataFrame(index=index, columns=cols)
with option_context("mode.sim_interactive", True):
with option_context("max_rows", None):
with option_context("max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
with option_context("max_rows", 0):
with option_context("max_columns", 0):
# Truncate with auto detection.
assert has_horizontally_truncated_repr(df)
index = range(int(term_height * fac))
df = DataFrame(index=index, columns=cols)
with option_context("max_rows", 0):
with option_context("max_columns", None):
# Wrap around with None
assert has_expanded_repr(df)
# Truncate vertically
assert has_vertically_truncated_repr(df)
with option_context("max_rows", None):
with option_context("max_columns", 0):
assert has_horizontally_truncated_repr(df)
def test_to_string_repr_unicode(self):
buf = StringIO()
unicode_values = ["\u03c3"] * 10
unicode_values = np.array(unicode_values, dtype=object)
df = DataFrame({"unicode": unicode_values})
df.to_string(col_space=10, buf=buf)
# it works!
repr(df)
idx = Index(["abc", "\u03c3a", "aegdvg"])
ser = Series(np.random.randn(len(idx)), idx)
rs = repr(ser).split("\n")
line_len = len(rs[0])
for line in rs[1:]:
try:
line = line.decode(get_option("display.encoding"))
except AttributeError:
pass
if not line.startswith("dtype:"):
assert len(line) == line_len
# it works even if sys.stdin in None
_stdin = sys.stdin
try:
sys.stdin = None
repr(df)
finally:
sys.stdin = _stdin
def test_to_string_unicode_columns(self, float_frame):
df = DataFrame({"\u03c3": np.arange(10.0)})
buf = StringIO()
df.to_string(buf=buf)
buf.getvalue()
buf = StringIO()
df.info(buf=buf)
buf.getvalue()
result = float_frame.to_string()
assert isinstance(result, str)
def test_to_string_utf8_columns(self):
n = "\u05d0".encode("utf-8")
with option_context("display.max_rows", 1):
df = DataFrame([1, 2], columns=[n])
repr(df)
def test_to_string_unicode_two(self):
dm = DataFrame({"c/\u03c3": []})
buf = StringIO()
dm.to_string(buf)
def test_to_string_unicode_three(self):
dm = DataFrame(["\xc2"])
buf = StringIO()
dm.to_string(buf)
def test_to_string_with_formatters(self):
df = DataFrame(
{
"int": [1, 2, 3],
"float": [1.0, 2.0, 3.0],
"object": [(1, 2), True, False],
},
columns=["int", "float", "object"],
)
formatters = [
("int", lambda x: f"0x{x:x}"),
("float", lambda x: f"[{x: 4.1f}]"),
("object", lambda x: f"-{x!s}-"),
]
result = df.to_string(formatters=dict(formatters))
result2 = df.to_string(formatters=list(zip(*formatters))[1])
assert result == (
" int float object\n"
"0 0x1 [ 1.0] -(1, 2)-\n"
"1 0x2 [ 2.0] -True-\n"
"2 0x3 [ 3.0] -False-"
)
assert result == result2
def test_to_string_with_datetime64_monthformatter(self):
months = [datetime(2016, 1, 1), datetime(2016, 2, 2)]
x = DataFrame({"months": months})
def format_func(x):
return x.strftime("%Y-%m")
result = x.to_string(formatters={"months": format_func})
expected = "months\n0 2016-01\n1 2016-02"
assert result.strip() == expected
def test_to_string_with_datetime64_hourformatter(self):
x = DataFrame(
{
"hod": pd.to_datetime(
["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f"
)
}
)
def format_func(x):
return x.strftime("%H:%M")
result = x.to_string(formatters={"hod": format_func})
expected = "hod\n0 10:10\n1 12:12"
assert result.strip() == expected
def test_to_string_with_formatters_unicode(self):
df = DataFrame({"c/\u03c3": [1, 2, 3]})
result = df.to_string(formatters={"c/\u03c3": str})
assert result == " c/\u03c3\n" + "0 1\n1 2\n2 3"
def test_east_asian_unicode_false(self):
        # not aligned properly because east asian character width is not accounted for
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あああああ あ\n"
"bb い いいい\nc う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\nあああ あああああ あ\n"
"いいいいいい い いいい\nうう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=pd.Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=pd.Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = pd.DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n0 あああああ ... さ\n"
".. ... ... ...\n3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\nあああ あああああ ... さ\n"
".. ... ... ...\naaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
def test_east_asian_unicode_true(self):
# Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# mid col
df = DataFrame(
{"a": ["あ", "いいい", "う", "ええええええ"], "b": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na あ 1\n"
"bb いいい 222\nc う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# last col
df = DataFrame(
{"a": [1, 222, 33333, 4], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\na 1 あ\n"
"bb 222 いいい\nc 33333 う\n"
"ddd 4 ええええええ"
)
assert repr(df) == expected
# all col
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" a b\n"
"a あああああ あ\n"
"bb い いいい\n"
"c う う\n"
"ddd えええ ええええええ"
)
assert repr(df) == expected
# column name
df = DataFrame(
{"b": ["あ", "いいい", "う", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "ddd"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c う 33333\n"
"ddd ええええええ 4"
)
assert repr(df) == expected
# index
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=["あああ", "いいいいいい", "うう", "え"],
)
expected = (
" a b\n"
"あああ あああああ あ\n"
"いいいいいい い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# index name
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=pd.Index(["あ", "い", "うう", "え"], name="おおおお"),
)
expected = (
" a b\n"
"おおおお \n"
"あ あああああ あ\n"
"い い いいい\n"
"うう う う\n"
"え えええ ええええええ"
)
assert repr(df) == expected
# all
df = DataFrame(
{"あああ": ["あああ", "い", "う", "えええええ"], "いいいいい": ["あ", "いいい", "う", "ええ"]},
index=pd.Index(["あ", "いいい", "うう", "え"], name="お"),
)
expected = (
" あああ いいいいい\n"
"お \n"
"あ あああ あ\n"
"いいい い いいい\n"
"うう う う\n"
"え えええええ ええ"
)
assert repr(df) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
df = DataFrame(
{"a": ["あああああ", "い", "う", "えええ"], "b": ["あ", "いいい", "う", "ええええええ"]},
index=idx,
)
expected = (
" a b\n"
"あ いい あああああ あ\n"
"う え い いいい\n"
"おおお かかかか う う\n"
"き くく えええ ええええええ"
)
assert repr(df) == expected
# truncate
with option_context("display.max_rows", 3, "display.max_columns", 3):
df = pd.DataFrame(
{
"a": ["あああああ", "い", "う", "えええ"],
"b": ["あ", "いいい", "う", "ええええええ"],
"c": ["お", "か", "ききき", "くくくくくく"],
"ああああ": ["さ", "し", "す", "せ"],
},
columns=["a", "b", "c", "ああああ"],
)
expected = (
" a ... ああああ\n"
"0 あああああ ... さ\n"
".. ... ... ...\n"
"3 えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
df.index = ["あああ", "いいいい", "う", "aaa"]
expected = (
" a ... ああああ\n"
"あああ あああああ ... さ\n"
"... ... ... ...\n"
"aaa えええ ... せ\n"
"\n[4 rows x 4 columns]"
)
assert repr(df) == expected
# ambiguous unicode
df = DataFrame(
{"b": ["あ", "いいい", "¡¡", "ええええええ"], "あああああ": [1, 222, 33333, 4]},
index=["a", "bb", "c", "¡¡¡"],
)
expected = (
" b あああああ\n"
"a あ 1\n"
"bb いいい 222\n"
"c ¡¡ 33333\n"
"¡¡¡ ええええええ 4"
)
assert repr(df) == expected
def test_to_string_buffer_all_unicode(self):
buf = StringIO()
empty = DataFrame({"c/\u03c3": Series(dtype=object)})
nonempty = DataFrame({"c/\u03c3": Series([1, 2, 3])})
print(empty, file=buf)
print(nonempty, file=buf)
# this should work
buf.getvalue()
def test_to_string_with_col_space(self):
df = DataFrame(np.random.random(size=(1, 3)))
c10 = len(df.to_string(col_space=10).split("\n")[1])
c20 = len(df.to_string(col_space=20).split("\n")[1])
c30 = len(df.to_string(col_space=30).split("\n")[1])
assert c10 < c20 < c30
# GH 8230
# col_space wasn't being applied with header=False
with_header = df.to_string(col_space=20)
with_header_row1 = with_header.splitlines()[1]
no_header = df.to_string(col_space=20, header=False)
assert len(with_header_row1) == len(no_header)
def test_to_string_truncate_indices(self):
for index in [
tm.makeStringIndex,
tm.makeUnicodeIndex,
tm.makeIntIndex,
tm.makeDateIndex,
tm.makePeriodIndex,
]:
for column in [tm.makeStringIndex]:
for h in [10, 20]:
for w in [10, 20]:
with option_context("display.expand_frame_repr", False):
df = DataFrame(index=index(h), columns=column(w))
with option_context("display.max_rows", 15):
if h == 20:
assert has_vertically_truncated_repr(df)
else:
assert not has_vertically_truncated_repr(df)
with option_context("display.max_columns", 15):
if w == 20:
assert has_horizontally_truncated_repr(df)
else:
assert not (has_horizontally_truncated_repr(df))
with option_context(
"display.max_rows", 15, "display.max_columns", 15
):
if h == 20 and w == 20:
assert has_doubly_truncated_repr(df)
else:
assert not has_doubly_truncated_repr(df)
def test_to_string_truncate_multilevel(self):
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
df = DataFrame(index=arrays, columns=arrays)
with option_context("display.max_rows", 7, "display.max_columns", 7):
assert has_doubly_truncated_repr(df)
def test_truncate_with_different_dtypes(self):
# 11594, 12045
        # when truncated, the dtypes of the head and tail splits can differ
# 11594
import datetime
s = Series(
[datetime.datetime(2012, 1, 1)] * 10
+ [datetime.datetime(1012, 1, 2)]
+ [datetime.datetime(2012, 1, 3)] * 10
)
with pd.option_context("display.max_rows", 8):
result = str(s)
assert "object" in result
# 12045
df = DataFrame({"text": ["some words"] + [None] * 9})
with pd.option_context("display.max_rows", 8, "display.max_columns", 3):
result = str(df)
assert "None" in result
assert "NaN" not in result
def test_truncate_with_different_dtypes_multiindex(self):
# GH#13000
df = DataFrame({"Vals": range(100)})
frame = pd.concat([df], keys=["Sweep"], names=["Sweep", "Index"])
result = repr(frame)
result2 = repr(frame.iloc[:5])
assert result.startswith(result2)
def test_datetimelike_frame(self):
# GH 12211
df = DataFrame(
{"date": [pd.Timestamp("20130101").tz_localize("UTC")] + [pd.NaT] * 5}
)
with option_context("display.max_rows", 5):
result = str(df)
assert "2013-01-01 00:00:00+00:00" in result
assert "NaT" in result
assert "..." in result
assert "[6 rows x 1 columns]" in result
dts = [pd.Timestamp("2011-01-01", tz="US/Eastern")] * 5 + [pd.NaT] * 5
df = pd.DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00-05:00 1\n"
"1 2011-01-01 00:00:00-05:00 2\n"
".. ... ..\n"
"8 NaT 9\n"
"9 NaT 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [pd.NaT] * 5 + [pd.Timestamp("2011-01-01", tz="US/Eastern")] * 5
df = pd.DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 NaT 1\n"
"1 NaT 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
dts = [pd.Timestamp("2011-01-01", tz="Asia/Tokyo")] * 5 + [
pd.Timestamp("2011-01-01", tz="US/Eastern")
] * 5
df = pd.DataFrame({"dt": dts, "x": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]})
with option_context("display.max_rows", 5):
expected = (
" dt x\n"
"0 2011-01-01 00:00:00+09:00 1\n"
"1 2011-01-01 00:00:00+09:00 2\n"
".. ... ..\n"
"8 2011-01-01 00:00:00-05:00 9\n"
"9 2011-01-01 00:00:00-05:00 10\n\n"
"[10 rows x 2 columns]"
)
assert repr(df) == expected
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
df = DataFrame({"A": date_range(start=start_date, freq="D", periods=5)})
result = str(df)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
df = DataFrame({"A": range(5)}, index=dti)
result = str(df.index)
assert start_date in result
def test_nonunicode_nonascii_alignment(self):
df = DataFrame([["aa\xc3\xa4\xc3\xa4", 1], ["bbbb", 2]])
rep_str = df.to_string()
lines = rep_str.split("\n")
assert len(lines[1]) == len(lines[2])
def test_unicode_problem_decoding_as_ascii(self):
dm = DataFrame({"c/\u03c3": Series({"test": np.nan})})
str(dm.to_string())
def test_string_repr_encoding(self, datapath):
filepath = datapath("io", "parser", "data", "unicode_series.csv")
df = pd.read_csv(filepath, header=None, encoding="latin1")
repr(df)
repr(df[1])
def test_repr_corner(self):
# representing infs poses no problems
df = DataFrame({"foo": [-np.inf, np.inf]})
repr(df)
def test_frame_info_encoding(self):
index = ["'Til There Was You (1997)", "ldum klaka (Cold Fever) (1994)"]
fmt.set_option("display.max_rows", 1)
df = DataFrame(columns=["a", "b", "c"], index=index)
repr(df)
repr(df.T)
fmt.set_option("display.max_rows", 200)
def test_wide_repr(self):
with option_context(
"mode.sim_interactive",
True,
"display.show_dimensions",
True,
"display.max_columns",
20,
):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
assert f"10 rows x {max_cols - 1} columns" in rep_str
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 120):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_columns(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
df = DataFrame(
np.random.randn(5, 3), columns=["a" * 90, "b" * 90, "c" * 90]
)
rep_str = repr(df)
assert len(rep_str.splitlines()) == 20
def test_wide_repr_named(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
df.index.name = "DataFrame Index"
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "DataFrame Index" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
max_cols = get_option("display.max_columns")
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)), index=midx)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
for line in wide_repr.splitlines()[1::13]:
assert "Level 0 Level 1" in line
reset_option("display.expand_frame_repr")
def test_wide_repr_multiindex_cols(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = get_option("display.max_columns")
midx = MultiIndex.from_arrays(tm.rands_array(5, size=(2, 10)))
mcols = MultiIndex.from_arrays(tm.rands_array(3, size=(2, max_cols - 1)))
df = DataFrame(
tm.rands_array(25, (10, max_cols - 1)), index=midx, columns=mcols
)
df.index.names = ["Level 0", "Level 1"]
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150, "display.max_columns", 20):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_unicode(self):
with option_context("mode.sim_interactive", True, "display.max_columns", 20):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
set_option("display.expand_frame_repr", False)
rep_str = repr(df)
set_option("display.expand_frame_repr", True)
wide_repr = repr(df)
assert rep_str != wide_repr
with option_context("display.width", 150):
wider_repr = repr(df)
assert len(wider_repr) < len(wide_repr)
reset_option("display.expand_frame_repr")
def test_wide_repr_wide_long_columns(self):
with option_context("mode.sim_interactive", True):
df = DataFrame({"a": ["a" * 30, "b" * 30], "b": ["c" * 70, "d" * 80]})
result = repr(df)
assert "ccccc" in result
assert "ddddd" in result
def test_long_series(self):
n = 1000
s = Series(
np.random.randint(-50, 50, n),
index=[f"s{x:04d}" for x in range(n)],
dtype="int64",
)
import re
str_rep = str(s)
nmatches = len(re.findall("dtype", str_rep))
assert nmatches == 1
def test_index_with_nan(self):
# GH 2850
df = DataFrame(
{
"id1": {0: "1a3", 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: "78d", 1: "79d"},
"value": {0: 123, 1: 64},
}
)
# multi-index
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# index
y = df.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nd67 9h4 79d 64"
)
assert result == expected
# with append (this failed in 0.12)
y = df.set_index(["id1", "id2"]).set_index("id3", append=True)
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"1a3 NaN 78d 123\n9h4 d67 79d 64"
)
assert result == expected
# all-nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index("id2")
result = y.to_string()
expected = (
" id1 id3 value\nid2 \n"
"NaN 1a3 78d 123\nNaN 9h4 79d 64"
)
assert result == expected
# partial nan in mi
df2 = df.copy()
df2.loc[:, "id2"] = np.nan
y = df2.set_index(["id2", "id3"])
result = y.to_string()
expected = (
" id1 value\nid2 id3 \n"
"NaN 78d 1a3 123\n 79d 9h4 64"
)
assert result == expected
df = DataFrame(
{
"id1": {0: np.nan, 1: "9h4"},
"id2": {0: np.nan, 1: "d67"},
"id3": {0: np.nan, 1: "79d"},
"value": {0: 123, 1: 64},
}
)
y = df.set_index(["id1", "id2", "id3"])
result = y.to_string()
expected = (
" value\nid1 id2 id3 \n"
"NaN NaN NaN 123\n9h4 d67 79d 64"
)
assert result == expected
def test_to_string(self):
# big mixed
biggie = DataFrame(
{"A": np.random.randn(200), "B": tm.makeStringIndex(200)},
index=np.arange(200),
)
biggie.loc[:20, "A"] = np.nan
biggie.loc[:20, "B"] = np.nan
s = biggie.to_string()
buf = StringIO()
retval = biggie.to_string(buf=buf)
assert retval is None
assert buf.getvalue() == s
assert isinstance(s, str)
# print in right order
result = biggie.to_string(
columns=["B", "A"], col_space=17, float_format="%.5f".__mod__
)
lines = result.split("\n")
header = lines[0].strip().split()
joined = "\n".join(re.sub(r"\s+", " ", x).strip() for x in lines[1:])
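        # parse the rendered body back with read_csv to verify the values survive formatting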
recons = read_csv(StringIO(joined), names=header, header=None, sep=" ")
tm.assert_series_equal(recons["B"], biggie["B"])
assert recons["A"].count() == biggie["A"].count()
assert (np.abs(recons["A"].dropna() - biggie["A"].dropna()) < 0.1).all()
# expected = ['B', 'A']
# assert header == expected
result = biggie.to_string(columns=["A"], col_space=17)
header = result.split("\n")[0].strip().split()
expected = ["A"]
assert header == expected
biggie.to_string(columns=["B", "A"], formatters={"A": lambda x: f"{x:.1f}"})
biggie.to_string(columns=["B", "A"], float_format=str)
biggie.to_string(columns=["B", "A"], col_space=12, float_format=str)
frame = DataFrame(index=np.arange(200))
frame.to_string()
def test_to_string_no_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=False)
expected = "0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
def test_to_string_specified_header(self):
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(header=["X", "Y"])
expected = " X Y\n0 1 4\n1 2 5\n2 3 6"
assert df_s == expected
msg = "Writing 2 cols but got 1 aliases"
with pytest.raises(ValueError, match=msg):
df.to_string(header=["X"])
def test_to_string_no_index(self):
# GH 16839, GH 13032
df = DataFrame({"x": [11, 22], "y": [33, -44], "z": ["AAA", " "]})
df_s = df.to_string(index=False)
# Leading space is expected for positive numbers.
expected = " x y z\n 11 33 AAA\n 22 -44 "
assert df_s == expected
df_s = df[["y", "x", "z"]].to_string(index=False)
expected = " y x z\n 33 11 AAA\n-44 22 "
assert df_s == expected
def test_to_string_line_width_no_index(self):
# GH 13998, GH 22505
df = DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 1 \n 2 \n 3 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, 33], "y": [4, 5, 6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n 33 \n\n y \n 4 \n 5 \n 6 "
assert df_s == expected
df = DataFrame({"x": [11, 22, -33], "y": [4, 5, -6]})
df_s = df.to_string(line_width=1, index=False)
expected = " x \\\n 11 \n 22 \n-33 \n\n y \n 4 \n 5 \n-6 "
assert df_s == expected
def test_to_string_float_formatting(self):
tm.reset_display_options()
fmt.set_option(
"display.precision",
5,
"display.column_space",
12,
"display.notebook_repr_html",
False,
)
df = DataFrame(
{"x": [0, 0.25, 3456.000, 12e45, 1.64e6, 1.7e8, 1.253456, np.pi, -1e6]}
)
df_s = df.to_string()
if _three_digit_exp():
expected = (
" x\n0 0.00000e+000\n1 2.50000e-001\n"
"2 3.45600e+003\n3 1.20000e+046\n4 1.64000e+006\n"
"5 1.70000e+008\n6 1.25346e+000\n7 3.14159e+000\n"
"8 -1.00000e+006"
)
else:
expected = (
" x\n0 0.00000e+00\n1 2.50000e-01\n"
"2 3.45600e+03\n3 1.20000e+46\n4 1.64000e+06\n"
"5 1.70000e+08\n6 1.25346e+00\n7 3.14159e+00\n"
"8 -1.00000e+06"
)
assert df_s == expected
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string()
expected = " x\n0 3234.000\n1 0.253"
assert df_s == expected
tm.reset_display_options()
assert get_option("display.precision") == 6
df = DataFrame({"x": [1e9, 0.2512]})
df_s = df.to_string()
if _three_digit_exp():
expected = " x\n0 1.000000e+009\n1 2.512000e-001"
else:
expected = " x\n0 1.000000e+09\n1 2.512000e-01"
assert df_s == expected
def test_to_string_float_format_no_fixed_width(self):
# GH 21625
df = DataFrame({"x": [0.19999]})
expected = " x\n0 0.200"
assert df.to_string(float_format="%.3f") == expected
# GH 22270
df = DataFrame({"x": [100.0]})
expected = " x\n0 100"
assert df.to_string(float_format="%.0f") == expected
def test_to_string_small_float_values(self):
df = DataFrame({"a": [1.5, 1e-17, -5.5e-7]})
result = df.to_string()
        # same platform-dependent exponent width as above
if _three_digit_exp():
expected = (
" a\n"
"0 1.500000e+000\n"
"1 1.000000e-017\n"
"2 -5.500000e-007"
)
else:
expected = (
" a\n"
"0 1.500000e+00\n"
"1 1.000000e-17\n"
"2 -5.500000e-07"
)
assert result == expected
        # multiplying by zero keeps the sign, so the negative value renders as -0
df = df * 0
result = df.to_string()
expected = " 0\n0 0\n1 0\n2 -0"
def test_to_string_float_index(self):
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.arange(5), index=index)
result = df.to_string()
expected = " 0\n1.5 0\n2.0 1\n3.0 2\n4.0 3\n5.0 4"
assert result == expected
def test_to_string_complex_float_formatting(self):
# GH #25514, 25745
with pd.option_context("display.precision", 5):
df = DataFrame(
{
"x": [
(0.4467846931321966 + 0.0715185102060818j),
(0.2739442392974528 + 0.23515228785438969j),
(0.26974928742135185 + 0.3250604054898979j),
(-1j),
]
}
)
result = df.to_string()
expected = (
" x\n0 0.44678+0.07152j\n"
"1 0.27394+0.23515j\n"
"2 0.26975+0.32506j\n"
"3 -0.00000-1.00000j"
)
assert result == expected
def test_to_string_ascii_error(self):
data = [
(
"0 ",
" .gitignore ",
" 5 ",
" \xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2\xe2\x80\xa2",
)
]
df = DataFrame(data)
# it works!
repr(df)
def test_to_string_int_formatting(self):
df = DataFrame({"x": [-15, 20, 25, -35]})
assert issubclass(df["x"].dtype.type, np.integer)
output = df.to_string()
expected = " x\n0 -15\n1 20\n2 25\n3 -35"
assert output == expected
def test_to_string_index_formatter(self):
df = DataFrame([range(5), range(5, 10), range(10, 15)])
rs = df.to_string(formatters={"__index__": lambda x: "abc"[x]})
xp = """\
0 1 2 3 4
a 0 1 2 3 4
b 5 6 7 8 9
c 10 11 12 13 14\
"""
assert rs == xp
def test_to_string_left_justify_cols(self):
tm.reset_display_options()
df = DataFrame({"x": [3234, 0.253]})
df_s = df.to_string(justify="left")
expected = " x \n0 3234.000\n1 0.253"
assert df_s == expected
def test_to_string_format_na(self):
tm.reset_display_options()
df = DataFrame(
{
"A": [np.nan, -1, -2.1234, 3, 4],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0000 foo\n"
"2 -2.1234 foooo\n"
"3 3.0000 fooooo\n"
"4 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [np.nan, -1.0, -2.0, 3.0, 4.0],
"B": [np.nan, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 NaN NaN\n"
"1 -1.0 foo\n"
"2 -2.0 foooo\n"
"3 3.0 fooooo\n"
"4 4.0 bar"
)
assert result == expected
def test_to_string_format_inf(self):
# Issue #24861
tm.reset_display_options()
df = DataFrame(
{
"A": [-np.inf, np.inf, -1, -2.1234, 3, 4],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0000 foo\n"
"3 -2.1234 foooo\n"
"4 3.0000 fooooo\n"
"5 4.0000 bar"
)
assert result == expected
df = DataFrame(
{
"A": [-np.inf, np.inf, -1.0, -2.0, 3.0, 4.0],
"B": [-np.inf, np.inf, "foo", "foooo", "fooooo", "bar"],
}
)
result = df.to_string()
expected = (
" A B\n"
"0 -inf -inf\n"
"1 inf inf\n"
"2 -1.0 foo\n"
"3 -2.0 foooo\n"
"4 3.0 fooooo\n"
"5 4.0 bar"
)
assert result == expected
def test_to_string_decimal(self):
# Issue #23614
df = DataFrame({"A": [6.0, 3.1, 2.2]})
expected = " A\n0 6,0\n1 3,1\n2 2,2"
assert df.to_string(decimal=",") == expected
def test_to_string_line_width(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
s = df.to_string(line_width=80)
assert max(len(l) for l in s.split("\n")) == 80
def test_show_dimensions(self):
df = DataFrame(123, index=range(10, 15), columns=range(30))
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
True,
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
False,
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
with option_context(
"display.max_rows",
2,
"display.max_columns",
2,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" in str(df)
assert "5 rows" in df._repr_html_()
with option_context(
"display.max_rows",
10,
"display.max_columns",
40,
"display.width",
500,
"display.expand_frame_repr",
"info",
"display.show_dimensions",
"truncate",
):
assert "5 rows" not in str(df)
assert "5 rows" not in df._repr_html_()
def test_repr_html(self, float_frame):
df = float_frame
df._repr_html_()
fmt.set_option("display.max_rows", 1, "display.max_columns", 1)
df._repr_html_()
fmt.set_option("display.notebook_repr_html", False)
df._repr_html_()
tm.reset_display_options()
df = DataFrame([[1, 2], [3, 4]])
fmt.set_option("display.show_dimensions", True)
assert "2 rows" in df._repr_html_()
fmt.set_option("display.show_dimensions", False)
assert "2 rows" not in df._repr_html_()
tm.reset_display_options()
def test_repr_html_mathjax(self):
df = DataFrame([[1, 2], [3, 4]])
assert "tex2jax_ignore" not in df._repr_html_()
with pd.option_context("display.html.use_mathjax", False):
assert "tex2jax_ignore" in df._repr_html_()
def test_repr_html_wide(self):
max_cols = 20
df = DataFrame(tm.rands_array(25, size=(10, max_cols - 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
wide_df = DataFrame(tm.rands_array(25, size=(10, max_cols + 1)))
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in wide_df._repr_html_()
def test_repr_html_wide_multiindex_cols(self):
max_cols = 20
mcols = MultiIndex.from_product(
[np.arange(max_cols // 2), ["foo", "bar"]], names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
reg_repr = df._repr_html_()
assert "..." not in reg_repr
mcols = MultiIndex.from_product(
(np.arange(1 + (max_cols // 2)), ["foo", "bar"]), names=["first", "second"]
)
df = DataFrame(tm.rands_array(25, size=(10, len(mcols))), columns=mcols)
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_repr_html_long(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert str(41 + max_rows // 2) in reg_repr
h = max_rows + 1
df = DataFrame({"A": np.arange(1, 1 + h), "B": np.arange(41, 41 + h)})
long_repr = df._repr_html_()
assert ".." in long_repr
assert str(41 + max_rows // 2) not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_float(self):
with option_context("display.max_rows", 60):
max_rows = get_option("display.max_rows")
h = max_rows - 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
reg_repr = df._repr_html_()
assert ".." not in reg_repr
assert f"<td>{40 + h}</td>" in reg_repr
h = max_rows + 1
df = DataFrame(
{
"idx": np.linspace(-10, 10, h),
"A": np.arange(1, 1 + h),
"B": np.arange(41, 41 + h),
}
).set_index("idx")
long_repr = df._repr_html_()
assert ".." in long_repr
assert "<td>31</td>" not in long_repr
assert f"{h} rows " in long_repr
assert "2 columns" in long_repr
def test_repr_html_long_multiindex(self):
max_rows = 60
max_L1 = max_rows // 2
tuples = list(itertools.product(np.arange(max_L1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(np.random.randn(max_L1 * 2, 2), index=idx, columns=["A", "B"])
with option_context("display.max_rows", 60, "display.max_columns", 20):
reg_repr = df._repr_html_()
assert "..." not in reg_repr
tuples = list(itertools.product(np.arange(max_L1 + 1), ["foo", "bar"]))
idx = MultiIndex.from_tuples(tuples, names=["first", "second"])
df = DataFrame(
np.random.randn((max_L1 + 1) * 2, 2), index=idx, columns=["A", "B"]
)
long_repr = df._repr_html_()
assert "..." in long_repr
def test_repr_html_long_and_wide(self):
max_cols = 20
max_rows = 60
h, w = max_rows - 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." not in df._repr_html_()
h, w = max_rows + 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
with option_context("display.max_rows", 60, "display.max_columns", 20):
assert "..." in df._repr_html_()
def test_info_repr(self):
# GH#21746 For tests inside a terminal (i.e. not CI) we need to detect
# the terminal size to ensure that we try to print something "too big"
term_width, term_height = get_terminal_size()
max_rows = 60
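        # widen the column budget for terminals wider than 80 characters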
max_cols = 20 + (max(term_width, 80) - 80) // 4
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_vertically_truncated_repr(df)
with option_context("display.large_repr", "info"):
assert has_info_repr(df)
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert has_horizontally_truncated_repr(df)
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert has_info_repr(df)
def test_info_repr_max_cols(self):
# GH #6939
df = DataFrame(np.random.randn(10, 5))
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
4,
):
assert has_non_verbose_info_repr(df)
with option_context(
"display.large_repr",
"info",
"display.max_columns",
1,
"display.max_info_columns",
5,
):
assert not has_non_verbose_info_repr(df)
# test verbose overrides
# fmt.set_option('display.max_info_columns', 4) # exceeded
def test_info_repr_html(self):
max_rows = 60
max_cols = 20
# Long
h, w = max_rows + 1, max_cols - 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert r"<class" not in df._repr_html_()
with option_context("display.large_repr", "info"):
assert r"<class" in df._repr_html_()
# Wide
h, w = max_rows - 1, max_cols + 1
df = DataFrame({k: np.arange(1, 1 + h) for k in np.arange(w)})
assert "<class" not in df._repr_html_()
with option_context(
"display.large_repr", "info", "display.max_columns", max_cols
):
assert "<class" in df._repr_html_()
def test_fake_qtconsole_repr_html(self, float_frame):
df = float_frame
def get_ipython():
return {"config": {"KernelApp": {"parent_appname": "ipython-qtconsole"}}}
repstr = df._repr_html_()
assert repstr is not None
fmt.set_option("display.max_rows", 5, "display.max_columns", 2)
repstr = df._repr_html_()
assert "class" in repstr # info fallback
tm.reset_display_options()
def test_pprint_pathological_object(self):
"""
If the test fails, it at least won't hang.
"""
class A:
def __getitem__(self, key):
return 3 # obviously simplified
df = DataFrame([A()])
repr(df) # just don't die
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
skip = True
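        # the header line carries no exponent, so the first iteration is allowed through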
for line in repr(DataFrame({"A": vals})).split("\n")[:-2]:
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert ("+010" in line) or skip
else:
assert ("+10" in line) or skip
skip = False
def test_dict_entries(self):
df = DataFrame({"A": [{"a": 1, "b": 2}]})
val = df.to_string()
assert "'a': 1" in val
assert "'b': 2" in val
def test_period(self):
# GH 12615
df = pd.DataFrame(
{
"A": pd.period_range("2013-01", periods=4, freq="M"),
"B": [
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
pd.Period("2011-04", freq="M"),
],
"C": list("abcd"),
}
)
exp = (
" A B C\n"
"0 2013-01 2011-01 a\n"
"1 2013-02 2011-02-01 b\n"
"2 2013-03 2011-03-01 09:00 c\n"
"3 2013-04 2011-04 d"
)
assert str(df) == exp
def gen_series_formatting():
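    """Build sample Series: uniform one-/two-char values plus ascending and descending string lengths."""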
s1 = pd.Series(["a"] * 100)
s2 = pd.Series(["ab"] * 100)
s3 = pd.Series(["a", "ab", "abc", "abcd", "abcde", "abcdef"])
s4 = s3[::-1]
test_sers = {"onel": s1, "twol": s2, "asc": s3, "desc": s4}
return test_sers
class TestSeriesFormatting:
def setup_method(self, method):
self.ts = tm.makeTimeSeries()
def test_repr_unicode(self):
s = Series(["\u03c3"] * 10)
repr(s)
a = Series(["\u05d0"] * 1000)
a.name = "title1"
repr(a)
def test_to_string(self):
buf = StringIO()
s = self.ts.to_string()
retval = self.ts.to_string(buf=buf)
assert retval is None
assert buf.getvalue().strip() == s
# pass float_format
format = "%.4f".__mod__
result = self.ts.to_string(float_format=format)
result = [x.split()[1] for x in result.split("\n")[:-1]]
expected = [format(x) for x in self.ts]
assert result == expected
# empty string
result = self.ts[:0].to_string()
assert result == "Series([], Freq: B)"
result = self.ts[:0].to_string(length=0)
assert result == "Series([], Freq: B)"
# name and length
cp = self.ts.copy()
cp.name = "foo"
result = cp.to_string(length=True, name=True, dtype=True)
last_line = result.split("\n")[-1].strip()
assert last_line == (f"Freq: B, Name: foo, Length: {len(cp)}, dtype: float64")
def test_freq_name_separation(self):
s = Series(
np.random.randn(10), index=date_range("1/1/2000", periods=10), name=0
)
result = repr(s)
assert "Freq: D, Name: 0" in result
def test_to_string_mixed(self):
s = Series(["foo", np.nan, -1.23, 4.56])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 -1.23\n" + "3 4.56"
assert result == expected
# but don't count NAs as floats
s = Series(["foo", np.nan, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 NaN\n" + "2 bar\n" + "3 baz"
assert result == expected
s = Series(["foo", 5, "bar", "baz"])
result = s.to_string()
expected = "0 foo\n" + "1 5\n" + "2 bar\n" + "3 baz"
assert result == expected
def test_to_string_float_na_spacing(self):
s = Series([0.0, 1.5678, 2.0, -3.0, 4.0])
s[::2] = np.nan
result = s.to_string()
expected = (
"0 NaN\n"
+ "1 1.5678\n"
+ "2 NaN\n"
+ "3 -3.0000\n"
+ "4 NaN"
)
assert result == expected
def test_to_string_without_index(self):
# GH 11729 Test index=False option
s = Series([1, 2, 3, 4])
result = s.to_string(index=False)
expected = " 1\n" + " 2\n" + " 3\n" + " 4"
assert result == expected
def test_unicode_name_in_footer(self):
s = Series([1, 2], name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf = fmt.SeriesFormatter(s, name="\u05e2\u05d1\u05e8\u05d9\u05ea")
sf._get_footer() # should not raise exception
def test_east_asian_unicode_series(self):
        # not aligned properly because east asian character width is not accounted for
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = "あ a\nいい bb\nううう CCC\nええええ D\ndtype: object"
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = "a あ\nbb いい\nc ううう\nddd ええええ\ndtype: object"
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\nいいいい いい\nう ううう\nえええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"], name="おおおおおおお"
)
expected = (
"ああ あ\nいいいい いい\nう ううう\n"
"えええ ええええ\nName: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\nあああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444], index=[1, "AB", pd.Timestamp("2011-01-01"), "あああ"]
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
        # Enable Unicode option -----------------------------------------
with option_context("display.unicode.east_asian_width", True):
# unicode index
s = Series(["a", "bb", "CCC", "D"], index=["あ", "いい", "ううう", "ええええ"])
expected = (
"あ a\nいい bb\nううう CCC\n"
"ええええ D\ndtype: object"
)
assert repr(s) == expected
# unicode values
s = Series(["あ", "いい", "ううう", "ええええ"], index=["a", "bb", "c", "ddd"])
expected = (
"a あ\nbb いい\nc ううう\n"
"ddd ええええ\ndtype: object"
)
assert repr(s) == expected
# both
s = Series(["あ", "いい", "ううう", "ええええ"], index=["ああ", "いいいい", "う", "えええ"])
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
# unicode footer
s = Series(
["あ", "いい", "ううう", "ええええ"],
index=["ああ", "いいいい", "う", "えええ"],
name="おおおおおおお",
)
expected = (
"ああ あ\n"
"いいいい いい\n"
"う ううう\n"
"えええ ええええ\n"
"Name: おおおおおおお, dtype: object"
)
assert repr(s) == expected
# MultiIndex
idx = pd.MultiIndex.from_tuples(
[("あ", "いい"), ("う", "え"), ("おおお", "かかかか"), ("き", "くく")]
)
s = Series([1, 22, 3333, 44444], index=idx)
expected = (
"あ いい 1\n"
"う え 22\n"
"おおお かかかか 3333\n"
"き くく 44444\n"
"dtype: int64"
)
assert repr(s) == expected
# object dtype, shorter than unicode repr
s = Series([1, 22, 3333, 44444], index=[1, "AB", np.nan, "あああ"])
expected = (
"1 1\nAB 22\nNaN 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# object dtype, longer than unicode repr
s = Series(
[1, 22, 3333, 44444],
index=[1, "AB", pd.Timestamp("2011-01-01"), "あああ"],
)
expected = (
"1 1\n"
"AB 22\n"
"2011-01-01 00:00:00 3333\n"
"あああ 44444\ndtype: int64"
)
assert repr(s) == expected
# truncate
with option_context("display.max_rows", 3):
s = Series(["あ", "いい", "ううう", "ええええ"], name="おおおおおおお")
expected = (
"0 あ\n ... \n"
"3 ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
s.index = ["ああ", "いいいい", "う", "えええ"]
expected = (
"ああ あ\n"
" ... \n"
"えええ ええええ\n"
"Name: おおおおおおお, Length: 4, dtype: object"
)
assert repr(s) == expected
# ambiguous unicode
s = Series(
["¡¡", "い¡¡", "ううう", "ええええ"], index=["ああ", "¡¡¡¡いい", "¡¡", "えええ"]
)
expected = (
"ああ ¡¡\n"
"¡¡¡¡いい い¡¡\n"
"¡¡ ううう\n"
"えええ ええええ\ndtype: object"
)
assert repr(s) == expected
def test_float_trim_zeros(self):
vals = [
2.08430917305e10,
3.52205017305e10,
2.30674817305e10,
2.03954217305e10,
5.59897817305e10,
]
for line in repr(Series(vals)).split("\n"):
if line.startswith("dtype:"):
continue
if _three_digit_exp():
assert "+010" in line
else:
assert "+10" in line
def test_datetimeindex(self):
index = date_range("20130102", periods=6)
s = Series(1, index=index)
result = s.to_string()
assert "2013-01-02" in result
# nat in index
s2 = Series(2, index=[Timestamp("20130111"), NaT])
s = s2.append(s)
result = s.to_string()
assert "NaT" in result
# nat in summary
result = str(s2.index)
assert "NaT" in result
@pytest.mark.parametrize(
"start_date",
[
"2017-01-01 23:59:59.999999999",
"2017-01-01 23:59:59.99999999",
"2017-01-01 23:59:59.9999999",
"2017-01-01 23:59:59.999999",
"2017-01-01 23:59:59.99999",
"2017-01-01 23:59:59.9999",
],
)
def test_datetimeindex_highprecision(self, start_date):
# GH19030
# Check that high-precision time values for the end of day are
# included in repr for DatetimeIndex
s1 = Series(date_range(start=start_date, freq="D", periods=5))
result = str(s1)
assert start_date in result
dti = date_range(start=start_date, freq="D", periods=5)
s2 = Series(3, index=dti)
result = str(s2.index)
assert start_date in result
def test_timedelta64(self):
from datetime import datetime, timedelta
Series(np.array([1100, 20], dtype="timedelta64[ns]")).to_string()
s = Series(date_range("2012-1-1", periods=3, freq="D"))
# GH2146
# adding NaTs
y = s - s.shift(1)
result = y.to_string()
assert "1 days" in result
assert "00:00:00" not in result
assert "NaT" in result
# with frac seconds
o = Series([datetime(2012, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:59:59.999850" in result
# rounding?
o = Series([datetime(2012, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +23:00:00" in result
assert "1 days 23:00:00" in result
o = Series([datetime(2012, 1, 1, 1, 1)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:59:00" in result
assert "1 days 22:59:00" in result
o = Series([datetime(2012, 1, 1, 1, 1, microsecond=150)] * 3)
y = s - o
result = y.to_string()
assert "-1 days +22:58:59.999850" in result
assert "0 days 22:58:59.999850" in result
# neg time
td = timedelta(minutes=5, seconds=3)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - s2
result = y.to_string()
assert "-1 days +23:54:57" in result
td = timedelta(microseconds=550)
s2 = Series(date_range("2012-1-1", periods=3, freq="D")) + td
y = s - td
result = y.to_string()
assert "2012-01-01 23:59:59.999450" in result
# no boxing of the actual elements
td = Series(pd.timedelta_range("1 days", periods=3))
result = td.to_string()
assert result == "0 1 days\n1 2 days\n2 3 days"
def test_mixed_datetime64(self):
df = DataFrame({"A": [1, 2], "B": ["2012-01-01", "2012-01-02"]})
df["B"] = pd.to_datetime(df.B)
result = repr(df.loc[0])
assert "2012-01-01" in result
def test_period(self):
# GH 12615
index = pd.period_range("2013-01", periods=6, freq="M")
s = Series(np.arange(6, dtype="int64"), index=index)
exp = (
"2013-01 0\n"
"2013-02 1\n"
"2013-03 2\n"
"2013-04 3\n"
"2013-05 4\n"
"2013-06 5\n"
"Freq: M, dtype: int64"
)
assert str(s) == exp
s = Series(index)
exp = (
"0 2013-01\n"
"1 2013-02\n"
"2 2013-03\n"
"3 2013-04\n"
"4 2013-05\n"
"5 2013-06\n"
"dtype: period[M]"
)
assert str(s) == exp
# periods with mixed freq
s = Series(
[
pd.Period("2011-01", freq="M"),
pd.Period("2011-02-01", freq="D"),
pd.Period("2011-03-01 09:00", freq="H"),
]
)
exp = (
"0 2011-01\n1 2011-02-01\n"
"2 2011-03-01 09:00\ndtype: object"
)
assert str(s) == exp
def test_max_multi_index_display(self):
# GH 7101
# doc example (indexing.rst)
# multi-index
arrays = [
["bar", "bar", "baz", "baz", "foo", "foo", "qux", "qux"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = list(zip(*arrays))
index = MultiIndex.from_tuples(tuples, names=["first", "second"])
s = Series(np.random.randn(8), index=index)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 10
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 5
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 10
# index
s = Series(np.random.randn(8), None)
with option_context("display.max_rows", 10):
assert len(str(s).split("\n")) == 9
with option_context("display.max_rows", 3):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 2):
assert len(str(s).split("\n")) == 4
with option_context("display.max_rows", 1):
assert len(str(s).split("\n")) == 3
with option_context("display.max_rows", 0):
assert len(str(s).split("\n")) == 9
# Make sure #8532 is fixed
def test_consistent_format(self):
s = pd.Series([1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0.9999, 1, 1] * 10)
with option_context("display.max_rows", 10, "display.show_dimensions", False):
res = repr(s)
exp = (
"0 1.0000\n1 1.0000\n2 1.0000\n3 "
"1.0000\n4 1.0000\n ... \n125 "
"1.0000\n126 1.0000\n127 0.9999\n128 "
"1.0000\n129 1.0000\ndtype: float64"
)
assert res == exp
    def check_ncols(self, s):
        with option_context("display.max_rows", 10):
            res = repr(s)
        # skip lines containing dots (the truncation marker); [:-1] drops the footer line
        lines = [
            line for line in res.split("\n") if not re.match(r"[^\.]*\.+", line)
        ][:-1]
ncolsizes = len({len(line.strip()) for line in lines})
assert ncolsizes == 1
def test_format_explicit(self):
test_sers = gen_series_formatting()
with option_context("display.max_rows", 4, "display.show_dimensions", False):
res = repr(test_sers["onel"])
exp = "0 a\n1 a\n ..\n98 a\n99 a\ndtype: object"
assert exp == res
res = repr(test_sers["twol"])
exp = "0 ab\n1 ab\n ..\n98 ab\n99 ab\ndtype: object"
assert exp == res
res = repr(test_sers["asc"])
exp = (
"0 a\n1 ab\n ... \n4 abcde\n5 "
"abcdef\ndtype: object"
)
assert exp == res
res = repr(test_sers["desc"])
exp = (
"5 abcdef\n4 abcde\n ... \n1 ab\n0 "
"a\ndtype: object"
)
assert exp == res
def test_ncols(self):
test_sers = gen_series_formatting()
for s in test_sers.values():
            self.check_ncols(s)
def test_max_rows_eq_one(self):
s = Series(range(10), dtype="int64")
with option_context("display.max_rows", 1):
strrepr = repr(s).split("\n")
exp1 = ["0", "0"]
res1 = strrepr[0].split()
assert exp1 == res1
exp2 = [".."]
res2 = strrepr[1].split()
assert exp2 == res2
def test_truncate_ndots(self):
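        # getndots returns the length of the first run of dots (the truncation marker) in the repr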
def getndots(s):
return len(re.match(r"[^\.]*(\.*)", s).groups()[0])
s = Series([0, 2, 3, 6])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 2
s = Series([0, 100, 200, 400])
with option_context("display.max_rows", 2):
strrepr = repr(s).replace("\n", "")
assert getndots(strrepr) == 3
def test_show_dimensions(self):
# gh-7117
s = Series(range(5))
assert "Length" not in repr(s)
with option_context("display.max_rows", 4):
assert "Length" in repr(s)
with option_context("display.show_dimensions", True):
assert "Length" in repr(s)
with option_context("display.max_rows", 4, "display.show_dimensions", False):
assert "Length" not in repr(s)
def test_repr_min_rows(self):
s = pd.Series(range(20))
        # with default settings there is no truncation even though length exceeds min_rows
assert ".." not in repr(s)
s = pd.Series(range(61))
        # the default max_rows of 60 triggers truncation once the length exceeds it
assert ".." in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 4):
# truncated after first two rows
assert ".." in repr(s)
assert "2 " not in repr(s)
with option_context("display.max_rows", 12, "display.min_rows", None):
            # when min_rows is None, fall back to the value of max_rows
assert "5 5" in repr(s)
with option_context("display.max_rows", 10, "display.min_rows", 12):
            # when min_rows is set higher than max_rows, the smaller value (max_rows) wins
assert "5 5" not in repr(s)
with option_context("display.max_rows", None, "display.min_rows", 12):
# max_rows of None -> never truncate
assert ".." not in repr(s)
def test_to_string_name(self):
s = Series(range(100), dtype="int64")
s.name = "myser"
res = s.to_string(max_rows=2, name=True)
exp = "0 0\n ..\n99 99\nName: myser"
assert res == exp
res = s.to_string(max_rows=2, name=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_dtype(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, dtype=True)
exp = "0 0\n ..\n99 99\ndtype: int64"
assert res == exp
res = s.to_string(max_rows=2, dtype=False)
exp = "0 0\n ..\n99 99"
assert res == exp
def test_to_string_length(self):
s = Series(range(100), dtype="int64")
res = s.to_string(max_rows=2, length=True)
exp = "0 0\n ..\n99 99\nLength: 100"
assert res == exp
def test_to_string_na_rep(self):
s = pd.Series(index=range(100), dtype=np.float64)
res = s.to_string(na_rep="foo", max_rows=2)
exp = "0 foo\n ..\n99 foo"
assert res == exp
def test_to_string_float_format(self):
s = pd.Series(range(10), dtype="float64")
res = s.to_string(float_format=lambda x: f"{x:2.1f}", max_rows=2)
exp = "0 0.0\n ..\n9 9.0"
assert res == exp
def test_to_string_header(self):
s = pd.Series(range(10), dtype="int64")
s.index.name = "foo"
res = s.to_string(header=True, max_rows=2)
exp = "foo\n0 0\n ..\n9 9"
assert res == exp
res = s.to_string(header=False, max_rows=2)
exp = "0 0\n ..\n9 9"
assert res == exp
def test_to_string_multindex_header(self):
# GH 16718
df = pd.DataFrame({"a": [0], "b": [1], "c": [2], "d": [3]}).set_index(
["a", "b"]
)
res = df.to_string(header=["r1", "r2"])
exp = " r1 r2\na b \n0 1 2 3"
assert res == exp
def _three_digit_exp():
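    # some platforms (e.g. older Windows C runtimes) format float exponents with
    # three digits: "1.7e+008" rather than "1.7e+08"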
return f"{1.7e8:.4g}" == "1.7e+008"
class TestFloatArrayFormatter:
def test_misc(self):
obj = fmt.FloatArrayFormatter(np.array([], dtype=np.float64))
result = obj.get_result()
assert len(result) == 0
def test_format(self):
obj = fmt.FloatArrayFormatter(np.array([12, 0], dtype=np.float64))
result = obj.get_result()
assert result[0] == " 12.0"
assert result[1] == " 0.0"
def test_output_significant_digits(self):
# Issue #9764
# In case default display precision changes:
with pd.option_context("display.precision", 6):
# DataFrame example from issue #9764
d = pd.DataFrame(
{
"col1": [
9.999e-8,
1e-7,
1.0001e-7,
2e-7,
4.999e-7,
5e-7,
5.0001e-7,
6e-7,
9.999e-7,
1e-6,
1.0001e-6,
2e-6,
4.999e-6,
5e-6,
5.0001e-6,
6e-6,
]
}
)
expected_output = {
(0, 6): " col1\n"
"0 9.999000e-08\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07",
(1, 6): " col1\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07",
(1, 8): " col1\n"
"1 1.000000e-07\n"
"2 1.000100e-07\n"
"3 2.000000e-07\n"
"4 4.999000e-07\n"
"5 5.000000e-07\n"
"6 5.000100e-07\n"
"7 6.000000e-07",
(8, 16): " col1\n"
"8 9.999000e-07\n"
"9 1.000000e-06\n"
"10 1.000100e-06\n"
"11 2.000000e-06\n"
"12 4.999000e-06\n"
"13 5.000000e-06\n"
"14 5.000100e-06\n"
"15 6.000000e-06",
(9, 16): " col1\n"
"9 0.000001\n"
"10 0.000001\n"
"11 0.000002\n"
"12 0.000005\n"
"13 0.000005\n"
"14 0.000005\n"
"15 0.000006",
}
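            # each key is a (start, stop) row slice of the frame; its str() must match exactly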
for (start, stop), v in expected_output.items():
assert str(d[start:stop]) == v
def test_too_long(self):
# GH 10451
with pd.option_context("display.precision", 4):
# need both a number > 1e6 and something that normally formats to
# having length > display.precision + 6
df = pd.DataFrame(dict(x=[12345.6789]))
assert str(df) == " x\n0 12345.6789"
df = pd.DataFrame(dict(x=[2e6]))
assert str(df) == " x\n0 2000000.0"
df = pd.DataFrame(dict(x=[12345.6789, 2e6]))
assert str(df) == " x\n0 1.2346e+04\n1 2.0000e+06"
class TestRepr_timedelta64:
def test_none(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base()
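        # _repr_base with no explicit format omits the time component when it is zero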
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "0 days"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_sub_day(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base(format="sub_day")
assert drepr(delta_1d) == "1 days"
assert drepr(-delta_1d) == "-1 days"
assert drepr(delta_0d) == "00:00:00"
assert drepr(delta_1s) == "00:00:01"
assert drepr(delta_500ms) == "00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_long(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1s = pd.to_timedelta(1, unit="s")
delta_500ms = pd.to_timedelta(500, unit="ms")
drepr = lambda x: x._repr_base(format="long")
assert drepr(delta_1d) == "1 days 00:00:00"
assert drepr(-delta_1d) == "-1 days +00:00:00"
assert drepr(delta_0d) == "0 days 00:00:00"
assert drepr(delta_1s) == "0 days 00:00:01"
assert drepr(delta_500ms) == "0 days 00:00:00.500000"
assert drepr(delta_1d + delta_1s) == "1 days 00:00:01"
assert drepr(-delta_1d + delta_1s) == "-1 days +00:00:01"
assert drepr(delta_1d + delta_500ms) == "1 days 00:00:00.500000"
assert drepr(-delta_1d + delta_500ms) == "-1 days +00:00:00.500000"
def test_all(self):
delta_1d = pd.to_timedelta(1, unit="D")
delta_0d = pd.to_timedelta(0, unit="D")
delta_1ns = pd.to_timedelta(1, unit="ns")
drepr = lambda x: x._repr_base(format="all")
assert drepr(delta_1d) == "1 days 00:00:00.000000000"
assert drepr(-delta_1d) == "-1 days +00:00:00.000000000"
assert drepr(delta_0d) == "0 days 00:00:00.000000000"
assert drepr(delta_1ns) == "0 days 00:00:00.000000001"
assert drepr(-delta_1d + delta_1ns) == "-1 days +00:00:00.000000001"
class TestTimedelta64Formatter:
def test_days(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="D")
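        # box=True wraps each formatted value in quotes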
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x[1:2], box=True).get_result()
assert result[0].strip() == "'1 days'"
result = fmt.Timedelta64Formatter(x, box=False).get_result()
assert result[0].strip() == "0 days"
assert result[1].strip() == "1 days"
result = fmt.Timedelta64Formatter(x[1:2], box=False).get_result()
assert result[0].strip() == "1 days"
def test_days_neg(self):
x = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="D")
result = fmt.Timedelta64Formatter(-x, box=True).get_result()
assert result[0].strip() == "'0 days'"
assert result[1].strip() == "'-1 days'"
def test_subdays(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="s")
result = fmt.Timedelta64Formatter(y, box=True).get_result()
assert result[0].strip() == "'00:00:00'"
assert result[1].strip() == "'00:00:01'"
def test_subdays_neg(self):
y = pd.to_timedelta(list(range(5)) + [pd.NaT], unit="s")
result = fmt.Timedelta64Formatter(-y, box=True).get_result()
assert result[0].strip() == "'00:00:00'"
assert result[1].strip() == "'-1 days +23:59:59'"
def test_zero(self):
x = pd.to_timedelta(list(range(1)) + [pd.NaT], unit="D")
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
x = pd.to_timedelta(list(range(1)), unit="D")
result = fmt.Timedelta64Formatter(x, box=True).get_result()
assert result[0].strip() == "'0 days'"
class TestDatetime64Formatter:
def test_mixed(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 1, 12), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 00:00:00"
assert result[1].strip() == "2013-01-01 12:00:00"
def test_dates(self):
x = Series([datetime(2013, 1, 1), datetime(2013, 1, 2), pd.NaT])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01"
assert result[1].strip() == "2013-01-02"
def test_date_nanos(self):
x = Series([Timestamp(200)])
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "1970-01-01 00:00:00.000000200"
def test_dates_display(self):
# 10170
        # make sure that we consistently display date formatting
x = Series(date_range("20130101 09:00:00", periods=5, freq="D"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-05 09:00:00"
x = Series(date_range("20130101 09:00:00", periods=5, freq="s"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:04"
x = Series(date_range("20130101 09:00:00", periods=5, freq="ms"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.004"
x = Series(date_range("20130101 09:00:00", periods=5, freq="us"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000004"
x = Series(date_range("20130101 09:00:00", periods=5, freq="N"))
x.iloc[1] = np.nan
result = fmt.Datetime64Formatter(x).get_result()
assert result[0].strip() == "2013-01-01 09:00:00.000000000"
assert result[1].strip() == "NaT"
assert result[4].strip() == "2013-01-01 09:00:00.000000004"
def test_datetime64formatter_yearmonth(self):
x = Series([datetime(2016, 1, 1), datetime(2016, 2, 2)])
def format_func(x):
return x.strftime("%Y-%m")
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ["2016-01", "2016-02"]
def test_datetime64formatter_hoursecond(self):
x = Series(
pd.to_datetime(["10:10:10.100", "12:12:12.120"], format="%H:%M:%S.%f")
)
def format_func(x):
return x.strftime("%H:%M")
formatter = fmt.Datetime64Formatter(x, formatter=format_func)
result = formatter.get_result()
assert result == ["10:10", "12:12"]
class TestNaTFormatting:
def test_repr(self):
assert repr(pd.NaT) == "NaT"
def test_str(self):
assert str(pd.NaT) == "NaT"
class TestDatetimeIndexFormat:
def test_datetime(self):
formatted = pd.to_datetime([datetime(2003, 1, 1, 12), pd.NaT]).format()
assert formatted[0] == "2003-01-01 12:00:00"
assert formatted[1] == "NaT"
def test_date(self):
formatted = pd.to_datetime([datetime(2003, 1, 1), pd.NaT]).format()
assert formatted[0] == "2003-01-01"
assert formatted[1] == "NaT"
def test_date_tz(self):
formatted = pd.to_datetime([datetime(2013, 1, 1)], utc=True).format()
assert formatted[0] == "2013-01-01 00:00:00+00:00"
formatted = pd.to_datetime([datetime(2013, 1, 1), pd.NaT], utc=True).format()
assert formatted[0] == "2013-01-01 00:00:00+00:00"
def test_date_explicit_date_format(self):
formatted = pd.to_datetime([datetime(2003, 2, 1), pd.NaT]).format(
date_format="%m-%d-%Y", na_rep="UT"
)
assert formatted[0] == "02-01-2003"
assert formatted[1] == "UT"
class TestDatetimeIndexUnicode:
def test_dates(self):
text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)]))
assert "['2013-01-01'," in text
assert ", '2014-01-01']" in text
def test_mixed(self):
text = str(
pd.to_datetime(
[datetime(2013, 1, 1), datetime(2014, 1, 1, 12), datetime(2014, 1, 1)]
)
)
assert "'2013-01-01 00:00:00'," in text
assert "'2014-01-01 00:00:00']" in text
class TestStringRepTimestamp:
def test_no_tz(self):
dt_date = datetime(2013, 1, 2)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
ts_nanos_only = Timestamp(200)
assert str(ts_nanos_only) == "1970-01-01 00:00:00.000000200"
ts_nanos_micros = Timestamp(1200)
assert str(ts_nanos_micros) == "1970-01-01 00:00:00.000001200"
def test_tz_pytz(self):
dt_date = datetime(2013, 1, 2, tzinfo=pytz.utc)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=pytz.utc)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=pytz.utc)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
def test_tz_dateutil(self):
utc = dateutil.tz.tzutc()
dt_date = datetime(2013, 1, 2, tzinfo=utc)
assert str(dt_date) == str(Timestamp(dt_date))
dt_datetime = datetime(2013, 1, 2, 12, 1, 3, tzinfo=utc)
assert str(dt_datetime) == str(Timestamp(dt_datetime))
dt_datetime_us = datetime(2013, 1, 2, 12, 1, 3, 45, tzinfo=utc)
assert str(dt_datetime_us) == str(Timestamp(dt_datetime_us))
def test_nat_representations(self):
for f in (str, repr, methodcaller("isoformat")):
assert f(pd.NaT) == "NaT"
def test_format_percentiles():
result = fmt.format_percentiles([0.01999, 0.02001, 0.5, 0.666666, 0.9999])
expected = ["1.999%", "2.001%", "50%", "66.667%", "99.99%"]
assert result == expected
result = fmt.format_percentiles([0, 0.5, 0.02001, 0.5, 0.666666, 0.9999])
expected = ["0%", "50%", "2.0%", "50%", "66.67%", "99.99%"]
assert result == expected
msg = r"percentiles should all be in the interval \[0,1\]"
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([0.1, np.nan, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([-0.001, 0.1, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([2, 0.1, 0.5])
with pytest.raises(ValueError, match=msg):
fmt.format_percentiles([0.1, 0.5, "a"])
def test_format_percentiles_integer_idx():
# Issue #26660
result = fmt.format_percentiles(np.linspace(0, 1, 10 + 1))
expected = [
"0%",
"10%",
"20%",
"30%",
"40%",
"50%",
"60%",
"70%",
"80%",
"90%",
"100%",
]
assert result == expected
def test_repr_html_ipython_config(ip):
code = textwrap.dedent(
"""\
import pandas as pd
df = pd.DataFrame({"A": [1, 2]})
df._repr_html_()
cfg = get_ipython().config
cfg['IPKernelApp']['parent_appname']
df._repr_html_()
"""
)
result = ip.run_cell(code)
assert not result.error_in_exec
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
@pytest.mark.parametrize(
"encoding, data",
[(None, "abc"), ("utf-8", "abc"), ("gbk", "造成输出中文显示乱码"), ("foo", "abc")],
)
def test_filepath_or_buffer_arg(
method,
filepath_or_buffer,
assert_filepath_or_buffer_equals,
encoding,
data,
filepath_or_buffer_id,
):
df = DataFrame([data])
if filepath_or_buffer_id not in ["string", "pathlike"] and encoding is not None:
with pytest.raises(
ValueError, match="buf is not a file name and encoding is specified."
):
getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
elif encoding == "foo":
with tm.assert_produces_warning(None):
with pytest.raises(LookupError, match="unknown encoding"):
getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
else:
expected = getattr(df, method)()
getattr(df, method)(buf=filepath_or_buffer, encoding=encoding)
assert_filepath_or_buffer_equals(expected)
@pytest.mark.parametrize("method", ["to_string", "to_html", "to_latex"])
def test_filepath_or_buffer_bad_arg_raises(float_frame, method):
msg = "buf is not a file name and it has no write method"
with pytest.raises(TypeError, match=msg):
getattr(float_frame, method)(buf=object())
|
py
|
1a5d12e10cc8787c8120940f3bc746ef1d9956d2
|
# ! Warning !
# This code is the worst code
# I have ever written. Reading
# it can cause and has caused
# permanent eye damage.
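#
# Usage sketch (hypothetical file names; the script reads sys.argv[1] and
# writes sys.argv[2]):
#     python assembler.py program.asm program.bin
# Each emitted line appears to be a 26-bit instruction word: a 4-bit opcode,
# 3-bit register fields, and/or a 16-bit immediate, zero-padded when only
# registers are present.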
import sys
file = open(sys.argv[1])
assembly = file.read()
file.close()
tokens = assembly.split()
output = ""
curInstr = ""
jump = False
for tok in tokens:
if tok == "add":
curInstr += "0001"
elif tok == "addi":
curInstr += "0010"
elif tok == "jmp":
curInstr += "0011000000"
elif tok == "jeq":
curInstr += "0100"
elif tok == "store":
curInstr += "0101"
elif tok == "load":
curInstr += "0110"
elif tok == "xor":
curInstr += "0111"
elif tok == "and":
curInstr += "1000"
elif tok[0] == "r":
curInstr += "{0:03b}".format(int(tok[1]))
else:
curInstr += "{0:016b}".format(int(tok))
if len(curInstr) == 26:
output += curInstr + "\n"
curInstr = ""
elif len(curInstr) == 13:
curInstr += "0000000000000\n"
output += curInstr
curInstr = ""
file = open(sys.argv[2], "w")
file.write(output)
file.close()
|
py
|
1a5d144205fa821491245cfcbdc3ded05d4122fc
|
from setuptools import find_packages, setup
setup(
name="pytorch_fd",
author="Ben Mann",
version='0.0.0',
author_email="[email protected]",
packages=find_packages(exclude=["*.tests", "*.tests.*",
"tests.*", "tests"]),
long_description=open("README.md", "r", encoding='utf-8').read(),
long_description_content_type="text/markdown",
license='MIT',
url="https://github.com/cybertronai/pytorch-fd",
install_requires=[
'torch>=0.4.1',
'tqdm',
'tensorboardX',
'torchvision',
],
)
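# Typical usage (standard setuptools workflow, nothing package-specific): an
# editable install for local development, run from the directory that
# contains this setup.py:
#     pip install -e .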
|
py
|
1a5d14b4882f062f005e91bf3668a7e3ea9670d5
|
import datetime
import logging
import multiprocessing
import os
import secrets
import shutil
from typing import Any, Dict, Iterable, List, Optional, Tuple
import boto3
import orjson
from bs4 import BeautifulSoup
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from django.utils.timezone import now as timezone_now
from psycopg2.extras import execute_values
from psycopg2.sql import SQL, Identifier
from analytics.models import RealmCount, StreamCount, UserCount
from zerver.lib.actions import (
UserMessageLite,
bulk_insert_ums,
do_change_avatar_fields,
do_change_plan_type,
)
from zerver.lib.avatar_hash import user_avatar_path_from_ids
from zerver.lib.bulk_create import bulk_create_users, bulk_set_users_or_streams_recipient_fields
from zerver.lib.export import DATE_FIELDS, Field, Path, Record, TableData, TableName
from zerver.lib.markdown import markdown_convert
from zerver.lib.markdown import version as markdown_version
from zerver.lib.message import get_last_message_id
from zerver.lib.server_initialization import create_internal_realm, server_initialized
from zerver.lib.streams import render_stream_description
from zerver.lib.timestamp import datetime_to_timestamp
from zerver.lib.upload import BadImageError, guess_type, sanitize_name
from zerver.lib.utils import generate_api_key, process_list_in_batches
from zerver.models import (
AlertWord,
Attachment,
BotConfigData,
BotStorageData,
Client,
CustomProfileField,
CustomProfileFieldValue,
DefaultStream,
Huddle,
Message,
MutedTopic,
Reaction,
Realm,
RealmAuditLog,
RealmDomain,
RealmEmoji,
RealmFilter,
Recipient,
Service,
Stream,
Subscription,
UserActivity,
UserActivityInterval,
UserGroup,
UserGroupMembership,
UserHotspot,
UserMessage,
UserPresence,
UserProfile,
get_huddle_hash,
get_system_bot,
get_user_profile_by_id,
)
realm_tables = [("zerver_defaultstream", DefaultStream, "defaultstream"),
("zerver_realmemoji", RealmEmoji, "realmemoji"),
("zerver_realmdomain", RealmDomain, "realmdomain"),
("zerver_realmfilter", RealmFilter, "realmfilter")] # List[Tuple[TableName, Any, str]]
# ID_MAP is a dictionary that maps table names to dictionaries
# that map old ids to new ids. We use this in
# re_map_foreign_keys and other places.
#
# We explicitly initialize ID_MAP with the tables that support
# id re-mapping.
#
# Code reviewers: give these tables extra scrutiny, as we need to
# make sure to reload related tables AFTER we re-map the ids.
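#
# Illustrative example (not real data): if a Client row was exported with
# id 3 but ends up stored locally with id 17, then ID_MAP['client'][3] == 17,
# and re_map_foreign_keys() rewrites any foreign key that referenced 3 to 17.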
ID_MAP: Dict[str, Dict[int, int]] = {
'alertword': {},
'client': {},
'user_profile': {},
'huddle': {},
'realm': {},
'stream': {},
'recipient': {},
'subscription': {},
'defaultstream': {},
'reaction': {},
'realmemoji': {},
'realmdomain': {},
'realmfilter': {},
'message': {},
'user_presence': {},
'useractivity': {},
'useractivityinterval': {},
'usermessage': {},
'customprofilefield': {},
'customprofilefieldvalue': {},
'attachment': {},
'realmauditlog': {},
'recipient_to_huddle_map': {},
'userhotspot': {},
'mutedtopic': {},
'service': {},
'usergroup': {},
'usergroupmembership': {},
'botstoragedata': {},
'botconfigdata': {},
'analytics_realmcount': {},
'analytics_streamcount': {},
'analytics_usercount': {},
}
id_map_to_list: Dict[str, Dict[int, List[int]]] = {
'huddle_to_user_list': {},
}
path_maps: Dict[str, Dict[str, str]] = {
'attachment_path': {},
}
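# Illustrative example (hypothetical paths): import_uploads() might record
# path_maps['attachment_path']['2/ab/old.png'] = '17/SomeToken/old.png', and
# fix_upload_links() then rewrites message content that references the old path.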
def update_id_map(table: TableName, old_id: int, new_id: int) -> None:
if table not in ID_MAP:
raise Exception(f'''
Table {table} is not initialized in ID_MAP, which could
mean that we have not thought through circular
dependencies.
''')
ID_MAP[table][old_id] = new_id
def fix_datetime_fields(data: TableData, table: TableName) -> None:
for item in data[table]:
for field_name in DATE_FIELDS[table]:
if item[field_name] is not None:
item[field_name] = datetime.datetime.fromtimestamp(item[field_name], tz=datetime.timezone.utc)
def fix_upload_links(data: TableData, message_table: TableName) -> None:
"""
Because the URLs for uploaded files encode the realm ID of the
organization being imported (which is only determined at import
time), we need to rewrite the URLs of links to uploaded files
during the import process.
"""
for message in data[message_table]:
if message['has_attachment'] is True:
for key, value in path_maps['attachment_path'].items():
if key in message['content']:
message['content'] = message['content'].replace(key, value)
if message['rendered_content']:
message['rendered_content'] = message['rendered_content'].replace(key, value)
def create_subscription_events(data: TableData, realm_id: int) -> None:
"""
When the export data doesn't contain the table `zerver_realmauditlog`,
this function creates RealmAuditLog objects for `subscription_created`
type event for all the existing Stream subscriptions.
This is needed for all the export tools which do not include the
table `zerver_realmauditlog` (Slack, Gitter, etc.) because the appropriate
data about when a user was subscribed is not exported by the third-party
service.
"""
all_subscription_logs = []
event_last_message_id = get_last_message_id()
event_time = timezone_now()
recipient_id_to_stream_id = {
d['id']: d['type_id']
for d in data['zerver_recipient']
if d['type'] == Recipient.STREAM
}
for sub in data['zerver_subscription']:
recipient_id = sub['recipient_id']
stream_id = recipient_id_to_stream_id.get(recipient_id)
if stream_id is None:
continue
user_id = sub['user_profile_id']
all_subscription_logs.append(RealmAuditLog(realm_id=realm_id,
acting_user_id=user_id,
modified_user_id=user_id,
modified_stream_id=stream_id,
event_last_message_id=event_last_message_id,
event_time=event_time,
event_type=RealmAuditLog.SUBSCRIPTION_CREATED))
RealmAuditLog.objects.bulk_create(all_subscription_logs)
def fix_service_tokens(data: TableData, table: TableName) -> None:
"""
The tokens in the services are created by 'generate_api_key'.
As the tokens are unique, they should be re-created for the imports.
"""
for item in data[table]:
item['token'] = generate_api_key()
def process_huddle_hash(data: TableData, table: TableName) -> None:
"""
Build new huddle hashes with the updated ids of the users
"""
for huddle in data[table]:
user_id_list = id_map_to_list['huddle_to_user_list'][huddle['id']]
huddle['huddle_hash'] = get_huddle_hash(user_id_list)
def get_huddles_from_subscription(data: TableData, table: TableName) -> None:
"""
Extract the IDs of the user_profiles involved in a huddle from the subscription object
This helps to generate a unique huddle hash from the updated user_profile ids
"""
id_map_to_list['huddle_to_user_list'] = {
value: [] for value in ID_MAP['recipient_to_huddle_map'].values()}
for subscription in data[table]:
if subscription['recipient'] in ID_MAP['recipient_to_huddle_map']:
huddle_id = ID_MAP['recipient_to_huddle_map'][subscription['recipient']]
id_map_to_list['huddle_to_user_list'][huddle_id].append(subscription['user_profile_id'])
def fix_customprofilefield(data: TableData) -> None:
"""
In CustomProfileField with 'field_type' like 'USER', the IDs need to be
re-mapped.
"""
field_type_USER_id_list = []
for item in data['zerver_customprofilefield']:
if item['field_type'] == CustomProfileField.USER:
field_type_USER_id_list.append(item['id'])
for item in data['zerver_customprofilefieldvalue']:
if item['field_id'] in field_type_USER_id_list:
old_user_id_list = orjson.loads(item['value'])
new_id_list = re_map_foreign_keys_many_to_many_internal(
table='zerver_customprofilefieldvalue',
field_name='value',
related_table='user_profile',
old_id_list=old_user_id_list)
item['value'] = orjson.dumps(new_id_list).decode()
def fix_message_rendered_content(realm: Realm,
sender_map: Dict[int, Record],
messages: List[Record]) -> None:
"""
This function sets the rendered_content of all the messages
after the messages have been imported from a non-Zulip platform.
"""
for message in messages:
if message['rendered_content'] is not None:
# For Zulip->Zulip imports, we use the original rendered
# Markdown; this avoids issues where e.g. a mention can no
# longer render properly because a user has changed their
# name.
#
# However, we still need to update the data-user-id and
# similar values stored on mentions, stream mentions, and
# similar syntax in the rendered HTML.
soup = BeautifulSoup(message["rendered_content"], "html.parser")
user_mentions = soup.findAll("span", {"class": "user-mention"})
if len(user_mentions) != 0:
user_id_map = ID_MAP["user_profile"]
for mention in user_mentions:
if not mention.has_attr("data-user-id"):
# Legacy mentions don't have a data-user-id
# field; we should just import them
# unmodified.
continue
if mention['data-user-id'] == "*":
# No rewriting is required for wildcard mentions
continue
old_user_id = int(mention["data-user-id"])
if old_user_id in user_id_map:
mention["data-user-id"] = str(user_id_map[old_user_id])
message['rendered_content'] = str(soup)
stream_mentions = soup.findAll("a", {"class": "stream"})
if len(stream_mentions) != 0:
stream_id_map = ID_MAP["stream"]
for mention in stream_mentions:
old_stream_id = int(mention["data-stream-id"])
if old_stream_id in stream_id_map:
mention["data-stream-id"] = str(stream_id_map[old_stream_id])
message['rendered_content'] = str(soup)
user_group_mentions = soup.findAll("span", {"class": "user-group-mention"})
if len(user_group_mentions) != 0:
user_group_id_map = ID_MAP["usergroup"]
for mention in user_group_mentions:
old_user_group_id = int(mention["data-user-group-id"])
if old_user_group_id in user_group_id_map:
mention["data-user-group-id"] = str(user_group_id_map[old_user_group_id])
message['rendered_content'] = str(soup)
continue
try:
content = message['content']
sender_id = message['sender_id']
sender = sender_map[sender_id]
sent_by_bot = sender['is_bot']
translate_emoticons = sender['translate_emoticons']
# We don't handle alert words on import from third-party
# platforms, since they generally don't have an "alert
# words" type feature, and notifications aren't important anyway.
realm_alert_words_automaton = None
rendered_content = markdown_convert(
content=content,
realm_alert_words_automaton=realm_alert_words_automaton,
message_realm=realm,
sent_by_bot=sent_by_bot,
translate_emoticons=translate_emoticons,
)
message['rendered_content'] = rendered_content
message['rendered_content_version'] = markdown_version
except Exception:
# This generally happens with two possible causes:
# * rendering Markdown throwing an uncaught exception
# * rendering Markdown failing with the exception being
            # caught in Markdown (which then returns None, causing the
# rendered_content assert above to fire).
logging.warning("Error in Markdown rendering for message ID %s; continuing", message['id'])
def current_table_ids(data: TableData, table: TableName) -> List[int]:
"""
Returns the ids present in the current table
"""
id_list = []
for item in data[table]:
id_list.append(item["id"])
return id_list
def idseq(model_class: Any) -> str:
if model_class == RealmDomain:
return 'zerver_realmalias_id_seq'
elif model_class == BotStorageData:
return 'zerver_botuserstatedata_id_seq'
elif model_class == BotConfigData:
return 'zerver_botuserconfigdata_id_seq'
return f'{model_class._meta.db_table}_id_seq'
def allocate_ids(model_class: Any, count: int) -> List[int]:
"""
Increases the sequence number for a given table by the amount of objects being
imported into that table. Hence, this gives a reserved range of IDs to import the
converted Slack objects into the tables.
"""
conn = connection.cursor()
sequence = idseq(model_class)
conn.execute("select nextval(%s) from generate_series(1, %s)",
[sequence, count])
query = conn.fetchall() # Each element in the result is a tuple like (5,)
conn.close()
# convert List[Tuple[int]] to List[int]
return [item[0] for item in query]
def convert_to_id_fields(data: TableData, table: TableName, field_name: Field) -> None:
'''
When Django gives us dict objects via model_to_dict, the foreign
key fields are `foo`, but we want `foo_id` for the bulk insert.
This function handles the simple case where we simply rename
the fields. For cases where we need to munge ids in the
database, see re_map_foreign_keys.
'''
for item in data[table]:
item[field_name + "_id"] = item[field_name]
del item[field_name]
def re_map_foreign_keys(data: TableData,
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool=False,
id_field: bool=False,
recipient_field: bool=False,
reaction_field: bool=False) -> None:
"""
This is a wrapper function for all the realm data tables
and only avatar and attachment records need to be passed through the internal function
because of the difference in data format (TableData corresponding to realm data tables
and List[Record] corresponding to the avatar and attachment records)
"""
# See comments in bulk_import_user_message_data.
assert('usermessage' not in related_table)
re_map_foreign_keys_internal(data[table], table, field_name, related_table, verbose, id_field,
recipient_field, reaction_field)
def re_map_foreign_keys_internal(data_table: List[Record],
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool=False,
id_field: bool=False,
recipient_field: bool=False,
reaction_field: bool=False) -> None:
'''
We occasionally need to assign new ids to rows during the
import/export process, to accommodate things like existing rows
already being in tables. See bulk_import_client for more context.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this fixer function does
the re-mapping. (It also appends `_id` to the field.)
'''
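    # Illustrative example (not real data): with ID_MAP['realm'] == {5: 17},
    # a zerver_stream row {'id': 1, 'realm': 5} becomes {'id': 1, 'realm_id': 17};
    # the old key is deleted and '_id' is appended unless id_field=True.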
lookup_table = ID_MAP[related_table]
for item in data_table:
old_id = item[field_name]
if recipient_field:
if related_table == "stream" and item['type'] == 2:
pass
elif related_table == "user_profile" and item['type'] == 1:
pass
elif related_table == "huddle" and item['type'] == 3:
# save the recipient id with the huddle id, so that we can extract
# the user_profile ids involved in a huddle with the help of the
# subscription object
# check function 'get_huddles_from_subscription'
ID_MAP['recipient_to_huddle_map'][item['id']] = lookup_table[old_id]
else:
continue
old_id = item[field_name]
if reaction_field:
if item['reaction_type'] == Reaction.REALM_EMOJI:
old_id = int(old_id)
else:
continue
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info('Remapping %s %s from %s to %s',
table, field_name + '_id', old_id, new_id)
else:
new_id = old_id
if not id_field:
item[field_name + "_id"] = new_id
del item[field_name]
else:
if reaction_field:
item[field_name] = str(new_id)
else:
item[field_name] = new_id
def re_map_foreign_keys_many_to_many(data: TableData,
table: TableName,
field_name: Field,
related_table: TableName,
verbose: bool=False) -> None:
"""
We need to assign new ids to rows during the import/export
process.
The tricky part is making sure that foreign key references
are in sync with the new ids, and this wrapper function does
the re-mapping only for ManyToMany fields.
"""
for item in data[table]:
old_id_list = item[field_name]
new_id_list = re_map_foreign_keys_many_to_many_internal(
table, field_name, related_table, old_id_list, verbose)
item[field_name] = new_id_list
del item[field_name]
def re_map_foreign_keys_many_to_many_internal(table: TableName,
field_name: Field,
related_table: TableName,
old_id_list: List[int],
verbose: bool=False) -> List[int]:
"""
This is an internal function for tables with ManyToMany fields,
which takes the old ID list of the ManyToMany relation and returns the
new updated ID list.
"""
lookup_table = ID_MAP[related_table]
new_id_list = []
for old_id in old_id_list:
if old_id in lookup_table:
new_id = lookup_table[old_id]
if verbose:
logging.info('Remapping %s %s from %s to %s',
table, field_name + '_id', old_id, new_id)
else:
new_id = old_id
new_id_list.append(new_id)
return new_id_list
def fix_bitfield_keys(data: TableData, table: TableName, field_name: Field) -> None:
for item in data[table]:
item[field_name] = item[field_name + '_mask']
del item[field_name + '_mask']
def fix_realm_authentication_bitfield(data: TableData, table: TableName, field_name: Field) -> None:
"""Used to fixup the authentication_methods bitfield to be a string"""
for item in data[table]:
values_as_bitstring = ''.join('1' if field[1] else '0' for field in
item[field_name])
values_as_int = int(values_as_bitstring, 2)
item[field_name] = values_as_int
def remove_denormalized_recipient_column_from_data(data: TableData) -> None:
"""
The recipient column shouldn't be imported, we'll set the correct values
when Recipient table gets imported.
"""
for stream_dict in data['zerver_stream']:
if "recipient" in stream_dict:
del stream_dict["recipient"]
for user_profile_dict in data['zerver_userprofile']:
if 'recipient' in user_profile_dict:
del user_profile_dict['recipient']
for huddle_dict in data['zerver_huddle']:
if 'recipient' in huddle_dict:
del huddle_dict['recipient']
def get_db_table(model_class: Any) -> str:
"""E.g. (RealmDomain -> 'zerver_realmdomain')"""
return model_class._meta.db_table
def update_model_ids(model: Any, data: TableData, related_table: TableName) -> None:
table = get_db_table(model)
# Important: remapping usermessage rows is
    # not only unnecessary, it's expensive and can cause
# memory errors. We don't even use ids from ID_MAP.
assert('usermessage' not in table)
old_id_list = current_table_ids(data, table)
allocated_id_list = allocate_ids(model, len(data[table]))
for item in range(len(data[table])):
update_id_map(related_table, old_id_list[item], allocated_id_list[item])
re_map_foreign_keys(data, table, 'id', related_table=related_table, id_field=True)
def bulk_import_user_message_data(data: TableData, dump_file_id: int) -> None:
model = UserMessage
table = 'zerver_usermessage'
lst = data[table]
# IMPORTANT NOTE: We do not use any primary id
# data from either the import itself or ID_MAP.
# We let the DB itself generate ids. Note that
# no tables use user_message.id as a foreign key,
# so we can safely avoid all re-mapping complexity.
def process_batch(items: List[Dict[str, Any]]) -> None:
ums = [
UserMessageLite(
user_profile_id = item['user_profile_id'],
message_id = item['message_id'],
flags=item['flags'],
)
for item in items
]
bulk_insert_ums(ums)
chunk_size = 10000
process_list_in_batches(
lst=lst,
chunk_size=chunk_size,
process_batch=process_batch,
)
logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)
def bulk_import_model(data: TableData, model: Any, dump_file_id: Optional[str]=None) -> None:
table = get_db_table(model)
# TODO, deprecate dump_file_id
model.objects.bulk_create(model(**item) for item in data[table])
if dump_file_id is None:
logging.info("Successfully imported %s from %s.", model, table)
else:
logging.info("Successfully imported %s from %s[%s].", model, table, dump_file_id)
# Client is a table shared by multiple realms, so in order to
# correctly import multiple realms into the same server, we need to
# check if a Client object already exists, and so we need to support
# remapping all Client IDs to the values in the new DB.
def bulk_import_client(data: TableData, model: Any, table: TableName) -> None:
for item in data[table]:
try:
client = Client.objects.get(name=item['name'])
except Client.DoesNotExist:
client = Client.objects.create(name=item['name'])
update_id_map(table='client', old_id=item['id'], new_id=client.id)
def process_avatars(record: Dict[str, Any]) -> None:
from zerver.lib.upload import upload_backend
if record['s3_path'].endswith('.original'):
user_profile = get_user_profile_by_id(record['user_profile_id'])
if settings.LOCAL_UPLOADS_DIR is not None:
avatar_path = user_avatar_path_from_ids(user_profile.id, record['realm_id'])
medium_file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars",
avatar_path) + '-medium.png'
if os.path.exists(medium_file_path):
# We remove the image here primarily to deal with
# issues when running the import script multiple
# times in development (where one might reuse the
# same realm ID from a previous iteration).
os.remove(medium_file_path)
try:
upload_backend.ensure_medium_avatar_image(user_profile=user_profile)
if record.get("importer_should_thumbnail"):
upload_backend.ensure_basic_avatar_image(user_profile=user_profile)
except BadImageError:
logging.warning(
"Could not thumbnail avatar image for user %s; ignoring",
user_profile.id,
)
# Delete the record of the avatar to avoid 404s.
do_change_avatar_fields(user_profile, UserProfile.AVATAR_FROM_GRAVATAR, acting_user=None)
def import_uploads(realm: Realm, import_dir: Path, processes: int, processing_avatars: bool=False,
processing_emojis: bool=False, processing_realm_icons: bool=False) -> None:
if processing_avatars and processing_emojis:
raise AssertionError("Cannot import avatars and emojis at the same time!")
if processing_avatars:
logging.info("Importing avatars")
elif processing_emojis:
logging.info("Importing emojis")
elif processing_realm_icons:
logging.info("Importing realm icons and logos")
else:
logging.info("Importing uploaded files")
records_filename = os.path.join(import_dir, "records.json")
with open(records_filename, "rb") as records_file:
records: List[Dict[str, Any]] = orjson.loads(records_file.read())
timestamp = datetime_to_timestamp(timezone_now())
re_map_foreign_keys_internal(records, 'records', 'realm_id', related_table="realm",
id_field=True)
if not processing_emojis and not processing_realm_icons:
re_map_foreign_keys_internal(records, 'records', 'user_profile_id',
related_table="user_profile", id_field=True)
s3_uploads = settings.LOCAL_UPLOADS_DIR is None
if s3_uploads:
if processing_avatars or processing_emojis or processing_realm_icons:
bucket_name = settings.S3_AVATAR_BUCKET
else:
bucket_name = settings.S3_AUTH_UPLOADS_BUCKET
session = boto3.Session(settings.S3_KEY, settings.S3_SECRET_KEY)
bucket = session.resource('s3', endpoint_url=settings.S3_ENDPOINT_URL).Bucket(bucket_name)
count = 0
for record in records:
count += 1
if count % 1000 == 0:
logging.info("Processed %s/%s uploads", count, len(records))
if processing_avatars:
# For avatars, we need to rehash the user ID with the
# new server's avatar salt
relative_path = user_avatar_path_from_ids(record['user_profile_id'], record['realm_id'])
if record['s3_path'].endswith('.original'):
relative_path += '.original'
else:
# TODO: This really should be unconditional. However,
# until we fix the S3 upload backend to use the .png
# path suffix for its normal avatar URLs, we need to
# only do this for the LOCAL_UPLOADS_DIR backend.
if not s3_uploads:
relative_path += '.png'
elif processing_emojis:
# For emojis we follow the function 'upload_emoji_image'
relative_path = RealmEmoji.PATH_ID_TEMPLATE.format(
realm_id=record['realm_id'],
emoji_file_name=record['file_name'])
record['last_modified'] = timestamp
elif processing_realm_icons:
icon_name = os.path.basename(record["path"])
relative_path = os.path.join(str(record['realm_id']), "realm", icon_name)
record['last_modified'] = timestamp
else:
            # Should be kept in sync with its equivalent in zerver/lib/upload.py in the
# function 'upload_message_file'
relative_path = "/".join([
str(record['realm_id']),
secrets.token_urlsafe(18),
sanitize_name(os.path.basename(record['path'])),
])
path_maps['attachment_path'][record['s3_path']] = relative_path
if s3_uploads:
key = bucket.Object(relative_path)
metadata = {}
if processing_emojis and "user_profile_id" not in record:
# Exported custom emoji from tools like Slack don't have
# the data for what user uploaded them in `user_profile_id`.
pass
elif processing_realm_icons and "user_profile_id" not in record:
# Exported realm icons and logos from local export don't have
# the value of user_profile_id in the associated record.
pass
else:
user_profile_id = int(record['user_profile_id'])
# Support email gateway bot and other cross-realm messages
if user_profile_id in ID_MAP["user_profile"]:
logging.info("Uploaded by ID mapped user: %s!", user_profile_id)
user_profile_id = ID_MAP["user_profile"][user_profile_id]
user_profile = get_user_profile_by_id(user_profile_id)
metadata["user_profile_id"] = str(user_profile.id)
if 'last_modified' in record:
metadata["orig_last_modified"] = str(record['last_modified'])
metadata["realm_id"] = str(record['realm_id'])
# Zulip exports will always have a content-type, but third-party exports might not.
content_type = record.get("content_type")
if content_type is None:
content_type = guess_type(record['s3_path'])[0]
if content_type is None:
# This is the default for unknown data. Note that
# for `.original` files, this is the value we'll
# set; that is OK, because those are never served
# directly anyway.
content_type = 'application/octet-stream'
key.upload_file(os.path.join(import_dir, record['path']),
ExtraArgs={
'ContentType': content_type,
'Metadata': metadata})
else:
if processing_avatars or processing_emojis or processing_realm_icons:
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "avatars", relative_path)
else:
file_path = os.path.join(settings.LOCAL_UPLOADS_DIR, "files", relative_path)
orig_file_path = os.path.join(import_dir, record['path'])
os.makedirs(os.path.dirname(file_path), exist_ok=True)
shutil.copy(orig_file_path, file_path)
if processing_avatars:
# Ensure that we have medium-size avatar images for every
# avatar. TODO: This implementation is hacky, both in that it
# does get_user_profile_by_id for each user, and in that it
# might be better to require the export to just have these.
if processes == 1:
for record in records:
process_avatars(record)
else:
connection.close()
cache._cache.disconnect_all()
with multiprocessing.Pool(processes) as p:
for out in p.imap_unordered(process_avatars, records):
pass
# Importing data suffers from a difficult ordering problem because of
# models that reference each other circularly. Here is a correct order.
#
# * Client [no deps]
# * Realm [-notifications_stream]
# * Stream [only depends on realm]
# * Realm's notifications_stream
# * Now can do all realm_tables
# * UserProfile, in order by ID to avoid bot loop issues
# * Huddle
# * Recipient
# * Subscription
# * Message
# * UserMessage
#
# Because the Python object => JSON conversion process is not fully
# faithful, we have to use a set of fixers (e.g. on DateTime objects
# and Foreign Keys) to do the import correctly.
def do_import_realm(import_dir: Path, subdomain: str, processes: int=1) -> Realm:
logging.info("Importing realm dump %s", import_dir)
if not os.path.exists(import_dir):
raise Exception("Missing import directory!")
realm_data_filename = os.path.join(import_dir, "realm.json")
if not os.path.exists(realm_data_filename):
raise Exception("Missing realm.json file!")
if not server_initialized():
create_internal_realm()
logging.info("Importing realm data from %s", realm_data_filename)
with open(realm_data_filename, "rb") as f:
data = orjson.loads(f.read())
remove_denormalized_recipient_column_from_data(data)
sort_by_date = data.get('sort_by_date', False)
bulk_import_client(data, Client, 'zerver_client')
# We don't import the Stream model yet, since it depends on Realm,
# which isn't imported yet. But we need the Stream model IDs for
# notifications_stream.
update_model_ids(Stream, data, 'stream')
re_map_foreign_keys(data, 'zerver_realm', 'notifications_stream', related_table="stream")
re_map_foreign_keys(data, 'zerver_realm', 'signup_notifications_stream', related_table="stream")
fix_datetime_fields(data, 'zerver_realm')
# Fix realm subdomain information
data['zerver_realm'][0]['string_id'] = subdomain
data['zerver_realm'][0]['name'] = subdomain
fix_realm_authentication_bitfield(data, 'zerver_realm', 'authentication_methods')
update_model_ids(Realm, data, 'realm')
realm = Realm(**data['zerver_realm'][0])
if realm.notifications_stream_id is not None:
notifications_stream_id: Optional[int] = int(realm.notifications_stream_id)
else:
notifications_stream_id = None
realm.notifications_stream_id = None
if realm.signup_notifications_stream_id is not None:
signup_notifications_stream_id: Optional[int] = int(realm.signup_notifications_stream_id)
else:
signup_notifications_stream_id = None
realm.signup_notifications_stream_id = None
realm.save()
# Email tokens will automatically be randomly generated when the
# Stream objects are created by Django.
fix_datetime_fields(data, 'zerver_stream')
re_map_foreign_keys(data, 'zerver_stream', 'realm', related_table="realm")
# Handle rendering of stream descriptions for import from non-Zulip
for stream in data['zerver_stream']:
if 'rendered_description' in stream:
continue
stream["rendered_description"] = render_stream_description(stream["description"])
bulk_import_model(data, Stream)
realm.notifications_stream_id = notifications_stream_id
realm.signup_notifications_stream_id = signup_notifications_stream_id
realm.save()
# Remap the user IDs for notification_bot and friends to their
# appropriate IDs on this server
for item in data['zerver_userprofile_crossrealm']:
logging.info("Adding to ID map: %s %s", item['id'], get_system_bot(item['email']).id)
new_user_id = get_system_bot(item['email']).id
update_id_map(table='user_profile', old_id=item['id'], new_id=new_user_id)
new_recipient_id = Recipient.objects.get(type=Recipient.PERSONAL, type_id=new_user_id).id
update_id_map(table='recipient', old_id=item['recipient_id'], new_id=new_recipient_id)
# Merge in zerver_userprofile_mirrordummy
data['zerver_userprofile'] = data['zerver_userprofile'] + data['zerver_userprofile_mirrordummy']
del data['zerver_userprofile_mirrordummy']
data['zerver_userprofile'].sort(key=lambda r: r['id'])
# To remap foreign key for UserProfile.last_active_message_id
update_message_foreign_keys(import_dir=import_dir, sort_by_date=sort_by_date)
fix_datetime_fields(data, 'zerver_userprofile')
update_model_ids(UserProfile, data, 'user_profile')
re_map_foreign_keys(data, 'zerver_userprofile', 'realm', related_table="realm")
re_map_foreign_keys(data, 'zerver_userprofile', 'bot_owner', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_userprofile', 'default_sending_stream',
related_table="stream")
re_map_foreign_keys(data, 'zerver_userprofile', 'default_events_register_stream',
related_table="stream")
re_map_foreign_keys(data, 'zerver_userprofile', 'last_active_message_id',
related_table="message", id_field=True)
for user_profile_dict in data['zerver_userprofile']:
user_profile_dict['password'] = None
user_profile_dict['api_key'] = generate_api_key()
# Since Zulip doesn't use these permissions, drop them
del user_profile_dict['user_permissions']
del user_profile_dict['groups']
# The short_name field is obsolete in Zulip, but it's
# convenient for third party exports to populate it.
if 'short_name' in user_profile_dict:
del user_profile_dict['short_name']
user_profiles = [UserProfile(**item) for item in data['zerver_userprofile']]
for user_profile in user_profiles:
user_profile.set_unusable_password()
UserProfile.objects.bulk_create(user_profiles)
re_map_foreign_keys(data, 'zerver_defaultstream', 'stream', related_table="stream")
re_map_foreign_keys(data, 'zerver_realmemoji', 'author', related_table="user_profile")
for (table, model, related_table) in realm_tables:
re_map_foreign_keys(data, table, 'realm', related_table="realm")
update_model_ids(model, data, related_table)
bulk_import_model(data, model)
if 'zerver_huddle' in data:
update_model_ids(Huddle, data, 'huddle')
# We don't import Huddle yet, since we don't have the data to
# compute huddle hashes until we've imported some of the
# tables below.
# TODO: double-check this.
re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="stream",
recipient_field=True, id_field=True)
re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="user_profile",
recipient_field=True, id_field=True)
re_map_foreign_keys(data, 'zerver_recipient', 'type_id', related_table="huddle",
recipient_field=True, id_field=True)
update_model_ids(Recipient, data, 'recipient')
bulk_import_model(data, Recipient)
bulk_set_users_or_streams_recipient_fields(Stream, Stream.objects.filter(realm=realm))
bulk_set_users_or_streams_recipient_fields(UserProfile, UserProfile.objects.filter(realm=realm))
re_map_foreign_keys(data, 'zerver_subscription', 'user_profile', related_table="user_profile")
get_huddles_from_subscription(data, 'zerver_subscription')
re_map_foreign_keys(data, 'zerver_subscription', 'recipient', related_table="recipient")
update_model_ids(Subscription, data, 'subscription')
bulk_import_model(data, Subscription)
if 'zerver_realmauditlog' in data:
fix_datetime_fields(data, 'zerver_realmauditlog')
re_map_foreign_keys(data, 'zerver_realmauditlog', 'realm', related_table="realm")
re_map_foreign_keys(data, 'zerver_realmauditlog', 'modified_user',
related_table='user_profile')
re_map_foreign_keys(data, 'zerver_realmauditlog', 'acting_user',
related_table='user_profile')
re_map_foreign_keys(data, 'zerver_realmauditlog', 'modified_stream',
related_table="stream")
update_model_ids(RealmAuditLog, data, related_table="realmauditlog")
bulk_import_model(data, RealmAuditLog)
else:
logging.info('about to call create_subscription_events')
create_subscription_events(
data=data,
realm_id=realm.id,
)
logging.info('done with create_subscription_events')
if 'zerver_huddle' in data:
process_huddle_hash(data, 'zerver_huddle')
bulk_import_model(data, Huddle)
for huddle in Huddle.objects.filter(recipient_id=None):
recipient = Recipient.objects.get(type=Recipient.HUDDLE, type_id=huddle.id)
huddle.recipient = recipient
huddle.save(update_fields=["recipient"])
if 'zerver_alertword' in data:
re_map_foreign_keys(data, 'zerver_alertword', 'user_profile', related_table='user_profile')
re_map_foreign_keys(data, 'zerver_alertword', 'realm', related_table='realm')
update_model_ids(AlertWord, data, 'alertword')
bulk_import_model(data, AlertWord)
if 'zerver_userhotspot' in data:
fix_datetime_fields(data, 'zerver_userhotspot')
re_map_foreign_keys(data, 'zerver_userhotspot', 'user', related_table='user_profile')
update_model_ids(UserHotspot, data, 'userhotspot')
bulk_import_model(data, UserHotspot)
if 'zerver_mutedtopic' in data:
fix_datetime_fields(data, 'zerver_mutedtopic')
re_map_foreign_keys(data, 'zerver_mutedtopic', 'user_profile', related_table='user_profile')
re_map_foreign_keys(data, 'zerver_mutedtopic', 'stream', related_table='stream')
re_map_foreign_keys(data, 'zerver_mutedtopic', 'recipient', related_table='recipient')
update_model_ids(MutedTopic, data, 'mutedtopic')
bulk_import_model(data, MutedTopic)
if 'zerver_service' in data:
re_map_foreign_keys(data, 'zerver_service', 'user_profile', related_table='user_profile')
fix_service_tokens(data, 'zerver_service')
update_model_ids(Service, data, 'service')
bulk_import_model(data, Service)
if 'zerver_usergroup' in data:
re_map_foreign_keys(data, 'zerver_usergroup', 'realm', related_table='realm')
re_map_foreign_keys_many_to_many(data, 'zerver_usergroup',
'members', related_table='user_profile')
update_model_ids(UserGroup, data, 'usergroup')
bulk_import_model(data, UserGroup)
re_map_foreign_keys(data, 'zerver_usergroupmembership',
'user_group', related_table='usergroup')
re_map_foreign_keys(data, 'zerver_usergroupmembership',
'user_profile', related_table='user_profile')
update_model_ids(UserGroupMembership, data, 'usergroupmembership')
bulk_import_model(data, UserGroupMembership)
if 'zerver_botstoragedata' in data:
re_map_foreign_keys(data, 'zerver_botstoragedata', 'bot_profile', related_table='user_profile')
update_model_ids(BotStorageData, data, 'botstoragedata')
bulk_import_model(data, BotStorageData)
if 'zerver_botconfigdata' in data:
re_map_foreign_keys(data, 'zerver_botconfigdata', 'bot_profile', related_table='user_profile')
update_model_ids(BotConfigData, data, 'botconfigdata')
bulk_import_model(data, BotConfigData)
fix_datetime_fields(data, 'zerver_userpresence')
re_map_foreign_keys(data, 'zerver_userpresence', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_userpresence', 'client', related_table='client')
re_map_foreign_keys(data, 'zerver_userpresence', 'realm', related_table="realm")
update_model_ids(UserPresence, data, 'user_presence')
bulk_import_model(data, UserPresence)
fix_datetime_fields(data, 'zerver_useractivity')
re_map_foreign_keys(data, 'zerver_useractivity', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_useractivity', 'client', related_table='client')
update_model_ids(UserActivity, data, 'useractivity')
bulk_import_model(data, UserActivity)
fix_datetime_fields(data, 'zerver_useractivityinterval')
re_map_foreign_keys(data, 'zerver_useractivityinterval', 'user_profile', related_table="user_profile")
update_model_ids(UserActivityInterval, data, 'useractivityinterval')
bulk_import_model(data, UserActivityInterval)
re_map_foreign_keys(data, 'zerver_customprofilefield', 'realm', related_table="realm")
update_model_ids(CustomProfileField, data, related_table="customprofilefield")
bulk_import_model(data, CustomProfileField)
re_map_foreign_keys(data, 'zerver_customprofilefieldvalue', 'user_profile',
related_table="user_profile")
re_map_foreign_keys(data, 'zerver_customprofilefieldvalue', 'field',
related_table="customprofilefield")
fix_customprofilefield(data)
update_model_ids(CustomProfileFieldValue, data, related_table="customprofilefieldvalue")
bulk_import_model(data, CustomProfileFieldValue)
# Import uploaded files and avatars
import_uploads(realm, os.path.join(import_dir, "avatars"), processes, processing_avatars=True)
import_uploads(realm, os.path.join(import_dir, "uploads"), processes)
# We need to have this check as the emoji files are only present in the data
# importer from Slack
# For Zulip export, this doesn't exist
if os.path.exists(os.path.join(import_dir, "emoji")):
import_uploads(realm, os.path.join(import_dir, "emoji"), processes, processing_emojis=True)
if os.path.exists(os.path.join(import_dir, "realm_icons")):
import_uploads(realm, os.path.join(import_dir, "realm_icons"), processes,
processing_realm_icons=True)
sender_map = {
user['id']: user
for user in data['zerver_userprofile']
}
# Import zerver_message and zerver_usermessage
import_message_data(realm=realm, sender_map=sender_map, import_dir=import_dir)
re_map_foreign_keys(data, 'zerver_reaction', 'message', related_table="message")
re_map_foreign_keys(data, 'zerver_reaction', 'user_profile', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_reaction', 'emoji_code', related_table="realmemoji", id_field=True,
reaction_field=True)
update_model_ids(Reaction, data, 'reaction')
bulk_import_model(data, Reaction)
# Similarly, we need to recalculate the first_message_id for stream objects.
for stream in Stream.objects.filter(realm=realm):
recipient = Recipient.objects.get(type=Recipient.STREAM, type_id=stream.id)
first_message = Message.objects.filter(recipient=recipient).first()
if first_message is None:
stream.first_message_id = None
else:
stream.first_message_id = first_message.id
stream.save(update_fields=["first_message_id"])
# Do attachments AFTER message data is loaded.
# TODO: de-dup how we read these json files.
fn = os.path.join(import_dir, "attachment.json")
if not os.path.exists(fn):
raise Exception("Missing attachment.json file!")
logging.info("Importing attachment data from %s", fn)
with open(fn, "rb") as f:
data = orjson.loads(f.read())
import_attachments(data)
# Import the analytics file.
import_analytics_data(realm=realm, import_dir=import_dir)
if settings.BILLING_ENABLED:
do_change_plan_type(realm, Realm.LIMITED)
else:
do_change_plan_type(realm, Realm.SELF_HOSTED)
return realm
# create_users and do_import_system_bots differ from their equivalent
# in zerver/lib/server_initialization.py because here we check if the
# bots don't already exist and only then create a user for these bots.
def do_import_system_bots(realm: Any) -> None:
internal_bots = [(bot['name'], bot['email_template'] % (settings.INTERNAL_BOT_DOMAIN,))
for bot in settings.INTERNAL_BOTS]
create_users(realm, internal_bots, bot_type=UserProfile.DEFAULT_BOT)
print("Finished importing system bots.")
def create_users(realm: Realm, name_list: Iterable[Tuple[str, str]],
bot_type: Optional[int]=None) -> None:
user_set = set()
for full_name, email in name_list:
if not UserProfile.objects.filter(email=email):
user_set.add((email, full_name, True))
bulk_create_users(realm, user_set, bot_type)
def update_message_foreign_keys(import_dir: Path,
sort_by_date: bool) -> None:
old_id_list = get_incoming_message_ids(
import_dir=import_dir,
sort_by_date=sort_by_date,
)
count = len(old_id_list)
new_id_list = allocate_ids(model_class=Message, count=count)
for old_id, new_id in zip(old_id_list, new_id_list):
update_id_map(
table='message',
old_id=old_id,
new_id=new_id,
)
# We don't touch user_message keys here; that happens later when
    # we actually read the files a second time to get actual data.
def get_incoming_message_ids(import_dir: Path,
sort_by_date: bool) -> List[int]:
'''
This function reads in our entire collection of message
ids, which can be millions of integers for some installations.
And then we sort the list. This is necessary to ensure
that the sort order of incoming ids matches the sort order
of date_sent, which isn't always guaranteed by our
utilities that convert third party chat data. We also
need to move our ids to a new range if we're dealing
with a server that has data for other realms.
'''
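    # Illustrative example: if the export contains message ids [12, 11] with
    # date_sent values [200, 100], sort_by_date=True yields [11, 12], so the
    # chronologically older message is paired with the smaller allocated id.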
if sort_by_date:
tups: List[Tuple[int, int]] = []
else:
message_ids: List[int] = []
dump_file_id = 1
while True:
message_filename = os.path.join(import_dir, f"messages-{dump_file_id:06}.json")
if not os.path.exists(message_filename):
break
with open(message_filename, "rb") as f:
data = orjson.loads(f.read())
# Aggressively free up memory.
del data['zerver_usermessage']
for row in data['zerver_message']:
# We truncate date_sent to int to theoretically
# save memory and speed up the sort. For
# Zulip-to-Zulip imports, the
# message_id will generally be a good tiebreaker.
# If we occasionally mis-order the ids for two
# messages from the same second, it's not the
# end of the world, as it's likely those messages
# arrived to the original server in somewhat
# arbitrary order.
message_id = row['id']
if sort_by_date:
date_sent = int(row['date_sent'])
tup = (date_sent, message_id)
tups.append(tup)
else:
message_ids.append(message_id)
dump_file_id += 1
if sort_by_date:
tups.sort()
message_ids = [tup[1] for tup in tups]
return message_ids
def import_message_data(realm: Realm,
sender_map: Dict[int, Record],
import_dir: Path) -> None:
dump_file_id = 1
while True:
message_filename = os.path.join(import_dir, f"messages-{dump_file_id:06}.json")
if not os.path.exists(message_filename):
break
with open(message_filename, "rb") as f:
data = orjson.loads(f.read())
logging.info("Importing message dump %s", message_filename)
re_map_foreign_keys(data, 'zerver_message', 'sender', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_message', 'recipient', related_table="recipient")
re_map_foreign_keys(data, 'zerver_message', 'sending_client', related_table='client')
fix_datetime_fields(data, 'zerver_message')
# Parser to update message content with the updated attachment URLs
fix_upload_links(data, 'zerver_message')
# We already create mappings for zerver_message ids
# in update_message_foreign_keys(), so here we simply
# apply them.
message_id_map = ID_MAP['message']
for row in data['zerver_message']:
row['id'] = message_id_map[row['id']]
for row in data['zerver_usermessage']:
assert(row['message'] in message_id_map)
fix_message_rendered_content(
realm=realm,
sender_map=sender_map,
messages=data['zerver_message'],
)
logging.info("Successfully rendered Markdown for message batch")
# A LOT HAPPENS HERE.
# This is where we actually import the message data.
bulk_import_model(data, Message)
# Due to the structure of these message chunks, we're
# guaranteed to have already imported all the Message objects
# for this batch of UserMessage objects.
re_map_foreign_keys(data, 'zerver_usermessage', 'message', related_table="message")
re_map_foreign_keys(data, 'zerver_usermessage', 'user_profile', related_table="user_profile")
fix_bitfield_keys(data, 'zerver_usermessage', 'flags')
bulk_import_user_message_data(data, dump_file_id)
dump_file_id += 1
def import_attachments(data: TableData) -> None:
# Clean up the data in zerver_attachment that is not
# relevant to our many-to-many import.
fix_datetime_fields(data, 'zerver_attachment')
re_map_foreign_keys(data, 'zerver_attachment', 'owner', related_table="user_profile")
re_map_foreign_keys(data, 'zerver_attachment', 'realm', related_table="realm")
# Configure ourselves. Django models many-to-many (m2m)
# relations asymmetrically. The parent here refers to the
# Model that has the ManyToManyField. It is assumed here
# the child models have been loaded, but we are in turn
# responsible for loading the parents and the m2m rows.
parent_model = Attachment
parent_db_table_name = 'zerver_attachment'
parent_singular = 'attachment'
child_singular = 'message'
child_plural = 'messages'
m2m_table_name = 'zerver_attachment_messages'
parent_id = 'attachment_id'
child_id = 'message_id'
update_model_ids(parent_model, data, 'attachment')
# We don't bulk_import_model yet, because we need to first compute
# the many-to-many for this table.
# First, build our list of many-to-many (m2m) rows.
# We do this in a slightly convoluted way to anticipate
# a future where we may need to call re_map_foreign_keys.
m2m_rows: List[Record] = []
for parent_row in data[parent_db_table_name]:
for fk_id in parent_row[child_plural]:
m2m_row: Record = {}
m2m_row[parent_singular] = parent_row['id']
m2m_row[child_singular] = ID_MAP['message'][fk_id]
m2m_rows.append(m2m_row)
# Create our table data for insert.
m2m_data: TableData = {m2m_table_name: m2m_rows}
convert_to_id_fields(m2m_data, m2m_table_name, parent_singular)
convert_to_id_fields(m2m_data, m2m_table_name, child_singular)
m2m_rows = m2m_data[m2m_table_name]
# Next, delete out our child data from the parent rows.
for parent_row in data[parent_db_table_name]:
del parent_row[child_plural]
# Update 'path_id' for the attachments
for attachment in data[parent_db_table_name]:
attachment['path_id'] = path_maps['attachment_path'][attachment['path_id']]
# Next, load the parent rows.
bulk_import_model(data, parent_model)
# Now, go back to our m2m rows.
# TODO: Do this the kosher Django way. We may find a
# better way to do this in Django 1.9 particularly.
with connection.cursor() as cursor:
sql_template = SQL('''
INSERT INTO {m2m_table_name} ({parent_id}, {child_id}) VALUES %s
''').format(
m2m_table_name=Identifier(m2m_table_name),
parent_id=Identifier(parent_id),
child_id=Identifier(child_id),
)
tups = [(row[parent_id], row[child_id]) for row in m2m_rows]
execute_values(cursor.cursor, sql_template, tups)
logging.info('Successfully imported M2M table %s', m2m_table_name)
def import_analytics_data(realm: Realm, import_dir: Path) -> None:
analytics_filename = os.path.join(import_dir, "analytics.json")
if not os.path.exists(analytics_filename):
return
logging.info("Importing analytics data from %s", analytics_filename)
with open(analytics_filename, "rb") as f:
data = orjson.loads(f.read())
# Process the data through the fixer functions.
fix_datetime_fields(data, 'analytics_realmcount')
re_map_foreign_keys(data, 'analytics_realmcount', 'realm', related_table="realm")
update_model_ids(RealmCount, data, 'analytics_realmcount')
bulk_import_model(data, RealmCount)
fix_datetime_fields(data, 'analytics_usercount')
re_map_foreign_keys(data, 'analytics_usercount', 'realm', related_table="realm")
re_map_foreign_keys(data, 'analytics_usercount', 'user', related_table="user_profile")
update_model_ids(UserCount, data, 'analytics_usercount')
bulk_import_model(data, UserCount)
fix_datetime_fields(data, 'analytics_streamcount')
re_map_foreign_keys(data, 'analytics_streamcount', 'realm', related_table="realm")
re_map_foreign_keys(data, 'analytics_streamcount', 'stream', related_table="stream")
update_model_ids(StreamCount, data, 'analytics_streamcount')
bulk_import_model(data, StreamCount)
|
py
|
1a5d1684171478f1b7ca0267cec7b7dc8ea9a09c
|
import numpy, random
lr = 1
bias = 1
weights = list()
for k in range(3):
weights.append(random.random()) #Assigning random weights
def ptron(inp1,inp2,outp):
outp_pn = inp1*weights[0]+inp2*weights[1]+bias*weights[2]
outp_pn = 1.0/(1+numpy.exp(-outp_pn)) #Sigmoid Function
err = outp - outp_pn
weights[0] += err*inp1*lr #Modifying weights
weights[1] += err*inp2*lr
weights[2] += err*bias*lr
for i in range(100): #Training With Data
    ptron(0,0,0) # Passing the truth values of AND
ptron(1,1,1)
ptron(1,0,0)
ptron(0,1,0)
for x,y in [(0,0),(1,0),(0,1),(1,1)]:
outp_pn = x*weights[0]+y*weights[1]+bias*weights[2]
    # Based on the trained weights
outp = 1.0/(1+numpy.exp(-outp_pn))
print(str(x) + " AND " + str(y) + " yields: " + str(outp))
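# An added check (a sketch, not part of the original script): threshold the
# sigmoid output at 0.5 to read the learned gate as a boolean prediction.
for x, y in [(0, 0), (1, 0), (0, 1), (1, 1)]:
    activation = x*weights[0] + y*weights[1] + bias*weights[2]
    prediction = 1 if 1.0/(1 + numpy.exp(-activation)) > 0.5 else 0
    print(str(x) + " AND " + str(y) + " thresholded prediction: " + str(prediction))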
|
py
|
1a5d172d8d5a44c65121f828b2bb15b575c5f9e3
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from .. import _utilities, _tables
from ._enums import *
__all__ = ['ManagedInstanceAdministrator']
class ManagedInstanceAdministrator(pulumi.CustomResource):
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
administrator_name: Optional[pulumi.Input[str]] = None,
administrator_type: Optional[pulumi.Input[Union[str, 'ManagedInstanceAdministratorType']]] = None,
login: Optional[pulumi.Input[str]] = None,
managed_instance_name: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
sid: Optional[pulumi.Input[str]] = None,
tenant_id: Optional[pulumi.Input[str]] = None,
__props__=None,
__name__=None,
__opts__=None):
"""
An Azure SQL managed instance administrator.
API Version: 2020-08-01-preview.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[Union[str, 'ManagedInstanceAdministratorType']] administrator_type: Type of the managed instance administrator.
:param pulumi.Input[str] login: Login name of the managed instance administrator.
:param pulumi.Input[str] managed_instance_name: The name of the managed instance.
:param pulumi.Input[str] resource_group_name: The name of the resource group that contains the resource. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] sid: SID (object ID) of the managed instance administrator.
:param pulumi.Input[str] tenant_id: Tenant ID of the managed instance administrator.
"""
if __name__ is not None:
warnings.warn("explicit use of __name__ is deprecated", DeprecationWarning)
resource_name = __name__
if __opts__ is not None:
warnings.warn("explicit use of __opts__ is deprecated, use 'opts' instead", DeprecationWarning)
opts = __opts__
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = dict()
__props__['administrator_name'] = administrator_name
if administrator_type is None and not opts.urn:
raise TypeError("Missing required property 'administrator_type'")
__props__['administrator_type'] = administrator_type
if login is None and not opts.urn:
raise TypeError("Missing required property 'login'")
__props__['login'] = login
if managed_instance_name is None and not opts.urn:
raise TypeError("Missing required property 'managed_instance_name'")
__props__['managed_instance_name'] = managed_instance_name
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__['resource_group_name'] = resource_group_name
if sid is None and not opts.urn:
raise TypeError("Missing required property 'sid'")
__props__['sid'] = sid
__props__['tenant_id'] = tenant_id
__props__['name'] = None
__props__['type'] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:sql:ManagedInstanceAdministrator"), pulumi.Alias(type_="azure-native:sql/v20170301preview:ManagedInstanceAdministrator"), pulumi.Alias(type_="azure-nextgen:sql/v20170301preview:ManagedInstanceAdministrator"), pulumi.Alias(type_="azure-native:sql/v20200202preview:ManagedInstanceAdministrator"), pulumi.Alias(type_="azure-nextgen:sql/v20200202preview:ManagedInstanceAdministrator"), pulumi.Alias(type_="azure-native:sql/v20200801preview:ManagedInstanceAdministrator"), pulumi.Alias(type_="azure-nextgen:sql/v20200801preview:ManagedInstanceAdministrator")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(ManagedInstanceAdministrator, __self__).__init__(
'azure-native:sql:ManagedInstanceAdministrator',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'ManagedInstanceAdministrator':
"""
Get an existing ManagedInstanceAdministrator resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = dict()
__props__["administrator_type"] = None
__props__["login"] = None
__props__["name"] = None
__props__["sid"] = None
__props__["tenant_id"] = None
__props__["type"] = None
return ManagedInstanceAdministrator(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="administratorType")
def administrator_type(self) -> pulumi.Output[str]:
"""
Type of the managed instance administrator.
"""
return pulumi.get(self, "administrator_type")
@property
@pulumi.getter
def login(self) -> pulumi.Output[str]:
"""
Login name of the managed instance administrator.
"""
return pulumi.get(self, "login")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def sid(self) -> pulumi.Output[str]:
"""
SID (object ID) of the managed instance administrator.
"""
return pulumi.get(self, "sid")
@property
@pulumi.getter(name="tenantId")
def tenant_id(self) -> pulumi.Output[Optional[str]]:
"""
Tenant ID of the managed instance administrator.
"""
return pulumi.get(self, "tenant_id")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
Resource type.
"""
return pulumi.get(self, "type")
def translate_output_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
def translate_input_property(self, prop):
return _tables.SNAKE_TO_CAMEL_CASE_TABLE.get(prop) or prop
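# Example usage in a Pulumi program (a sketch; the resource name and all
# argument values below are placeholders/assumptions, not values taken from
# this module):
#
#     import pulumi_azure_native as azure_native
#
#     admin = azure_native.sql.ManagedInstanceAdministrator(
#         "aadAdmin",
#         administrator_name="ActiveDirectory",
#         administrator_type="ActiveDirectory",
#         login="dba@example.com",
#         managed_instance_name="example-managed-instance",
#         resource_group_name="example-resource-group",
#         sid="00000000-0000-0000-0000-000000000000",
#         tenant_id="00000000-0000-0000-0000-000000000000",
#     )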
|
py
|
1a5d186645bea371a34618ca9248bdeab5102acf
|
from queue import Queue
class Node:
def __init__(self, data):
self.data = data
self.parent = None
self.children = dict()
"""
        Dictionary whose values are the node's children and whose keys are the corresponding
        children's data.
"""
def add_child(self, child):
child.parent = self
self.children[child.data] = child
class Tree:
def __init__(self, root: Node):
self.root = root
def bfs_search(self, data, depth=None):
"""
Searches for a node, given its data. The search starts from the root.
:param data: Data of the node to find.
:param depth: Limits the search to nodes with the given depth.
:return: The node if it's found, None otherwise.
"""
visited, queue = set(), Queue()
# Each element of the queue is a couple (node, level):
queue.put((self.root, 0))
while not queue.empty():
node, level = queue.get()
if depth is not None and level > depth:
break
if depth is None:
if node.data == data:
return node
else:
if level == depth and node.data == data:
return node
for child in node.children.values():
if child in visited:
continue
queue.put((child, level + 1))
visited.add(node)
return None
def _bfs_insert(self, child: Node, parent: Node) -> bool:
node = self.bfs_search(parent.data)
if node is not None:
node.add_child(child)
return True
else:
return False
def insert(self, child: Node, parent: Node) -> bool:
"""
Inserts a node given its parent. Note: insertion is done on the first node with the same
data as the given parent node.
:param child: Node to insert.
:param parent: Parent node.
:return: True if the node has been inserted, False otherwise.
"""
return self._bfs_insert(child, parent)
def parent(self, data):
"""
Gets the parent of a node, given the node data.
:param data: Data of the node to find.
:return: Parent node if found, None otherwise.
"""
node = self.bfs_search(data)
if node is not None:
return node.parent
else:
return None
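# A minimal usage sketch (an addition, not part of the original module):
if __name__ == "__main__":
    root = Node("a")
    tree = Tree(root)
    tree.insert(Node("b"), root)            # child of "a"
    tree.insert(Node("c"), Node("b"))       # child of the node whose data is "b"
    print(tree.bfs_search("c").data)        # -> c
    print(tree.parent("c").data)            # -> b
    print(tree.bfs_search("c", depth=1))    # -> None, since "c" lives at depth 2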
|
py
|
1a5d18e71357348a639efdd76875e478d2d6a53b
|
# sympsi global imports
from .qutility import *
|
py
|
1a5d1b3e1411f55427bcabe405d127a20274998c
|
# -*- coding: utf-8 -*-
"""
mygeotab.py3.api_async
~~~~~~~~~~~~~~~~~~~~~~
Async/Await-able (Python 3.5+) public objects and methods wrapping the MyGeotab API.
"""
import asyncio
import sys
if sys.version_info < (3, 5):
raise Exception("Python 3.5+ is required to use the async API")
import ssl
from concurrent.futures import TimeoutError
import aiohttp
from mygeotab import api
from mygeotab.api import DEFAULT_TIMEOUT, get_headers
from mygeotab.exceptions import MyGeotabException, TimeoutException, AuthenticationException
from mygeotab.serializers import json_serialize, json_deserialize
class API(api.API):
"""A simple, asynchronous, and Pythonic wrapper for the MyGeotab API."""
def __init__(
self,
username,
password=None,
database=None,
session_id=None,
server="my.geotab.com",
timeout=DEFAULT_TIMEOUT,
proxies=None,
):
"""
Initialize the asynchronous MyGeotab API object with credentials.
:param username: The username used for MyGeotab servers. Usually an email address.
:param password: The password associated with the username. Optional if `session_id` is provided.
:param database: The database or company name. Optional as this usually gets resolved upon authentication.
:param session_id: A session ID, assigned by the server.
        :param server: The server, e.g. my23.geotab.com. Optional as this usually gets resolved upon authentication.
:param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
:param proxies: The proxies dictionary to apply to the request.
:raise Exception: Raises an Exception if a username, or one of the session_id or password is not provided.
"""
super().__init__(username, password, database, session_id, server, timeout, proxies=proxies)
async def call_async(self, method, **parameters):
"""Makes an async call to the API.
:param method: The method name.
        :param parameters: Additional parameters to send (for example, search=dict(id='b123') ).
        :return: The JSON result (decoded into a dict) from the server.
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
"""
if method is None:
raise Exception("A method name must be specified")
params = api.process_parameters(parameters)
if self.credentials and not self.credentials.session_id:
self.authenticate()
if "credentials" not in params and self.credentials.session_id:
params["credentials"] = self.credentials.get_param()
try:
result = await _query(self._server, method, params, verify_ssl=self._is_verify_ssl)
if result is not None:
self.__reauthorize_count = 0
return result
except MyGeotabException as exception:
if exception.name == "InvalidUserException":
if self.__reauthorize_count == 0 and self.credentials.password:
self.__reauthorize_count += 1
self.authenticate()
return await self.call_async(method, **parameters)
else:
raise AuthenticationException(
self.credentials.username, self.credentials.database, self.credentials.server
)
raise
async def multi_call_async(self, calls):
"""Performs an async multi-call to the API
:param calls: A list of call 2-tuples with method name and params (for example, ('Get', dict(typeName='Trip')) )
:return: The JSON result (decoded into a dict) from the server
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server
:raise TimeoutException: Raises when the request does not respond after some time.
"""
formatted_calls = [dict(method=call[0], params=call[1] if len(call) > 1 else {}) for call in calls]
return await self.call_async("ExecuteMultiCall", calls=formatted_calls)
async def get_async(self, type_name, **parameters):
"""Gets entities asynchronously using the API. Shortcut for using async_call() with the 'Get' method.
:param type_name: The type of entity.
:param parameters: Additional parameters to send.
:return: The JSON result (decoded into a dict) from the server.
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
"""
if parameters:
results_limit = parameters.get("resultsLimit", None)
if results_limit is not None:
del parameters["resultsLimit"]
if "search" in parameters:
parameters.update(parameters["search"])
del parameters["search"]
parameters = dict(search=parameters, resultsLimit=results_limit)
return await self.call_async("Get", type_name=type_name, **parameters)
async def add_async(self, type_name, entity):
"""
Adds an entity asynchronously using the API. Shortcut for using async_call() with the 'Add' method.
:param type_name: The type of entity.
:param entity: The entity to add.
:return: The id of the object added.
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
"""
return await self.call_async("Add", type_name=type_name, entity=entity)
async def set_async(self, type_name, entity):
"""Sets an entity asynchronously using the API. Shortcut for using async_call() with the 'Set' method.
:param type_name: The type of entity
:param entity: The entity to set
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server
"""
return await self.call_async("Set", type_name=type_name, entity=entity)
async def remove_async(self, type_name, entity):
"""Removes an entity asynchronously using the API. Shortcut for using async_call() with the 'Remove' method.
:param type_name: The type of entity.
:param entity: The entity to remove.
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
"""
return await self.call_async("Remove", type_name=type_name, entity=entity)
@staticmethod
def from_credentials(credentials):
"""Returns a new async API object from an existing Credentials object.
:param credentials: The existing saved credentials.
:return: A new API object populated with MyGeotab credentials.
"""
return API(
username=credentials.username,
password=credentials.password,
database=credentials.database,
session_id=credentials.session_id,
server=credentials.server,
)
async def server_call_async(method, server, timeout=DEFAULT_TIMEOUT, verify_ssl=True, **parameters):
"""Makes an asynchronous call to an un-authenticated method on a server.
:param method: The method name.
:param server: The MyGeotab server.
:param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
:param verify_ssl: If True, verify the SSL certificate. It's recommended not to modify this.
:param parameters: Additional parameters to send (for example, search=dict(id='b123') ).
:return: The JSON result (decoded into a dict) from the server.
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server.
:raise TimeoutException: Raises when the request does not respond after some time.
"""
if method is None:
raise Exception("A method name must be specified")
if server is None:
raise Exception("A server (eg. my3.geotab.com) must be specified")
parameters = api.process_parameters(parameters)
return await _query(server, method, parameters, timeout=timeout, verify_ssl=verify_ssl)
async def _query(server, method, parameters, timeout=DEFAULT_TIMEOUT, verify_ssl=True):
"""Formats and performs the asynchronous query against the API
:param server: The server to query.
:param method: The method name.
:param parameters: A dict of parameters to send
:param timeout: The timeout to make the call, in seconds. By default, this is 300 seconds (or 5 minutes).
:param verify_ssl: Whether or not to verify SSL connections
:return: The JSON-decoded result from the server
:raise MyGeotabException: Raises when an exception occurs on the MyGeotab server
:raise TimeoutException: Raises when the request does not respond after some time.
:raise aiohttp.ClientResponseError: Raises when there is an HTTP status code that indicates failure.
"""
api_endpoint = api.get_api_url(server)
params = dict(id=-1, method=method, params=parameters)
headers = get_headers()
conn = aiohttp.TCPConnector(ssl=ssl.SSLContext(ssl.PROTOCOL_TLSv1_2) if verify_ssl else False)
try:
async with aiohttp.ClientSession(connector=conn) as session:
response = await session.post(
api_endpoint, data=json_serialize(params), headers=headers, timeout=timeout, allow_redirects=True
)
response.raise_for_status()
content_type = response.headers.get("Content-Type")
body = await response.text()
except (TimeoutError, asyncio.TimeoutError):
raise TimeoutException(server)
if content_type and "application/json" not in content_type.lower():
return body
return api._process(json_deserialize(body))
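# A usage sketch (the credentials, database and type name below are
# placeholders, not values from this module):
#
#     import asyncio
#     from mygeotab.py3.api_async import API
#
#     async def main():
#         client = API(username="user@example.com", password="secret",
#                      database="example_database")
#         client.authenticate()
#         devices = await client.get_async("Device", resultsLimit=10)
#         print(len(devices))
#
#     asyncio.run(main())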
|
py
|
1a5d1badee2c3b2a5d349e264429fbd758dfda6a
|
"""
Master code to take input, generate features, call MALLET and use the probabilities for generating language tags
"""
# !/usr/bin/python
import sys
import subprocess
import re
import os
import time
import codecs
import pickle
from utils import extractFeatures as ef
from utils import generateLanguageTags as genLangTag
from collections import OrderedDict
from configparser import ConfigParser
def readConfig():
"""
Read config file to load global variables for the project
"""
global language_1_dicts
global language_2_dicts
global memoize_dict
global combined_dicts
global CLASSIFIER_PATH
global TMP_FILE_PATH
global DICT_PATH
global MALLET_PATH
global dict_prob_yes
global dict_prob_no
global memoize_dict_file
global verbose
global lang1
global lang2
# initialize dictionary variables
language_1_dicts = {}
language_2_dicts = {}
# initialize list of dictionary words
combined_dicts = []
# read config
config = ConfigParser()
config.read("config.ini")
config_paths = config["DEFAULT PATHS"]
config_probs = config["DICTIONARY PROBABILITY VALUES"]
config_dicts = config["DICTIONARY NAMES"]
config_gen = config["GENERAL"]
# setup paths for classifier, tmp folder, dictionaries and mallet
CLASSIFIER_PATH = config_paths["CLASSIFIER_PATH"] if config_paths["CLASSIFIER_PATH"] else os.path.join(
os.getcwd(), 'classifiers', 'HiEn.classifier')
TMP_FILE_PATH = config_paths["TMP_FILE_PATH"] if config_paths["TMP_FILE_PATH"] else os.path.join(
os.getcwd(), 'tmp', '')
DICT_PATH = config_paths["DICT_PATH"] if config_paths["DICT_PATH"] else os.path.join(
os.getcwd(), 'dictionaries', '')
MALLET_PATH = config_paths["MALLET_PATH"] if config_paths["MALLET_PATH"] else os.path.join(
os.getcwd(), 'mallet-2.0.8', 'bin', 'mallet')
# initialize probability values for the correct and incorrect language
dict_prob_yes = config_probs["dict_prob_yes"] if config_probs["dict_prob_yes"] else 0.999999999
dict_prob_no = config_probs["dict_prob_no"] if config_probs["dict_prob_no"] else 1E-9
    # initialize memoize_dict from file if already present, else with an empty dictionary
memoize_dict_file = config_dicts["memoize_dict_file"] if config_dicts["memoize_dict_file"] else "memoize_dict.pkl"
if os.path.isfile(DICT_PATH + memoize_dict_file):
with open(DICT_PATH + memoize_dict_file, "rb") as fp:
memoize_dict = pickle.load(fp)
else:
memoize_dict = {}
# by default verbose is ON
verbose = int(config_gen["verbose"]) if config_gen["verbose"] else 1
    # get language names; by default language 1 is HINDI and language 2 is ENGLISH
lang1 = config_gen["language_1"].upper(
) if config_gen["language_1"] else "HINDI"
lang2 = config_gen["language_2"].upper(
) if config_gen["language_2"] else "ENGLISH"
lang_1dict_names = config_dicts["language_1_dicts"].split(
",") if config_dicts["language_1_dicts"] else "hindict1"
lang_2dict_names = config_dicts["language_2_dicts"].split(
",") if config_dicts["language_2_dicts"] else "eng0dict1, eng1dict1"
# initialize language_1_dict and language_2_dict with all the sub dictionaries
for dict_names in lang_1dict_names:
language_1_dicts[dict_names.strip()] = {}
for dict_names in lang_2dict_names:
language_2_dicts[dict_names.strip()] = {}
def createDicts():
"""
Create and populate language dictionaries for Language 1 and Language 2
"""
global language_1_dicts
global language_2_dicts
global combined_dicts
global DICT_PATH
global lang1
global lang2
language_1_words = []
language_2_words = []
# read config to get dictionary structures
config = ConfigParser()
config.read("config.ini")
dict_struct = dict(config.items("DICTIONARY HIERARCHY"))
# create language_1 dictionary
for sub_dict in language_1_dicts:
input_files = dict_struct[sub_dict].split(",")
for filename in input_files:
with open(DICT_PATH + filename.strip(), 'r') as dictfile:
words = dictfile.read().split('\n')
for w in words:
language_1_dicts[sub_dict][w.strip().lower()] = ''
language_1_words.extend(list(language_1_dicts[sub_dict].keys()))
print(lang1, 'dictionary created')
# create language_2 dictionary
for sub_dict in language_2_dicts:
input_files = dict_struct[sub_dict].split(",")
for filename in input_files:
with open(DICT_PATH + filename.strip(), 'r') as dictfile:
words = dictfile.read().split('\n')
for w in words:
language_2_dicts[sub_dict][w.strip().lower()] = ''
language_2_words.extend(list(language_2_dicts[sub_dict].keys()))
print(lang2, 'dictionary created')
# populate the combined word list
combined_dicts.extend(language_1_words)
combined_dicts.extend(language_2_words)
def dictTagging(word, tag):
"""
Use language dictionaries to tag words
"""
global language_1_dicts
global language_2_dicts
global lang1
global lang2
dhin, den0, den1 = 0, 0, 0
if word.lower() in language_1_dicts["hindict1"].keys():
dhin = 1
if word.lower() in language_2_dicts["eng0dict1"].keys():
den0 = 1
if word.lower() in language_2_dicts["eng1dict1"].keys():
den1 = 1
# if not den0 and not den1 and not dhin : do nothing
if (not den0 and not den1 and dhin) or (not den0 and den1 and dhin): # make HI
tag = lang1[:2]
if (not den0 and den1 and not dhin) or (den0 and not dhin): # make EN
tag = lang2[:2]
# if den0 and not den1 and not dhin : subsumed
# if den0 and not den1 and dhin : do nothing
    # if den0 and den1 and not dhin : subsumed
# if den0 and den1 and dhin : do nothing
return tag
def dictLookup(word):
"""
Check whether a word is already present in a dictionary
"""
global combined_dicts
word = word.lower()
if word in set(combined_dicts):
return True
return False
def blurb2Dict(blurb):
"""
Convert a str blurb to an ordered dictionary for comparison
"""
dic2 = OrderedDict()
wordlist = []
for line in blurb.split("\n"):
line = line.split("\t")
word = line[0].split()
tags = line[1:]
if len(word) != 0:
dic2[word[0]] = tags
wordlist.append(word)
return dic2, wordlist
def memoizeWord(mallet_output):
"""
Update the memoize_dict with words that are recently classified by mallet
"""
global memoize_dict
mallet_output = blurb2Dict(mallet_output)[0]
for word in mallet_output.keys():
memoize_dict[word] = mallet_output[word]
def mergeBlurbs(blurb, mallet_output, blurb_dict):
"""
Combine probabilities of words from both MALLET and dictionary outputs
"""
global dict_prob_yes
global dict_prob_no
global verbose
global lang1
global lang2
# convert main blurb to OrderedDict
main_dict = OrderedDict()
wordlist_main = []
for line in blurb.split("\n"):
word, tag = line.split("\t")
main_dict[word] = tag
wordlist_main.append([word])
# populate dictionary based language tags with fixed probabilities for correct and incorrect
blurb_dict = blurb_dict.replace(lang1[:2], lang1[:2].lower(
) + "\t" + str(dict_prob_yes) + "\t" + lang2[:2].lower() + "\t" + str(dict_prob_no))
blurb_dict = blurb_dict.replace(lang2[:2], lang2[:2].lower(
) + "\t" + str(dict_prob_yes) + "\t" + lang1[:2].lower() + "\t" + str(dict_prob_no))
blurb_dict, _wordlist_dict = blurb2Dict(blurb_dict)
# convert mallet blurb to OrderedDict only when it isn't empty
mallet_is_empty = 1
if mallet_output != "":
mallet_is_empty = 0
blurb_mallet, _wordlist_mallet = blurb2Dict(mallet_output)
# combining logic
# iterate over the word list and populate probability values for tags from both dictionary and MALLET output
for idx, word in enumerate(wordlist_main):
current_word = word[0]
updated_word = word
if current_word in blurb_dict:
updated_word.extend(blurb_dict[current_word])
wordlist_main[idx] = updated_word
else:
if not mallet_is_empty:
if current_word in blurb_mallet:
updated_word.extend(blurb_mallet[current_word])
wordlist_main[idx] = updated_word
# convert the updated blurb to str
blurb_updated = []
st = ""
for word in wordlist_main:
st = word[0]
for tag in word[1:]:
st = st + "\t" + str(tag)
st = st.strip()
blurb_updated.append(st)
st = ""
blurb_updated = "\n".join(blurb_updated)
if verbose != 0:
print(blurb_updated, "\n---------------------------------\n")
return blurb_updated
def callMallet(inputText, classifier):
"""
Invokes the mallet classifier with input text and returns Main BLURB, MALLET OUTPUT and BLURB DICT
"""
global combined_dicts
global TMP_FILE_PATH
global memoize_dict
"""
    DICTIONARY CREATION CODE
"""
# create a dictionary if not already created, needed when using as a library
if len(combined_dicts) == 0:
createDicts()
# split words based on whether they are already present in the dictionary
# new words go to MALLET for generating probabilities
fixline_mallet = list(filter(lambda x: not dictLookup(x), inputText))
fixline_dict = list(
filter(lambda x: (x not in fixline_mallet) or (x in memoize_dict), inputText))
# create str blurb for mallet and dictionary input
blurb = '\n'.join(["%s\toth" % (v.strip()) for v in inputText])
blurb_mallet = '\n'.join(["%s\toth" % (v.strip()) for v in fixline_mallet])
dict_tags = list(map(lambda x: dictTagging(x, "oth"), fixline_dict))
# get dict_tags from words that are already classified by mallet
for idx, word in enumerate(fixline_dict):
if word in memoize_dict:
dict_tags[idx] = memoize_dict[word]
"""
LOGIC FOR WORDS THAT ARE PRESENT IN MULTIPLE DICTIONARIES
"""
fixline_mallet_corrections = []
for t, w in zip(dict_tags, fixline_dict):
        # if, even after the dict lookup, some words are still tagged oth due to a corner case, run mallet on those words as well
if t == "oth":
fixline_mallet_corrections.append(w)
# update blurb_mallet
blurb_mallet_corrections = '\n'.join(
["%s\toth" % (v.strip()) for v in fixline_mallet_corrections])
    # if blurb_mallet is not empty, append the corrections at the bottom separated by a newline; otherwise just append them directly
if blurb_mallet != "":
blurb_mallet = blurb_mallet + "\n" + blurb_mallet_corrections
else:
blurb_mallet += blurb_mallet_corrections
# remove the words from blurb_dict
dict_tags = filter(lambda x: x != "oth", dict_tags)
fixline_dict = filter(
lambda x: x not in fixline_mallet_corrections, fixline_dict)
blurb_dict = ""
for word, tag in zip(fixline_dict, dict_tags):
if not type(tag) == list:
blurb_dict = blurb_dict + "%s\t%s" % (word.strip(), tag) + "\n"
else:
tmp_tags = "\t".join(tag)
blurb_dict = blurb_dict + \
"%s\t%s" % (word.strip(), tmp_tags) + "\n"
"""
CALLING MALLET
"""
# this checks the case when blurb_mallet only has a \n due to words being taken into blurb_dict
if blurb_mallet != "\n":
# open a temp file and generate input features for mallet
open(TMP_FILE_PATH + 'temp_testFile.txt', 'w').write(blurb_mallet)
ef.main(TMP_FILE_PATH + 'temp_testFile.txt')
# initialize t7 to track time taken by mallet
t7 = time.time()
# call mallet to get probability output
subprocess.Popen(MALLET_PATH + " classify-file --input " + TMP_FILE_PATH + "temp_testFile.txt.features" +
" --output " + TMP_FILE_PATH + "temp_testFile.txt.out --classifier %s" % (classifier), shell=True).wait()
t_total = time.time()-t7
mallet_output = open(
TMP_FILE_PATH + 'temp_testFile.txt.out', 'r').read()
    else:
        mallet_output = ""
        t_total = 0.0  # mallet was not invoked, so there is no classification time to report
# memoize the probabilities of words already classified
memoizeWord(mallet_output)
print("time for mallet classification", t_total, file=sys.stderr)
return blurb, mallet_output, blurb_dict
def genUID(results, fixline):
"""
ADDING UNIQUE IDS TO OUTPUT FILE AND FORMATTING
where:
fixline is input text
results is language probabilities for each word
"""
# NEW add unique id to results - which separator
uniqueresults = list(range(len(results)))
for idx in range(len(results)):
uniqueresults[idx] = results[idx]
uniqueresults[idx][0] = uniqueresults[idx][0]+"::{}".format(idx)
langOut = OrderedDict()
for v in uniqueresults:
langOut[v[0]] = OrderedDict()
for ii in range(1, len(v), 2):
langOut[v[0]][v[ii]] = float(v[ii+1])
fixmyline = fixline
fnewlines = list(range(len(fixmyline)))
for vvv in range(len(fixmyline)):
fnewlines[vvv] = fixmyline[vvv]+"::{}".format(vvv)
ffixedline = " ".join(fnewlines)
return ffixedline, langOut
def langIdentify(inputText, classifier):
"""
    Get language tags for sentences passed as a newline-separated string
    Input : a single string containing one sentence per line
    Output : for each sentence, a list of (word, language_tag) tuples
"""
global TMP_FILE_PATH
inputText = inputText.split("\n")
outputText = []
"""
CONFIG FILE CODE
"""
readConfig()
"""
    DICTIONARY CREATION CODE
"""
createDicts()
for line in inputText:
text = re.sub(r"([\w@#\'\\\"]+)([.:,;?!]+)", r"\g<1> \g<2> ", line)
text = text.split()
text = [x.strip() for x in text]
text = [x for x in text if not re.match(r"^\s*$", x)]
"""
CALLING MALLET CODE HERE
"""
blurb, mallet_output, blurb_dict = callMallet(text, classifier)
"""
WRITE COMBINING LOGIC HERE
"""
blurb_tagged = mergeBlurbs(blurb, mallet_output, blurb_dict)
results = [v.split("\t") for v in blurb_tagged.split("\n")]
# generate unique id for output sentences and format
ffixedline, langOut = genUID(results, text)
# get language tags using context logic from probabilities
out = genLangTag.get_res(ffixedline, langOut)
realOut = re.sub("::[0-9]+/", "/", out)
# get word, label pairs in the output
realOut = realOut.split()
realOut = [tuple(word.split("/")) for word in realOut]
# generate output
outputText.append(realOut)
return outputText
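# A hypothetical library-style call (assumes config.ini, the dictionaries and a
# trained classifier are in place; the sentences and classifier path below are
# illustrative only):
#
#     tagged = langIdentify("yeh kaunsa language hai\nthis one is english",
#                           "classifiers/HiEn.classifier")
#     # tagged is a list with one entry per sentence, each a list of
#     # (word, language_tag) tuples.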
def langIdentifyFile(filename, classifier):
"""
Get language tags for sentences from an input file
Input file: tsv with sentence id in first column and sentence in second column
Output file: tsv with word per line, sentences separated by newline
Output of sentence id in first column and best language tag in last column
"""
global TMP_FILE_PATH
# reading the input file
fil = codecs.open(filename, 'r', errors="ignore")
outfil = codecs.open(filename+"_tagged", 'a',
errors="ignore", encoding='utf-8')
line_count = 0
line = (fil.readline()).strip()
while line is not None and line != "":
line_count += 1
if (line_count % 100 == 0):
print(line_count, file=sys.stderr)
if not line.startswith("#"):
# reading sentences and basic pre-processing
lineid = "\t".join(line.split("\t")[:1])
line = " ".join(line.split("\t")[1:])
fline = re.sub(r"([\w@#\'\\\"]+)([.:,;?!]+)",
r"\g<1> \g<2> ", line)
fixline = fline.split()
fixline = [x.strip() for x in fixline]
fixline = [x for x in fixline if not re.match(r"^\s*$", x)]
"""
CALLING MALLET CODE HERE
"""
blurb, mallet_output, blurb_dict = callMallet(fixline, classifier)
"""
WRITE COMBINING LOGIC HERE
"""
blurb_tagged = mergeBlurbs(blurb, mallet_output, blurb_dict)
results = [v.split("\t") for v in blurb_tagged.split("\n")]
# generate unique id for output sentences and format
ffixedline, langOut = genUID(results, fixline)
# get language tags using context logic from probabilities
out = genLangTag.get_res(ffixedline, langOut)
outfil.write(u"##"+lineid+u"\t"+line+u"\n")
realout = re.sub("::[0-9]+/", "/", out)
outfil.write(lineid+u"\t"+realout+u'\n')
else:
print("### skipped commented line:: " + line.encode('utf-8') + "\n")
outfil.write("skipped line" + line.encode('utf-8') + "\n")
line = (fil.readline()).strip()
fil.close()
outfil.close()
print("written to " + filename + "_tagged")
def writeMemoizeDict():
"""
Write the Memoization Dictionary to the disk, update it with new words if already present
"""
if os.path.isfile(DICT_PATH + memoize_dict_file):
# if file already exists, then update memoize_dict before writing
with open(DICT_PATH + memoize_dict_file, "rb") as fp:
memoize_file = pickle.load(fp)
if memoize_file != memoize_dict:
print("updating memoize dictionary")
memoize_dict.update(memoize_file)
# write the memoize_dict to file
with open(DICT_PATH + memoize_dict_file, "wb") as fp:
pickle.dump(memoize_dict, fp)
if __name__ == "__main__":
"""
CONFIG FILE CODE
"""
readConfig()
"""
    DICTIONARY CREATION CODE
"""
createDicts()
"""
CLASSIFICATION CODE
"""
blurb = sys.argv[1]
print(blurb)
print(sys.argv)
classifier = CLASSIFIER_PATH
mode = "file"
if len(sys.argv) > 2:
mode = sys.argv[1]
blurb = sys.argv[2]
if len(sys.argv) > 3:
        classifier = sys.argv[3]
if mode == "file" or mode == "f":
# CHECK FILE EXISTS
langIdentifyFile(blurb, classifier)
else:
langIdentify(blurb, classifier)
"""
WRITE UPDATED MEMOIZE DICTIONARY TO DISK
"""
writeMemoizeDict()
exit()
|
py
|
1a5d1c1653942c5dacda602e3845fbfa3b249fa7
|
"""
*G♯ - Level 0*
"""
from ..._pitch import Pitch
__all__ = ["Gs0"]
class Gs0(
Pitch,
):
pass
|
py
|
1a5d1d6b6f1185d9da5dddb7e23715274b515c73
|
"""
Package `ffm.nonlinear`.
`ffm` stands for Fickett-Faria model.
Fickett--Faria is a detonation-analogue model based on the one-dimensional reactive
Burgers' equation with one-step chemistry.
"""
from .animator import Animator
from .asciireader import ASCIIReader
from .asciiwriter import ASCIIWriter
from .config import Config
from .problem import Problem
from .reader import Reader
from .rhsevaluator import RHSEvaluator
from ..common.zndsolver import ZNDSolver
|
py
|
1a5d1df0013d5a7bfd8f6fbe73b4183b91ed1327
|
import random
import Tkinter
# snake0.py
from Tkinter import *
def mousePressed(event):
canvas = event.widget.canvas
redrawAll(canvas)
def keyPressed(event):
canvas = event.widget.canvas
redrawAll(canvas)
def timerFired(canvas):
redrawAll(canvas)
delay = 250 # milliseconds
canvas.after(delay, timerFired, canvas) # pause, then call timerFired again
def redrawAll(canvas):
canvas.delete(ALL)
drawSnakeBoard(canvas)
def drawSnakeBoard(canvas):
space = 30
findSnakeHead(canvas)
for i in range(len(canvas.data["snakeBoard"])):
canvas.create_line(i*space, 0,i*space, 300)
canvas.create_line(0, i*space, 300, i*space)
for r, row in enumerate (canvas.data["snakeBoard"]):
for c, col in enumerate (row):
if col > 0 :
drawSnakeCell(canvas, r, c)
return
def drawSnakeCell(canvas, row, col):
x0 = 30*col
y0 = 30*row
x1 = 30*col + 30
y1 = 30*row + 30
id = canvas.create_oval(x0, y0, x1, y1, fill = "blue")
margin = 5
cellSize = 30
return
def findSnakeHead(canvas):
for r, row in enumerate(canvas.data['snakeBoard']):
for c, col in enumerate(row):
if col == 9:
break
if col == 9:
break
canvas.data["headRow"]= r
canvas.data["headCol"]= c
def removeTail(canvas):
for r, row in enumerate(canvas.data['snakeBoard']):
for c, col in enumerate(row):
if col > 0:
canvas.data['snakeBoard'][r][c] = col - 1
def placeFood(canvas):
snakeBoard = canvas.data["snakeBoard"]
rows = len(snakeBoard)
cols = len(snakeBoard[0])
while True:
row = random.randint(0,rows-1)
col = random.randint(0,cols-1)
if (snakeBoard[row][col] == 0):
break
snakeBoard[row][col] = -1
def moveSnake(canvas, drow, dcol):
headCol = canvas.data["headCol"]
headRow = canvas.data["headRow"]
newHeadRow = headRow + drow;
if newHeadRow < 0:
newHeadRow = len(canvas.data["snakeBoard"]) - 1;
elif newHeadRow >= len(canvas.data["snakeBoard"]):
newHeadRow = 0
newHeadCol = headCol + dcol;
if newHeadCol < 0:
newHeadCol = len(canvas.data["snakeBoard"][0]) - 1;
elif newHeadCol >= len(canvas.data["snakeBoard"][0]):
newHeadCol = 0
canvas.data["snakeBoard"][newHeadRow][newHeadCol] = canvas.data["snakeBoard"][headRow][headCol] + 1
canvas.data["headRow"] = newHeadRow
canvas.data["headCol"] = newHeadCol
removeTail(canvas);
def gameOver(canvas):
canvas.data["isGameOver"] = True
def keyPressed(event):
canvas = event.widget.canvas
if (event.keysym == "Up"):
moveSnake(canvas, -1, 0)
elif (event.keysym == "Down"):
moveSnake(canvas, +1, 0)
elif (event.keysym == "Left"):
moveSnake(canvas, 0,-1)
elif (event.keysym == "Right"):
moveSnake(canvas, 0,+1)
elif (event.char == "d"):
canvas.data["inDebugMode"] = not canvas.data["inDebugMode"]
redrawAll(canvas)
def loadSnakeBoard(canvas):
canvas.data["snakeBoard"] = [ [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 4, 5, 6, 0, 0, 0 ],
[ 0, 0, 0, 0, 3, 0, 7, 0, 0, 0 ],
[ 0, 0, 0, 1, 2, 0, 8, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 9, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],
[ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ]
]
findSnakeHead(canvas)
return
def printInstructions():
print("Snake! Use the arrow keys to move the snake." +
" Eat food to grow."+
"Stay on the board!"+
"And don't crash into yourslf")
return
def init(canvas):
printInstructions()
loadSnakeBoard(canvas)
redrawAll(canvas)
########### copy-paste below here ###########
def run():
# create the root and the canvas
root = Tk()
canvas = Canvas(root, width=310, height=310)
canvas.pack()
# Store canvas in root and in canvas itself for callbacks
root.canvas = canvas.canvas = canvas
# Set up canvas data and call init
canvas.data = { }
init(canvas)
# set up events
root.bind("<Button-1>", mousePressed)
root.bind("<Key>", keyPressed)
timerFired(canvas)
# and launch the app
root.mainloop() # This call BLOCKS (so your program waits until you close the window!)
if __name__ == '__main__':
run()
|
py
|
1a5d1e58040060e650cef61e233a2c68ef26cd83
|
import torch
import torch.nn as nn
from torch.autograd import Variable
from functools import reduce
class LambdaBase(nn.Sequential):
def __init__(self, fn, *args):
super(LambdaBase, self).__init__(*args)
self.lambda_func = fn
def forward_prepare(self, input):
output = []
for module in self._modules.values():
output.append(module(input))
return output if output else input
class Lambda(LambdaBase):
def forward(self, input):
return self.lambda_func(self.forward_prepare(input))
class LambdaMap(LambdaBase):
def forward(self, input):
return list(map(self.lambda_func,self.forward_prepare(input)))
class LambdaReduce(LambdaBase):
def forward(self, input):
return reduce(self.lambda_func,self.forward_prepare(input))
resnext_101_64x4d = nn.Sequential( # Sequential,
nn.Conv2d(3,64,(7, 7),(2, 2),(3, 3),1,1,bias=False),
nn.BatchNorm2d(64),
nn.ReLU(),
nn.MaxPool2d((3, 3),(2, 2),(1, 1)),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
nn.Sequential( # Sequential,
nn.Conv2d(64,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
nn.Conv2d(256,256,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(256),
nn.ReLU(),
),
nn.Conv2d(256,256,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(256),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(256,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(2, 2),(1, 1),1,64,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
nn.Sequential( # Sequential,
nn.Conv2d(256,512,(1, 1),(2, 2),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
nn.Conv2d(512,512,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(512),
nn.ReLU(),
),
nn.Conv2d(512,512,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(512),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(512,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(2, 2),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
nn.Sequential( # Sequential,
nn.Conv2d(512,1024,(1, 1),(2, 2),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
nn.Conv2d(1024,1024,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(1024),
nn.ReLU(),
),
nn.Conv2d(1024,1024,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(1024),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(1024,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
nn.ReLU(),
nn.Conv2d(2048,2048,(3, 3),(2, 2),(1, 1),1,64,bias=False),
nn.BatchNorm2d(2048),
nn.ReLU(),
),
nn.Conv2d(2048,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
nn.Sequential( # Sequential,
nn.Conv2d(1024,2048,(1, 1),(2, 2),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(2048,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
nn.ReLU(),
nn.Conv2d(2048,2048,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(2048),
nn.ReLU(),
),
nn.Conv2d(2048,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
nn.Sequential( # Sequential,
LambdaMap(lambda x: x, # ConcatTable,
nn.Sequential( # Sequential,
nn.Sequential( # Sequential,
nn.Conv2d(2048,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
nn.ReLU(),
nn.Conv2d(2048,2048,(3, 3),(1, 1),(1, 1),1,64,bias=False),
nn.BatchNorm2d(2048),
nn.ReLU(),
),
nn.Conv2d(2048,2048,(1, 1),(1, 1),(0, 0),1,1,bias=False),
nn.BatchNorm2d(2048),
),
Lambda(lambda x: x), # Identity,
),
LambdaReduce(lambda x,y: x+y), # CAddTable,
nn.ReLU(),
),
),
nn.AvgPool2d((7, 7),(1, 1)),
Lambda(lambda x: x.view(x.size(0),-1)), # View,
nn.Sequential(Lambda(lambda x: x.view(1,-1) if 1==len(x.size()) else x ),nn.Linear(2048,1000)), # Linear,
)
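# A quick shape sanity check (an added sketch; weights are random unless a
# state dict is loaded separately via resnext_101_64x4d.load_state_dict(...)).
if __name__ == "__main__":
    resnext_101_64x4d.eval()
    x = torch.randn(1, 3, 224, 224)
    with torch.no_grad():
        y = resnext_101_64x4d(x)
    print(y.shape)  # expected: torch.Size([1, 1000])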
|
py
|
1a5d1e8a679c52c31f8baf73dd660aa51dd922cc
|
from .views import UplinkViewSet
from .views import SegmentsListViewSet
UPLINK_LIST = UplinkViewSet.as_view({
'get': 'list',
'post': 'create'
})
UPLINK_DETAIL = UplinkViewSet.as_view({
'get': 'retrieve',
'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'
})
SEGMENTS_LIST = SegmentsListViewSet.as_view({
'get': 'list',
'post': 'create'
})
SEGMENTS_DETAIL = SegmentsListViewSet.as_view({
'get': 'list',
'patch': 'partial_update',
'put': 'update',
'delete': 'destroy',
})
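# A minimal wiring sketch (hypothetical urls.py; route paths and names below are
# assumptions, not taken from this project):
#
#   from django.urls import path
#   urlpatterns = [
#       path('uplinks/', UPLINK_LIST, name='uplink-list'),
#       path('uplinks/<int:pk>/', UPLINK_DETAIL, name='uplink-detail'),
#       path('segments/', SEGMENTS_LIST, name='segments-list'),
#       path('segments/<int:pk>/', SEGMENTS_DETAIL, name='segments-detail'),
#   ]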
|
py
|
1a5d1eafe4c8735dd9dc2b94a9fe975d7046be85
|
import pytest
from .factories.metrics import MetricFactory, RecordFactory
@pytest.mark.django_db
class TestMetricModel:
def test_repr(self):
obj = MetricFactory.create(name='my-metric')
assert str(obj) == 'my-metric'
@pytest.mark.django_db
class TestRecordModel:
def test_repr(self):
obj = RecordFactory.create(value=23)
assert str(obj).startswith('{0}/'.format(obj.metric_id))
assert '/{0:%s}/'.format(obj.timestamp) in str(obj)
assert str(obj).endswith('/23')
|
py
|
1a5d1eeb105e1d98cbc1a07e6cc4fe66ae138684
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Based on https://realpython.com/async-io-python/ with my own modifications
import asyncio
import random
import time
async def part1(n: int) -> str:
i = random.randint(0, 10)
print(f"{n} part1 sleeping for [{i}] seconds")
await asyncio.sleep(i)
    result = "result1"
print(f"{n} part1 returning [{result}]")
return result
async def part2(n: int, arg: str) -> str:
i = random.randint(0, 10)
print(f"{n} part2 receive [{arg}] and sleeping for [{i}] seconds")
await asyncio.sleep(i)
result = f"result2+{arg}"
print(f"{n} part2 receive [{arg}] returning [{result}]")
return result
async def chain(n: int) -> None:
print(f'{n} Starting')
start = time.perf_counter()
p1 = await part1(n)
p2 = await part2(n, p1)
end = time.perf_counter() - start
print(f"{n} -->Chained result: [{p2}] (took {end:0.2f} seconds).")
async def main(*args):
await asyncio.gather(*(chain(n) for n in args))
if __name__ == "__main__":
import sys
random.seed(444)
args = [1, 2, 3] if len(sys.argv) == 1 else map(int, sys.argv[1:])
start = time.perf_counter()
asyncio.run(main(*args))
end = time.perf_counter() - start
print(f"Program finished in {end:0.2f} seconds.")
|
py
|
1a5d2166eb5d7a1e831a4459aebd0961ce49b3fc
|
import torch
import pandas as pd
import numpy as np
def _isEven(num):
return num % 2 == 0
# return next multiple of 16 that is larger than num
def _nextMult16(num):
return (num // 16 + 1) * 16
# given the current dimension return the needed padding on each side
# for this dimension
def _determineDimPadding(dim):
is_even = _isEven(dim)
next_mult = _nextMult16(dim)
if is_even:
padding_lt = (next_mult - dim) // 2
padding_rb = padding_lt
else:
padding_lt = (next_mult - dim) // 2
padding_rb = padding_lt + 1
return padding_lt, padding_rb
class DataPadding:
    # dataV is a tensor variable that is to be padded so that its dimensions
    # are a multiple of 16. This is mainly used for the DCGAN
@staticmethod
def padData(dataV, row_dim, col_dim):
padding_left, padding_right = _determineDimPadding(row_dim)
padding_top, padding_bottom = _determineDimPadding(col_dim)
torch_padder = torch.nn.ZeroPad2d((padding_left, padding_right,
padding_top, padding_bottom))
temp = torch_padder(dataV)
return temp
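# A minimal usage sketch (shapes are illustrative, not from the original project):
# a 59x59 input is padded up to the next multiple of 16 on each spatial
# dimension, with the extra pixel going to the right/bottom for odd sizes.
if __name__ == "__main__":
    sample = torch.zeros(1, 1, 59, 59)
    padded = DataPadding.padData(sample, 59, 59)
    print(padded.shape)  # expected: torch.Size([1, 1, 64, 64])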
|
py
|
1a5d225c3ff7c69a24509578cf1a329a28700395
|
# -*- coding: utf-8 -*-
'''
pip install weather-api
'''
from weather import Weather, Unit
import translator
weather = Weather(unit=Unit.CELSIUS)
'''
w = Weather(Unit.CELSIUS)
lookup = w.lookup_by_latlng(53.3494,-6.2601)
condition = lookup.condition
print(condition.text)
'''
location = weather.lookup_by_location('หนองคาย')
forecasts = location.forecast
def get_now_day():
text="พยากรณ์อากาศวันนี้ \n"+"สภาพอากาศ : "+translator.translator_to_thai(forecasts[0].text)+"\nมีอุณหภูมิสูงสุด : "+forecasts[0].high+" C\nและมีอุณหภูมิต่ำสุด : "+forecasts[0].low+" C"
return text
|
py
|
1a5d22f2c2694deae228107cbbb37963e64d877b
|
#!/usr/bin/env python3
import os
import sys
import math
from hippietrap.hippietrap import HippieTrap, ALL
from hippietrap.pattern import PatternBase
from hippietrap.color import hue_to_color
from time import sleep, time
STEPS = 500
class RainbowPattern(PatternBase):
name = "rainbow"
def pattern(self):
for i in range(STEPS):
self.trap.set_color(ALL, hue_to_color(i / float(STEPS)))
sleep(.02)
if self.stop_thread:
break
|
py
|
1a5d24011dada83b5363b51ab6b17fa102dcb4d9
|
# model settings
temperature = 0.2
with_norm = True
query_dim = 128
model = dict(
type='SimSiamBaseTSNTracker',
backbone=dict(
type='ResNet',
pretrained=None,
depth=18,
out_indices=(3, ),
# strides=(1, 2, 1, 1),
norm_cfg=dict(type='SyncBN', requires_grad=True),
norm_eval=False,
zero_init_residual=True),
# cls_head=None,
# patch_head=None,
att_plugin=dict(
type='SelfAttention', dropout=0., matmul_norm=True, use_residual=True),
img_head=dict(
type='SimSiamHead',
in_channels=512,
norm_cfg=dict(type='SyncBN'),
num_projection_fcs=3,
projection_mid_channels=512,
projection_out_channels=512,
num_predictor_fcs=2,
predictor_mid_channels=128,
predictor_out_channels=512,
with_norm=True,
loss_feat=dict(type='CosineSimLoss', negative=False),
spatial_type='avg'))
# model training and testing settings
train_cfg = dict(
intra_video=False,
att_indices=(3, ),
att_to_target=False,
feat_rescale=True,
aux_as_value=False)
test_cfg = dict(
precede_frames=20,
topk=10,
temperature=0.2,
strides=(1, 2, 1, 1),
out_indices=(2, 3),
neighbor_range=24,
with_first=True,
with_first_neighbor=True,
output_dir='eval_results')
# dataset settings
dataset_type = 'VideoDataset'
dataset_type_val = 'DavisDataset'
data_prefix = 'data/kinetics400/videos_train'
ann_file_train = 'data/kinetics400/kinetics400_train_list_videos.txt'
data_prefix_val = 'data/davis/DAVIS/JPEGImages/480p'
anno_prefix_val = 'data/davis/DAVIS/Annotations/480p'
data_root_val = 'data/davis/DAVIS'
ann_file_val = 'data/davis/DAVIS/ImageSets/davis2017_val_list_rawframes.txt'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_bgr=False)
train_pipeline = [
dict(type='DecordInit'),
dict(type='SampleFrames', clip_len=1, frame_interval=8, num_clips=4),
# dict(type='DuplicateFrames', times=4),
dict(type='DecordDecode'),
dict(
type='RandomResizedCrop',
area_range=(0.2, 1.),
same_across_clip=False,
same_on_clip=False,
same_clip_indices=(1, 3)),
dict(type='Resize', scale=(224, 224), keep_ratio=False),
dict(
type='Flip',
flip_ratio=0.5,
same_across_clip=False,
same_on_clip=False,
same_clip_indices=(1, 3)),
# dict(
# type='ColorJitter',
# brightness=0.4,
# contrast=0.4,
# saturation=0.4,
# hue=0.1,
# p=0.8,
# same_across_clip=False,
# same_on_clip=False),
# dict(
# type='RandomGrayScale',
# p=0.2,
# same_across_clip=False,
# same_on_clip=False),
# dict(
# type='RandomGaussianBlur',
# p=0.5,
# same_across_clip=False,
# same_on_clip=False),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(type='Collect', keys=['imgs', 'label'], meta_keys=[]),
dict(type='ToTensor', keys=['imgs', 'label'])
]
val_pipeline = [
dict(type='SequentialSampleFrames', frame_interval=1),
dict(type='RawFrameDecode'),
dict(type='Resize', scale=(-1, 480), keep_ratio=True),
dict(type='Flip', flip_ratio=0),
dict(type='Normalize', **img_norm_cfg),
dict(type='FormatShape', input_format='NCTHW'),
dict(
type='Collect',
keys=['imgs', 'ref_seg_map'],
meta_keys=('frame_dir', 'frame_inds', 'original_shape', 'seg_map')),
dict(type='ToTensor', keys=['imgs', 'ref_seg_map'])
]
data = dict(
videos_per_gpu=128,
workers_per_gpu=16,
val_workers_per_gpu=1,
train=dict(
type='RepeatDataset',
times=2,
dataset=dict(
type=dataset_type,
ann_file=ann_file_train,
data_prefix=data_prefix,
pipeline=train_pipeline)),
val=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True),
test=dict(
type=dataset_type_val,
ann_file=ann_file_val,
data_prefix=data_prefix_val,
data_root=data_root_val,
anno_prefix=anno_prefix_val,
pipeline=val_pipeline,
test_mode=True))
# optimizer
# optimizer = dict(type='Adam', lr=1e-4)
optimizer = dict(type='SGD', lr=0.05, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(policy='CosineAnnealing', min_lr=0, by_epoch=False)
# lr_config = dict(policy='Fixed')
# lr_config = dict(
# policy='step',
# warmup='linear',
# warmup_iters=100,
# warmup_ratio=0.001,
# step=[1, 2])
total_epochs = 50
checkpoint_config = dict(interval=1, max_keep_ckpts=5)
evaluation = dict(
interval=1,
metrics='davis',
key_indicator='feat_1.J&F-Mean',
rule='greater')
log_config = dict(
interval=50,
hooks=[
dict(type='TextLoggerHook'),
# dict(type='TensorboardLoggerHook'),
dict(
type='WandbLoggerHook',
init_kwargs=dict(
project='mmaction2',
name='{{fileBasenameNoExtension}}',
resume=True,
tags=['ssbt'],
dir='wandb/{{fileBasenameNoExtension}}',
config=dict(
model=model,
train_cfg=train_cfg,
test_cfg=test_cfg,
data=data))),
])
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = False
|
py
|
1a5d249461f062453a6b1917bb613d6904f0865c
|
from keras.layers import Dense,Activation,Dropout,Flatten,Conv2D,MaxPooling2D
from keras.models import Sequential
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import matplotlib.pyplot as plt
from random import shuffle
import numpy as np
from glob import glob
Image_width=128
Image_height=59
threshold=50
Epochs=40
input_shape=(Image_width,Image_height,1)
model=Sequential()
model.add(Conv2D(32,(3,3),input_shape=input_shape))
model.add(Activation('relu'))
model.add(Conv2D(32,(3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))
model.add(Conv2D(64,(3,3)))
model.add(Activation('relu'))
model.add(Conv2D(64,(3,3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2,2),strides=(2,2)))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.4))
model.add(Flatten())
model.add(Dense(4))
model.add(Activation('softmax'))
model.compile(loss='categorical_crossentropy',optimizer=Adam(lr=1e-4,beta_1=0.9,beta_2=0.999,epsilon=1e-8),metrics=['accuracy'])
call_back=TensorBoard(log_dir=r"D:\DL PYTHON\AI\TRAINING MODELS\LOGS")
data=np.load("Training_data.npy",allow_pickle=True)
left=[];right=[];forward=[];reverse=[]
for each_sample in data:
if each_sample[1]==[1,0,0,0]:
left.append(each_sample)
elif each_sample[1]==[0,1,0,0]:
right.append(each_sample)
elif each_sample[1]==[0,0,1,0]:
forward.append(each_sample)
elif each_sample[1]==[0,0,0,1]:
reverse.append(each_sample)
training_data=left[:-threshold]+right[:-threshold]+forward[:-threshold]+reverse[:-threshold]
testing_data=left[-threshold:]+right[-threshold:]+forward[-threshold:]+reverse[-threshold:]
print("Total data-points considered for training : {}".format(len(training_data)))
print("Total data-points considered for testing : {}".format(len(testing_data)))
input("Press any key to start training")
x_train=np.array([data[0] for data in training_data]).reshape(len(training_data),Image_width,Image_height,1)
x_train=x_train/255
y_train=np.array([data[1] for data in training_data])
x_test=np.array([data[0] for data in testing_data]).reshape(len(testing_data),Image_width,Image_height,1)
x_test=x_test/255
y_test=np.array([data[1] for data in testing_data])
h=model.fit(x_train,y_train,epochs=Epochs,validation_data=(x_test,y_test),callbacks=[call_back])
model.save('model.h5')
|
py
|
1a5d24a7203f8c474896299c9087ac0a75b9e5f5
|
import random
import timeit
from PIL import Image
from models import Yolov3Model
from config_reader import CocoConfigReader
from utils.dataset import CocoImagePathFileDataset
from utils.dataloader import get_data_loader
from utils.utils import (convert_corner_to_pyplot_repr,
non_max_suppression, load_classes)
import torch
import numpy as np
from matplotlib import pyplot as plt
import matplotlib.patches as patches
from matplotlib.ticker import NullLocator
YOLO_CONFIG_PATH = "../cfg/yolov3.cfg"
COCO_CONFIG_PATH = "../cfg/coco.data"
def plot_detections_on_image(detections, image):
#Ignore image index
detections = detections[:,1:]
#load class names
classes=load_classes("/home/ubuntu/workspace/pytorch_yolov3/data/coco.names")
img = image.permute(1, 2, 0).numpy()
img = Image.fromarray(np.uint8(img*255))
plt.figure()
fig, ax = plt.subplots(1)
ax.imshow(img)
cmap = plt.get_cmap('tab20')
colors = [cmap(i) for i in np.linspace(0, 1, 30)]
unique_labels = torch.unique(detections[:, -1]).cpu()
n_cls_preds = unique_labels.shape[0]
bbox_colors = random.sample(colors, n_cls_preds)
for prediction in detections:
x1, y1, h, w = convert_corner_to_pyplot_repr(
prediction[:4].unsqueeze(0)).squeeze()
class_prob = prediction[-2]
pred_class = prediction[-1]
color = bbox_colors[int(np.where(unique_labels == int(pred_class))[0])]
bbox = patches.Rectangle((x1, y1), h, w, linewidth=2,
edgecolor=color,
facecolor="none")
# Add the bbox to the image
ax.add_patch(bbox)
# Add class with probability
plt.text(x1, y1, s="P(" + classes[int(pred_class)] +f")={class_prob:.2f}",
color='white', verticalalignment='top',
bbox={'color': color, 'pad': 0})
plt.axis('off') #remove axes
plt.gca().xaxis.set_major_locator(NullLocator())#remove axis markings
plt.gca().yaxis.set_major_locator(NullLocator())
plt.savefig('../inference_test.png' , bbox_inches='tight', pad_inches=0.0)
plt.close()
if __name__ == "__main__":
yolo = Yolov3Model(YOLO_CONFIG_PATH)
yolo.load_weights()
yolo.eval()
data_loader = get_data_loader(COCO_CONFIG_PATH, CocoConfigReader,
CocoImagePathFileDataset, mode="valid")
for i, (image, _) in enumerate(data_loader):
out = yolo(image)
out = out.to("cuda")
#Also changes (center_x, center_y, x, y) to (x1, y1, x2, y2)
detections = non_max_suppression(out, object_thresh=0.7)
plot_detections_on_image(detections[0], image[0])
print ("Image generated")
break
|
py
|
1a5d24aaddd417b60f645ba509496f06ae77c58e
|
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Qiskit Chemistry User Interface imports"""
from ._uipreferences import UIPreferences
__all__ = ['UIPreferences']
|
py
|
1a5d253a72be695283fcaa94b56cc588d1f165a4
|
from __future__ import print_function, division, absolute_import, with_statement, unicode_literals, generators
import os
import argparse
import importlib
def flag(s):
return bool(int(s))
def get_data_name(config):
return '{}__'.format(config.training_data_hparams['source_dataset']['vocab_file'].split('/')[-2])
def get_model_name(config):
name = ''
name += 'model_{}__'.format(config.task)
name += 'dim_{}__'.format(config.dim)
name += 'embdim_{}__'.format(config.embdim)
name += 'nlayerssrc_{}__'.format(config.nlayerssrc)
name += 'nlayerstgt_{}__'.format(config.nlayerstgt)
name += 'bidir_{}__'.format(config.bidir)
return name
def get_train_name(config):
name = ''
name += 'bleu_w_{}__'.format(config.bleu_w)
name += 'max_order_{}__'.format(config.max_order)
name += 'dropout_{}__'.format(config.dropout)
name += 'soft_length_mask_{}__'.format(config.soft_length_mask)
name += 'recall_w_{}__'.format(config.recall_w)
name += 'max_decode_length_{}__'.format(config.max_decode_length)
name += 'gamma_{}__'.format(config.gamma)
name += 'lr_{}__'.format(config.lr)
name += 'pretrain_{}__'.format(config.pretrain)
if config.enable_pg:
if config.enable_xe:
name += 'xe_w_{}__'.format(config.xe_w)
if config.enable_pg:
name += 'pg_w_{}__'.format(config.pg_w)
if config.enable_bleu:
name += 'fix_rate_{}_{}_{}__'.format(config.fix_teach_gap, config.teach_gap, config.teach_cont)
name += 'teach_anneal_{}_{}_{}__'.format(config.initial_teach_rate, config.teach_rate_anneal, config.teach_rate_anneal_steps)
if hasattr(config, 'teach_X'):
name += 'teach_X_{}__'.format(config.teach_X)
if hasattr(config, 'seed'):
name += 'seed_{}__'.format(config.seed)
return name
argparser = argparse.ArgumentParser()
argparser.add_argument('--train', type=str, default='train_config')
argparser.add_argument('--model', type=str, default='model_config')
argparser.add_argument('--data', type=str, default='data_configs')
argparser.add_argument('--verbose', type=str, default='verbose_config')
argparser.add_argument('--running_mode', type=str, default='train')
argparser.add_argument('--caption', type=str, default='')
args = argparser.parse_args()
train_config = importlib.import_module(args.train)
model_config = importlib.import_module(args.model)
data_config = importlib.import_module(args.data)
verbose_config = importlib.import_module(args.verbose)
if args.caption:
captioning = True
caption_config = importlib.import_module(args.caption)
else:
captioning = False
mBLEU = train_config.mBLEU
if hasattr(train_config, "exp_name"):
exp_name = train_config.exp_name
else:
raise Exception("train config has no exp_name")
exp_name = get_data_name(data_config) + get_model_name(model_config) + get_train_name(train_config)
logdir = os.path.join('log', exp_name)
if not os.path.exists(logdir):
os.makedirs(logdir)
logging_file = os.path.join(logdir, 'logging.txt')
|
py
|
1a5d25d6c16afa7d551fb8effb37e68cfec9afed
|
# APIs for Windows 64-bit MSVC 2013 runtime library (msvcr120).
# Built as a delta from the 32-bit version.
# Format: retval, rettype, callconv, exactname, arglist(type, name)
# arglist type is one of ['int', 'void *']
# arglist name is one of [None, 'funcptr', 'obj', 'ptr']
# List the normalized name of any 32-bit functions to omit.
api_32_omits = [
'msvcr120.??2@yapaxi@z',
'msvcr120.??_u@yapaxi@z',
'msvcr120.??3@yaxpax@z',
'msvcr120.??_v@yaxpax@z'
]
# Define any functions specific to 64-bit.
api_64_adds = {
'msvcr120.??2@yapeax_k@z':( 'int', None, 'cdecl', 'msvcr120.??2@YAPEAX_K@Z', (('int', None),) ),
'msvcr120.??_u@yapeax_k@z':( 'int', None, 'cdecl', 'msvcr120.??_U@YAPEAX_K@Z', (('int', None),) ),
'msvcr120.??3@yaxpeax@z':( 'void', None, 'cdecl', 'msvcr120.??3@YAXPEAX@Z', (('void *', 'ptr'),) ),
'msvcr120.??_v@yaxpeax@z':( 'void', None, 'cdecl', 'msvcr120.??_V@YAXPEAX@Z', (('void *', 'ptr'),) ),
}
# Build from the 32-bit API, skipping omits, changing the calling convention,
# and adding any specific 64-bit functions.
api_defs_64 = {}
import vivisect.impapi.windows.msvcr120_32 as m32
for name in m32.api_defs.iterkeys():
if name in api_32_omits:
continue
(rtype,rname,cconv,cname,cargs) = m32.api_defs[name]
api_defs_64[name] = (rtype, rname, 'msx64call', cname, cargs)
api_defs_64.update(api_64_adds)
|
py
|
1a5d26ec4a4ec6aa2bbbeccb9154dcc36fefc26b
|
import requests
import sys
def request_api(username: str) -> list:
page = 1
followers = []
while True:
url = "https://api.github.com/users/"+username+"/followers?page="+str(page)+"&per_page=100"
req = requests.get(url)
if req.status_code == 200:
data = req.json()
if len(data) == 0:
break
else:
followers += data
page += 1
else:
print("URL: " + url)
print("request status_code:"+str(req.status_code))
sys.exit(-1)
return followers
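# A minimal usage sketch (hypothetical username; makes live calls to the GitHub API):
if __name__ == "__main__":
    followers = request_api("octocat")
    print("followers fetched: {}".format(len(followers)))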
|
py
|
1a5d274b454846ba664ecd351874b818ec787921
|
import sys
n, m, *sc = map(int, sys.stdin.read().split())
sc = zip(*[iter(sc)] * 2)
def main():
res = [0] * n
determined = set()
for s, c in sc:
if s in determined:
if res[s-1] != c:
return -1
else:
res[s-1] = c
determined.add(s)
if n > 1 and not res[0] == 0:
return -1
res = ''.join([str(d) for d in res])
return res
if __name__ == '__main__':
ans = main()
print(ans)
|
py
|
1a5d28a61f973272951cb3fff1766f85f307789f
|
_base_ = [
'../_base_/datasets/waymo_cars_and_peds.py',
'../_base_/models/pointnet2_seg.py',
'../_base_/schedules/cyclic_20e.py', '../_base_/default_runtime.py'
]
# data settings
data = dict(samples_per_gpu=16)
evaluation = dict(interval=50)
# runtime settings
checkpoint_config = dict(interval=50)
# PointNet2-MSG needs longer training time than PointNet2-SSG
runner = dict(type='EpochBasedRunner', max_epochs=1000)
#resume_from = 'work_dirs/pointnet2_cars_and_peds/latest.pth'
|
py
|
1a5d28b1b1c13b938263d2ba5d8b1cf14898dfba
|
import unittest
from panelexpr._utils.utils import *
from panelexpr.base.operator import TimeSeriesOperator
import panelexpr as pe
import pandas as pd
THRESHOLD = 1e-6
class MyMovingAverageOperator(TimeSeriesOperator):
def eval(self, series: pd.Series, window):
s = series.rolling(window).mean()
return s
pe.register("my_ma", MyMovingAverageOperator)
class BasicTest(unittest.TestCase):  # inherits from unittest.TestCase
@classmethod
def setUpClass(cls):
        # The @classmethod decorator is required; this runs once before all tests
cls.data = pd.read_csv("data/sample_zh_2.csv")
def test_rolling_mean(self):
s1 = pe.eval("mmean(Open, 2, group_by='windcode')", data=self.data)
s2 = pe.eval("my_ma(Open, 2)", data=self.data, group_tag="windcode")
v = mean_absolute_deviation(s1, s2)
matching = nan_matching(s1, s2)
self.assertTrue(v < THRESHOLD)
self.assertTrue(matching)
if __name__ == '__main__':
    unittest.main()  # run all test cases
|
py
|
1a5d29379e009e1778283a10c278719020e120b9
|
from django.db import models
from mongoengine import DynamicDocument, ListField, StringField
class CollectionList(DynamicDocument):
meta = {'collection' : 'CollectionList'}
collectionlist = ListField(StringField())
class testcl(DynamicDocument): # raw tweet data
meta = {'collection' : 'testcl'}
text = StringField()
class rt_eliminated(DynamicDocument): # retweets are eliminated
meta = {'collection' : 'rt_eliminated'}
text = StringField()
class duplicates_eliminated(DynamicDocument): # duplicates are eliminated
meta = {'collection' : 'duplicates_eliminated'}
text = StringField()
class Clusters(DynamicDocument):
meta = {'abstract': True,}
ctweettuplelist = ListField(StringField())
cstr = StringField()
cno = StringField()
cnoprefix = StringField()
rif = ListField(StringField())
twids = ListField(StringField())
user_entropy = StringField()
label = StringField()
class all_data_clusters(Clusters):
meta = {'collection': 'all_data_clusters'}
class genocide_clusters_20151005(Clusters):
meta = {'collection': 'genocide_clusters_20151005'}
|
py
|
1a5d29505cc8a234f652af85664745f8ced3dbab
|
from pytorch_toolbelt.modules import ABN
from pytorch_toolbelt.modules import decoders as D
from pytorch_toolbelt.modules import encoders as E
from torch import nn
from torch.nn import functional as F
from ..dataset import OUTPUT_MASK_32_KEY, OUTPUT_MASK_KEY
from catalyst.registry import Model
__all__ = ["DeeplabV3SegmentationModel", "resnet34_deeplab128", "seresnext101_deeplab256"]
class DeeplabV3SegmentationModel(nn.Module):
def __init__(
self,
encoder: E.EncoderModule,
num_classes: int,
dropout=0.25,
abn_block=ABN,
high_level_bottleneck=256,
low_level_bottleneck=32,
full_size_mask=True,
):
super().__init__()
self.encoder = encoder
self.decoder = D.DeeplabV3Decoder(
feature_maps=encoder.output_filters,
output_stride=encoder.output_strides[-1],
num_classes=num_classes,
high_level_bottleneck=high_level_bottleneck,
low_level_bottleneck=low_level_bottleneck,
abn_block=abn_block,
dropout=dropout,
)
self.full_size_mask = full_size_mask
def forward(self, x):
enc_features = self.encoder(x)
# Decode mask
mask, dsv = self.decoder(enc_features)
if self.full_size_mask:
mask = F.interpolate(mask, size=x.size()[2:], mode="bilinear", align_corners=False)
output = {OUTPUT_MASK_KEY: mask, OUTPUT_MASK_32_KEY: dsv}
return output
@Model
def resnet34_deeplab128(num_classes=1, dropout=0.0, pretrained=True):
encoder = E.Resnet34Encoder(pretrained=pretrained)
return DeeplabV3SegmentationModel(encoder, num_classes=num_classes, high_level_bottleneck=128, dropout=dropout)
@Model
def seresnext101_deeplab256(num_classes=1, dropout=0.0, pretrained=True):
encoder = E.SEResNeXt101Encoder(pretrained=pretrained)
return DeeplabV3SegmentationModel(encoder, num_classes=num_classes, high_level_bottleneck=256, dropout=dropout)
|
py
|
1a5d29bf369f61e0464cc4c7a15dba3a789b8482
|
"""Script to transform and upload IRENA's capacity data to Resource Watch.
IRENA information is available through a Tableau applet.
This data must be downloaded manually, it is not possible to acquire
through an HTTP GET as we can tell.
Once downloaded, only minor transformation is needed to prepare it for upload.
The core issue is that the information does not fall into a data cube without
aggregating some rows to fit with expectations around data dimensionality.
It seems the data should be keyed on the dimensions:
- country
- year
- most granular technology (e.g. "offshore wind" not "wind")
- on-grid/off-grid
When keyed in this way there are still many compound keys
that have multiple rows and need to be summed to produce the
values expressed in Tableau visualization.
"""
import os
import pandas as pd
from zipfile import ZipFile
import shutil
import sys
utils_path = os.path.join(os.path.abspath(os.getenv('PROCESSING_DIR')),'utils')
if utils_path not in sys.path:
sys.path.append(utils_path)
import util_files
import util_cloud
import util_carto
import logging
# Set up logging
# Get the top-level logger object
logger = logging.getLogger()
for handler in logger.handlers: logger.removeHandler(handler)
logger.setLevel(logging.INFO)
# make it print to the console.
console = logging.StreamHandler()
logger.addHandler(console)
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# name of table on Carto where you want to upload data
# this should be a table name that is not currently in use
dataset_name = 'ene_009_renewable_generation_annually'
logger.info('Executing script for dataset: ' + dataset_name)
# create a new sub-directory within your specified dir called 'data'
# within this directory, create files to store raw and processed data
data_dir = util_files.prep_dirs(dataset_name)
'''
Download data and save to your data directory
'''
# data can be downloaded by following the steps in the 'Data Acquisition' section of the README file
# generate path to downloaded data file
download = os.path.join(os.path.expanduser("~"), 'Downloads', 'Export_Full_Data_data.csv')
# Move this file into your data directory
raw_data_file = os.path.join(data_dir, os.path.basename(download))
shutil.move(download,raw_data_file)
'''
Process data
'''
# read in csv file as Dataframe
df = pd.read_csv(raw_data_file, dtype=str)
# filter pumped storage plants just like IRENA default
df = df[df['Sub-technology'] != 'Pumped Storage']
# convert values from string to float because summing later
df['Values_asfloat'] = df['Values'].astype(float)
# subset into generation
generation_data = df[df['DataType'] == 'Electricity Generation']
# assuming GWh everywhere, check that; yes the field name has a space at the end
assert (generation_data['Unit '] == 'GWh').all()
# group by the key dimensions
grouped = generation_data.groupby(['ISO', 'Years', 'Sub-technology', 'Type'])
# ensure Technology is mapped 1:1 with Sub-technology
assert grouped.agg({'Technology': lambda x: len(set(x)) == 1}).Technology.all()
# create the data frame, renaming values and organizing the column order
data = grouped.agg({
'Values_asfloat': 'sum', # sum the numeric capacity value
'IRENA Label': 'first', # take a long name for the country
'Technology': 'first', # take the technology (superclass)
}).reset_index().rename(columns={
'ISO': 'iso_a3',
'Years': 'year',
'Sub-technology': 'subtechnology',
'Technology': 'technology',
'Type': 'grid_connection',
'IRENA Label': 'country_name',
'Values_asfloat': 'generation_GWh',
})[[ # set a new column order
'iso_a3', # key
'country_name', # 1:1 with iso_a3
'year', # key
'subtechnology', # key
'technology', # 1:n with subtechnology
'grid_connection', # key
'generation_GWh' # the numeric generation value in gigawatt-hours
]]
#save processed dataset to csv
processed_data_file = os.path.join(data_dir, dataset_name+'_edit.csv')
data.to_csv(processed_data_file, index=False)
'''
Upload processed data to Carto
'''
logger.info('Uploading processed data to Carto.')
util_carto.upload_to_carto(processed_data_file, 'LINK')
'''
Upload original data and processed data to Amazon S3 storage
'''
# initialize AWS variables
aws_bucket = 'wri-public-data'
s3_prefix = 'resourcewatch/'
logger.info('Uploading original data to S3.')
# Upload raw data file to S3
# Copy the raw data into a zipped file to upload to S3
raw_data_dir = os.path.join(data_dir, dataset_name+'.zip')
with ZipFile(raw_data_dir,'w') as zip:
zip.write(raw_data_file, os.path.basename(raw_data_file))
# Upload raw data file to S3
uploaded = util_cloud.aws_upload(raw_data_dir, aws_bucket, s3_prefix+os.path.basename(raw_data_dir))
logger.info('Uploading processed data to S3.')
# Copy the processed data into a zipped file to upload to S3
processed_data_dir = os.path.join(data_dir, dataset_name+'_edit.zip')
with ZipFile(processed_data_dir,'w') as zip:
zip.write(processed_data_file, os.path.basename(processed_data_file))
# Upload processed data file to S3
uploaded = util_cloud.aws_upload(processed_data_dir, aws_bucket, s3_prefix+os.path.basename(processed_data_dir))
|
py
|
1a5d2ab351ba6168385f24bba1a80fdd7f8e8a4a
|
from fastapi import APIRouter, Request
from app.dependencies import templates
from app.internal.celebrity import get_today_month_and_day
router = APIRouter()
@router.get("/celebrity")
def celebrity(request: Request):
today = get_today_month_and_day()
return templates.TemplateResponse("celebrity.html", {
"request": request,
"date": today
})
|
py
|
1a5d2adb4296f6091acb359150080e4003ad0639
|
#!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Copyright (c) 2017-2018 The Cryptomiles Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test logic for setting nMinimumChainWork on command line.
Nodes don't consider themselves out of "initial block download" until
their active chain has more work than nMinimumChainWork.
Nodes don't download blocks from a peer unless the peer's best known block
has more work than nMinimumChainWork.
While in initial block download, nodes won't relay blocks to their peers, so
test that this parameter functions as intended by verifying that block relay
only succeeds past a given node once its nMinimumChainWork has been exceeded.
"""
import time
from test_framework.test_framework import CryptomilesTestFramework
from test_framework.util import sync_blocks, connect_nodes, assert_equal
# 2 hashes required per regtest block (with no difficulty adjustment)
REGTEST_WORK_PER_BLOCK = 2
class MinimumChainWorkTest(CryptomilesTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 3
self.extra_args = [[], ["-minimumchainwork=0x65"], ["-minimumchainwork=0x65"]]
self.node_min_work = [0, 101, 101]
def setup_network(self):
# This test relies on the chain setup being:
# node0 <- node1 <- node2
# Before leaving IBD, nodes prefer to download blocks from outbound
# peers, so ensure that we're mining on an outbound peer and testing
# block relay to inbound peers.
self.setup_nodes()
for i in range(self.num_nodes-1):
connect_nodes(self.nodes[i+1], i)
def run_test(self):
# Start building a chain on node0. node2 shouldn't be able to sync until node1's
# minchainwork is exceeded
starting_chain_work = REGTEST_WORK_PER_BLOCK # Genesis block's work
self.log.info("Testing relay across node %d (minChainWork = %d)", 1, self.node_min_work[1])
starting_blockcount = self.nodes[2].getblockcount()
num_blocks_to_generate = int((self.node_min_work[1] - starting_chain_work) / REGTEST_WORK_PER_BLOCK)
self.log.info("Generating %d blocks on node0", num_blocks_to_generate)
hashes = self.nodes[0].generate(num_blocks_to_generate)
self.log.info("Node0 current chain work: %s", self.nodes[0].getblockheader(hashes[-1])['chainwork'])
# Sleep a few seconds and verify that node2 didn't get any new blocks
# or headers. We sleep, rather than sync_blocks(node0, node1) because
# it's reasonable either way for node1 to get the blocks, or not get
# them (since they're below node1's minchainwork).
time.sleep(3)
self.log.info("Verifying node 2 has no more blocks than before")
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
# Node2 shouldn't have any new headers yet, because node1 should not
# have relayed anything.
assert_equal(len(self.nodes[2].getchaintips()), 1)
assert_equal(self.nodes[2].getchaintips()[0]['height'], 0)
assert self.nodes[1].getbestblockhash() != self.nodes[0].getbestblockhash()
assert_equal(self.nodes[2].getblockcount(), starting_blockcount)
self.log.info("Generating one more block")
self.nodes[0].generate(1)
self.log.info("Verifying nodes are all synced")
# Because nodes in regtest are all manual connections (eg using
# addnode), node1 should not have disconnected node0. If not for that,
# we'd expect node1 to have disconnected node0 for serving an
# insufficient work chain, in which case we'd need to reconnect them to
# continue the test.
self.sync_all()
self.log.info("Blockcounts: %s", [n.getblockcount() for n in self.nodes])
if __name__ == '__main__':
MinimumChainWorkTest().main()
|
py
|
1a5d2af5dd5f7f43da155d561a05d02610123337
|
"""This module implements the AppFutures.
We have two basic types of futures:
1. DataFutures which represent data objects
2. AppFutures which represent the futures on App/Leaf tasks.
"""
from concurrent.futures import Future
import logging
import threading
from parsl.app.errors import RemoteExceptionWrapper
logger = logging.getLogger(__name__)
# Possible future states (for internal use by the futures package).
PENDING = 'PENDING'
RUNNING = 'RUNNING'
# The future was cancelled by the user...
CANCELLED = 'CANCELLED'
# ...and _Waiter.add_cancelled() was called by a worker.
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
FINISHED = 'FINISHED'
_STATE_TO_DESCRIPTION_MAP = {
PENDING: "pending",
RUNNING: "running",
CANCELLED: "cancelled",
CANCELLED_AND_NOTIFIED: "cancelled",
FINISHED: "finished"
}
class AppFuture(Future):
"""An AppFuture wraps a sequence of Futures which may fail and be retried.
An AppFuture starts with no parent future. A sequence of parent futures may
be assigned by code outside of this class, by passing that new parent future
into "update_future".
The AppFuture will set its result to the result of the parent future, if that
parent future completes without an exception. This result setting should
cause .result(), .exception() and done callbacks to fire as expected when a
Future has a result set.
The AppFuture will not set its result to the result of the parent future, if
that parent future completes with an exception, and if that parent future
has retries left. In that case, no result(), exception() or done callbacks should
report a result.
The AppFuture will set its result to the result of the parent future, if that
parent future completes with an exception and if that parent future has no
retries left, or if it has no retry field. .result(), .exception() and done callbacks
should give a result as expected when a Future has a result set
The parent future may return a RemoteExceptionWrapper as a result
    and AppFuture will treat this as an exception for the above
retry and result handling behaviour.
"""
def __init__(self, tid=None, stdout=None, stderr=None):
"""Initialize the AppFuture.
Args:
KWargs:
- tid (Int) : Task id should be any unique identifier. Now Int.
- stdout (str) : Stdout file of the app.
Default: None
- stderr (str) : Stderr file of the app.
Default: None
"""
self._tid = tid
super().__init__()
self.parent = None
self._update_lock = threading.Lock()
self._outputs = []
self._stdout = stdout
self._stderr = stderr
def parent_callback(self, executor_fu):
"""Callback from a parent future to update the AppFuture.
Used internally by AppFuture, and should not be called by code using AppFuture.
Args:
- executor_fu (Future): Future returned by the executor along with callback.
This may not be the current parent future, as the parent future may have
already been updated to point to a retrying execution, and in that case,
this is logged.
In the case that a new parent has been attached, we must immediately discard
this result no matter what it contains (although it might be interesting
to log if it was successful...)
Returns:
- None
Updates the super() with the result() or exception()
"""
with self._update_lock:
if not executor_fu.done():
raise ValueError("done callback called, despite future not reporting itself as done")
# this is for consistency checking
if executor_fu != self.parent:
if executor_fu.exception() is None and not isinstance(executor_fu.result(), RemoteExceptionWrapper):
# ... then we completed with a value, not an exception or wrapped exception,
# but we've got an updated executor future.
# This is bad - for example, we've started a retry even though we have a result
raise ValueError("internal consistency error: AppFuture done callback called without an exception, but parent has been changed since then")
try:
res = executor_fu.result()
if isinstance(res, RemoteExceptionWrapper):
res.reraise()
super().set_result(executor_fu.result())
except Exception as e:
if executor_fu.retries_left > 0:
# ignore this exception, because assume some later
# parent executor, started external to this class,
# will provide the answer
pass
else:
super().set_exception(e)
@property
def stdout(self):
return self._stdout
@property
def stderr(self):
return self._stderr
@property
def tid(self):
return self._tid
def update_parent(self, fut):
"""Add a callback to the parent to update the state.
This handles the case where the user has called result on the AppFuture
before the parent exists.
"""
self.parent = fut
try:
fut.add_done_callback(self.parent_callback)
except Exception as e:
logger.error("add_done_callback got an exception {} which will be ignored".format(e))
def cancel(self):
raise NotImplementedError("Cancel not implemented")
def cancelled(self):
return False
def running(self):
if self.parent:
return self.parent.running()
else:
return False
@property
def outputs(self):
return self._outputs
def __repr__(self):
return '<%s super=%s parent=%s>' % (
self.__class__.__name__,
super().__repr__(),
self.parent.__repr__())
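# A minimal illustrative sketch of the parent-future handling described in the
# class docstring (names are hypothetical; the executor future is normally
# attached by the data flow machinery, not by user code):
#
#   app_fu = AppFuture(tid=1)
#   app_fu.update_parent(executor_future)  # a Future produced by an executor
#   result = app_fu.result()               # resolves once the parent completes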
|
py
|
1a5d2af8d84e09dcef2a650c2b8d3ac717f793f6
|
import hashlib
def hash_string(string):
return hashlib.md5(string.encode("utf")).hexdigest()
def read_file(file_path, allow_error=False):
try:
with open(file_path, 'r') as myfile:
return myfile.read()
except Exception as e:
if not allow_error:
raise Exception(str(e))
return None
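# A minimal usage sketch: hash a literal string and read this module's own source.
if __name__ == "__main__":
    print(hash_string("hello world"))                  # md5 hex digest
    print(read_file(__file__, allow_error=True)[:20])  # first characters of this file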
|
py
|
1a5d2b5d8c9fe12023a87d974321190e4ef81d55
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cpovc_ovc', '0002_remove_ovcregistration_art_status'),
]
operations = [
migrations.AddField(
model_name='ovcregistration',
name='art_status',
field=models.CharField(max_length=4, null=True),
),
]
|
py
|
1a5d2b6620408cfd6fd472d06e0069cae2beded9
|
"""Posterior/Prior predictive plot."""
from numbers import Integral
import platform
import logging
import numpy as np
from .plot_utils import (
xarray_var_iter,
_scale_fig_size,
default_grid,
filter_plotters_list,
get_plotting_function,
)
from ..utils import _var_names
_log = logging.getLogger(__name__)
def plot_ppc(
data,
kind="kde",
alpha=None,
mean=True,
figsize=None,
textsize=None,
data_pairs=None,
var_names=None,
coords=None,
flatten=None,
flatten_pp=None,
num_pp_samples=None,
random_seed=None,
jitter=None,
animated=False,
animation_kwargs=None,
legend=True,
ax=None,
backend=None,
backend_kwargs=None,
group="posterior",
show=None,
):
"""
Plot for posterior/prior predictive checks.
Parameters
----------
data : az.InferenceData object
InferenceData object containing the observed and posterior/prior predictive data.
kind : str
Type of plot to display (kde, cumulative, or scatter). Defaults to kde.
alpha : float
Opacity of posterior/prior predictive density curves.
Defaults to 0.2 for kind = kde and cumulative, for scatter defaults to 0.7
mean : bool
Whether or not to plot the mean posterior/prior predictive distribution. Defaults to True
figsize : tuple
Figure size. If None it will be defined automatically.
textsize: float
Text size scaling factor for labels, titles and lines. If None it will be
autoscaled based on figsize.
data_pairs : dict
Dictionary containing relations between observed data and posterior/prior predictive data.
Dictionary structure:
- key = data var_name
- value = posterior/prior predictive var_name
For example, `data_pairs = {'y' : 'y_hat'}`
If None, it will assume that the observed data and the posterior/prior
predictive data have the same variable name.
var_names : list
List of variables to be plotted. Defaults to all observed variables in the
model if None.
coords : dict
Dictionary mapping dimensions to selected coordinates to be plotted.
Dimensions without a mapping specified will include all coordinates for
that dimension. Defaults to including all coordinates for all
dimensions if None.
flatten : list
List of dimensions to flatten in observed_data. Only flattens across the coordinates
specified in the coords argument. Defaults to flattening all of the dimensions.
flatten_pp : list
List of dimensions to flatten in posterior_predictive/prior_predictive. Only flattens
across the coordinates specified in the coords argument. Defaults to flattening all
of the dimensions. Dimensions should match flatten excluding dimensions for data_pairs
parameters. If flatten is defined and flatten_pp is None, then `flatten_pp=flatten`.
num_pp_samples : int
The number of posterior/prior predictive samples to plot. For `kind` = 'scatter' and
        `animation = False` it defaults to a maximum of 5 samples and will set jitter to 0.7
unless defined otherwise. Otherwise it defaults to all provided samples.
random_seed : int
Random number generator seed passed to numpy.random.seed to allow
reproducibility of the plot. By default, no seed will be provided
and the plot will change each call if a random sample is specified
by `num_pp_samples`.
jitter : float
If kind is "scatter", jitter will add random uniform noise to the height
of the ppc samples and observed data. By default 0.
animated : bool
Create an animation of one posterior/prior predictive sample per frame. Defaults to False.
animation_kwargs : dict
Keywords passed to `animation.FuncAnimation`.
legend : bool
Add legend to figure. By default True.
ax: axes, optional
Matplotlib axes or bokeh figures.
backend: str, optional
Select plotting backend {"matplotlib","bokeh"}. Default "matplotlib".
backend_kwargs: bool, optional
These are kwargs specific to the backend being used. For additional documentation
check the plotting method of the backend.
group : {"prior", "posterior"}, optional
Specifies which InferenceData group should be plotted. Defaults to 'posterior'.
Other value can be 'prior'.
show : bool, optional
Call backend show function.
Returns
-------
axes : matplotlib axes or bokeh figures
Examples
--------
Plot the observed data KDE overlaid on posterior predictive KDEs.
.. plot::
:context: close-figs
>>> import arviz as az
>>> data = az.load_arviz_data('radon')
>>> az.plot_ppc(data,data_pairs={"obs":"obs"})
>>> #az.plot_ppc(data,data_pairs={"obs":"obs_hat"})
Plot the overlay with empirical CDFs.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='cumulative')
Use the coords and flatten parameters to plot selected variable dimensions
across multiple plots.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, coords={'observed_county': ['ANOKA', 'BELTRAMI']}, flatten=[])
Plot the overlay using a stacked scatter plot that is particularly useful
when the sample sizes are small.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, kind='scatter', flatten=[],
>>> coords={'observed_county': ['AITKIN', 'BELTRAMI']})
Plot random posterior predictive sub-samples.
.. plot::
:context: close-figs
>>> az.plot_ppc(data, num_pp_samples=30, random_seed=7)
"""
if group not in ("posterior", "prior"):
raise TypeError("`group` argument must be either `posterior` or `prior`")
for groups in ("{}_predictive".format(group), "observed_data"):
if not hasattr(data, groups):
raise TypeError(
'`data` argument must have the group "{group}" for ppcplot'.format(group=groups)
)
if kind.lower() not in ("kde", "cumulative", "scatter"):
raise TypeError("`kind` argument must be either `kde`, `cumulative`, or `scatter`")
if data_pairs is None:
data_pairs = {}
if animation_kwargs is None:
animation_kwargs = {}
if platform.system() == "Linux":
animation_kwargs.setdefault("blit", True)
else:
animation_kwargs.setdefault("blit", False)
if animated and backend == "bokeh":
raise TypeError("Animation option is only supported with matplotlib backend.")
if animated and animation_kwargs["blit"] and platform.system() != "Linux":
_log.warning(
"If you experience problems rendering the animation try setting"
"`animation_kwargs({'blit':False}) or changing the plotting backend (e.g. to TkAgg)"
)
if alpha is None:
if animated:
alpha = 1
else:
if kind.lower() == "scatter":
alpha = 0.7
else:
alpha = 0.2
if jitter is None:
jitter = 0.0
assert jitter >= 0.0
observed = data.observed_data
if group == "posterior":
predictive_dataset = data.posterior_predictive
elif group == "prior":
predictive_dataset = data.prior_predictive
if var_names is None:
var_names = list(observed.data_vars)
var_names = _var_names(var_names, observed)
pp_var_names = [data_pairs.get(var, var) for var in var_names]
pp_var_names = _var_names(pp_var_names, predictive_dataset)
if flatten_pp is None and flatten is None:
flatten_pp = list(predictive_dataset.dims.keys())
elif flatten_pp is None:
flatten_pp = flatten
if flatten is None:
flatten = list(observed.dims.keys())
if coords is None:
coords = {}
if random_seed is not None:
np.random.seed(random_seed)
total_pp_samples = predictive_dataset.sizes["chain"] * predictive_dataset.sizes["draw"]
if num_pp_samples is None:
if kind == "scatter" and not animated:
num_pp_samples = min(5, total_pp_samples)
else:
num_pp_samples = total_pp_samples
if (
not isinstance(num_pp_samples, Integral)
or num_pp_samples < 1
or num_pp_samples > total_pp_samples
):
raise TypeError(
"`num_pp_samples` must be an integer between 1 and "
+ "{limit}.".format(limit=total_pp_samples)
)
pp_sample_ix = np.random.choice(total_pp_samples, size=num_pp_samples, replace=False)
for key in coords.keys():
coords[key] = np.where(np.in1d(observed[key], coords[key]))[0]
obs_plotters = filter_plotters_list(
list(
xarray_var_iter(
observed.isel(coords), skip_dims=set(flatten), var_names=var_names, combined=True
)
),
"plot_ppc",
)
length_plotters = len(obs_plotters)
pp_plotters = [
tup
for _, tup in zip(
range(length_plotters),
xarray_var_iter(
predictive_dataset.isel(coords),
var_names=pp_var_names,
skip_dims=set(flatten_pp),
combined=True,
),
)
]
rows, cols = default_grid(length_plotters)
(figsize, ax_labelsize, _, xt_labelsize, linewidth, markersize) = _scale_fig_size(
figsize, textsize, rows, cols
)
ppcplot_kwargs = dict(
ax=ax,
length_plotters=length_plotters,
rows=rows,
cols=cols,
figsize=figsize,
animated=animated,
obs_plotters=obs_plotters,
pp_plotters=pp_plotters,
predictive_dataset=predictive_dataset,
pp_sample_ix=pp_sample_ix,
kind=kind,
alpha=alpha,
linewidth=linewidth,
mean=mean,
xt_labelsize=xt_labelsize,
ax_labelsize=ax_labelsize,
jitter=jitter,
total_pp_samples=total_pp_samples,
legend=legend,
markersize=markersize,
animation_kwargs=animation_kwargs,
num_pp_samples=num_pp_samples,
backend_kwargs=backend_kwargs,
show=show,
)
if backend == "bokeh":
ppcplot_kwargs.pop("animated")
ppcplot_kwargs.pop("animation_kwargs")
ppcplot_kwargs.pop("legend")
ppcplot_kwargs.pop("xt_labelsize")
ppcplot_kwargs.pop("ax_labelsize")
# TODO: Add backend kwargs
plot = get_plotting_function("plot_ppc", "ppcplot", backend)
axes = plot(**ppcplot_kwargs)
return axes
|
py
|
1a5d2cc28bd3b7ade5468ca9669e38e94239239d
|
import warnings
import json
import os
from vaex.utils import _ensure_strings_from_expressions
class DataFrameAccessorML(object):
def __init__(self, df):
self.df = df
def state_transfer(self):
from .transformations import StateTransfer
state = self.df.state_get()
state.pop('active_range') # we are not interested in this..
return StateTransfer(state=state)
def train_test_split(self, test_size=0.2, strings=True, virtual=True, verbose=True):
        '''Will split the DataFrame into train and test parts, assuming it is shuffled.
:param test_size: The fractional size of the test set.
:param strings: If True, the output DataFrames will also contain string columns, if any.
        :param virtual: If True, the output DataFrames will also contain virtual columns, if any.
:param verbose: If True, print warnings to screen.
'''
if verbose:
warnings.warn('Make sure the DataFrame is shuffled')
initial = None
try:
assert self.df.filtered is False, 'Filtered DataFrames are not yet supported.'
# full_length = len(self)
df = self.df.trim()
initial = self.df.get_active_range()
df.set_active_fraction(test_size)
test = df.trim()
__, end = df.get_active_range()
df.set_active_range(end, df.length_original())
train = df.trim()
finally:
if initial is not None:
df.set_active_range(*initial)
return train, test
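# A minimal usage sketch of the split above (assumes the accessor is registered
# as `df.ml` on an already-shuffled vaex DataFrame):
#
#   train, test = df.ml.train_test_split(test_size=0.2)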
filename_spec = os.path.join(os.path.dirname(__file__), 'spec.json')
if os.path.exists(filename_spec):
# add DataFrameAccessorML.<snake_name> wrapper methods
with open(filename_spec) as f:
try:
spec = json.load(f)
except json.decoder.JSONDecodeError:
pass # we are generating the file probably
else:
for class_spec in spec:
def closure(class_spec=class_spec):
def wrapper(self, features=None, transform=True, **kwargs):
kwargs = kwargs.copy() # we do modifications, so make a copy
features = features or self.df.get_column_names()
features = _ensure_strings_from_expressions(features)
import importlib
module = importlib.import_module(class_spec['module'])
cls = getattr(module, class_spec['classname'])
if 'target' in kwargs:
kwargs['target'] = str(kwargs['target'])
object = cls(features=features, **kwargs)
object.fit(self.df)
if transform:
dft = object.transform(self.df)
return dft
else:
return object
# Append trait help strings to the docstring
doc = '\n'
for trait in class_spec['traits']:
doc += f':param {trait["name"]}: {trait["help"]} \n'
                    doc += ':param transform: If True, return a shallow copy of the transformed DataFrame, otherwise return the fitted transformer. \n'
try:
wrapper.__doc__= class_spec['doc'] + doc
except TypeError: # unsupported operand type(s) for +: 'NoneType' and 'str'
wrapper.__doc__= doc
return wrapper
accessor = DataFrameAccessorML
name = class_spec['snake_name']
setattr(accessor, name, closure())
from .transformations import PCA, PCAIncremental, RandomProjections
from .transformations import StandardScaler, MinMaxScaler, MaxAbsScaler, RobustScaler
from .transformations import LabelEncoder, OneHotEncoder, MultiHotEncoder, FrequencyEncoder
from .transformations import CycleTransformer
from .transformations import BayesianTargetEncoder
from .transformations import WeightOfEvidenceEncoder
from .transformations import GroupByTransformer, KBinsDiscretizer
from .pipeline import Pipeline
|
py
|
1a5d2d25347230853171984c4f83a08c25e4f364
|
from setuptools import setup
from hdf54bats import __version__
setup(name='hdf54bats',
version=__version__,
description='HDF5 for bats, a part of the CloudedBats.org project.',
url='https://github.com/cloudedbats/cloudedbats_hdf5',
author='Arnold Andreasson',
author_email='[email protected]',
license='MIT',
packages=['hdf54bats'],
install_requires=[
'numpy',
'tables',
],
zip_safe=False)
|
py
|
1a5d2df23fc9c5e4643a3f0be6ea8124e598b863
|
#!/usr/bin/python
# Compresses the files for one game into a single JavaScript file.
#
# Copyright 2013 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This script generates two files:
# compressed.js
# uncompressed.js
# The compressed file is a concatenation of all the relevant JavaScript which
# has then been run through Google's Closure Compiler.
# The uncompressed file is a script that loads in each JavaScript file
# one by one. This takes much longer for a browser to load, but is useful
# when debugging code since line numbers are meaningful and variables haven't
# been renamed. The uncompressed file also allows for a faster development
# cycle since there is no need to rebuild or recompile, just reload.
import json
import os.path
import re
import subprocess
import sys
import threading
# Define a warning message for all the generated files.
WARNING = '// Automatically generated file. Do not edit!\n'
messageNames = []
def main(name, lang):
if lang != None:
filterMessages(name, lang)
language(name, lang)
else:
# Extract the list of supported languages from boot.js.
# This is a bit fragile.
boot = open('appengine/common/boot.js', 'r')
js = ' '.join(boot.readlines())
boot.close()
m = re.search('\[\'BlocklyGamesLanguages\'\] = (\[[-,\'\\s\\w]+\])', js)
if not m:
raise Exception("Can't find BlocklyGamesLanguages in boot.js")
langs = m.group(1)
langs = langs.replace("'", '"')
langs = json.loads(langs)
filterMessages(name, langs[0])
for lang in langs:
language(name, lang)
def filterMessages(name, lang):
global messageNames
# Do a dummy compile and identify all the Blockly messages used.
print("Scanning for Blockly messages in %s..." % name)
f = open('appengine/%s/generated/%s/msg.js' % (name, lang), 'w')
f.write("""
goog.provide('BlocklyGames.Msg');
goog.require('Blockly.Msg');
Blockly.Msg["ybr8uu2q3b"] = '';
""")
f.close()
thread0 = Gen_compressed(name, lang)
thread0.start()
thread0.join()
f = open('appengine/%s/generated/%s/compressed.js' % (name, lang), 'r')
js = f.read()
f.close()
# Locate what Blockly.Msg has compiled into (e.g. h.Y)
m = re.search('([\w.$]+)\.ybr8uu2q3b=', js)
if m:
blocklyMsg = m.group(1)
blocklyMsg = blocklyMsg.replace('.', '\\.').replace('$', '\\$')
msgs1 = re.findall('\W' + blocklyMsg + '.([A-Z0-9_]+)', js);
msgs2 = re.findall('\WBKY_([A-Z0-9_]+)', js);
messageNames = list(set(msgs1 + msgs2))
# Resolve references.
# Blockly.Msg["TEXT_APPEND_VAR"] = Blockly.Msg["VAR_DEFAULT_NAME"];
# Does not handle long chains of references.
msgs = getMessages(lang)
for msg in msgs:
m = re.search('Blockly\.Msg\["([A-Z0-9_]+)"\] = Blockly\.Msg\["([A-Z0-9_]+)"\]', msg)
if m and m.group(1) in messageNames:
messageNames.append(m.group(2))
messageNames.sort()
print("Found %d Blockly messages." % len(messageNames))
def getMessages(lang):
# Read Blockly's message file for this language (default to English).
blocklyMsgFileName = 'appengine/third-party/blockly/msg/js/%s.js' % lang;
if not os.path.exists(blocklyMsgFileName):
blocklyMsgFileName = 'appengine/third-party/blockly/msg/js/en.js';
f = open(blocklyMsgFileName, 'r')
msgs = f.readlines()
f.close()
return msgs
def language(name, lang):
global messageNames
msgs = getMessages(lang)
# Write copy to Blockly Games.
f = open('appengine/%s/generated/%s/msg.js' % (name, lang), 'w')
for msg in msgs:
if msg == "'use strict';\n":
f.write("""'use strict';
goog.provide('BlocklyGames.Msg');
goog.require('Blockly.Msg');
""")
else:
# Only write out messages that are used (as detected in filterMessages).
m = re.search('Blockly\.Msg\["([A-Z0-9_]+)"\] = ', msg)
if not m or m.group(1) in messageNames:
f.write(msg)
f.close()
print('Compiling %s - %s' % (name.title(), lang))
# Run uncompressed and compressed code generation in separate threads.
# For multi-core computers, this offers a significant speed boost.
thread1 = Gen_uncompressed(name, lang)
thread2 = Gen_compressed(name, lang)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
print("")
class Gen_uncompressed(threading.Thread):
def __init__(self, name, lang):
threading.Thread.__init__(self)
self.name = name
self.lang = lang
def run(self):
cmd = ['third-party-downloads/build/closurebuilder.py',
'--root=appengine/third-party/',
'--root=appengine/generated/%s/' % self.lang,
'--root=appengine/js/',
'--namespace=%s' % self.name.replace('/', '.').title(),
'--output_mode=list']
directory = self.name
while directory:
cmd.append('--root=appengine/%s/generated/%s/' % (directory, self.lang))
cmd.append('--root=appengine/%s/js/' % directory)
(directory, sep, fragment) = directory.rpartition(os.path.sep)
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    except Exception:
      raise Exception("Failed to Popen: %s" % ' '.join(cmd))
files = readStdout(proc)
if self.name == 'pond/docs':
path = '../'
else:
path = ''
prefix = 'appengine/'
srcs = []
for file in files:
file = file.strip()
if file[:len(prefix)] == prefix:
file = file[len(prefix):]
else:
raise Exception('"%s" is not in "%s".' % (file, prefix))
srcs.append('"%s%s"' % (path, file))
f = open('appengine/%s/generated/%s/uncompressed.js' %
(self.name, self.lang), 'w')
f.write("""%s
window.CLOSURE_NO_DEPS = true;
(function() {
var srcs = [
%s
];
function loadScript() {
var src = srcs.shift();
if (src) {
var script = document.createElement('script');
script.src = src;
script.type = 'text/javascript';
script.onload = loadScript;
document.head.appendChild(script);
}
}
loadScript();
})();
""" % (WARNING, ',\n '.join(srcs)))
f.close()
print('Found %d dependencies.' % len(srcs))
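# Gen_compressed invokes the Closure Compiler with ADVANCED_OPTIMIZATIONS to
# produce compressed.js, then strips redundant Apache licence headers.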
class Gen_compressed(threading.Thread):
def __init__(self, name, lang):
threading.Thread.__init__(self)
self.name = name
self.lang = lang
def run(self):
cmd = [
'java',
'-jar', 'third-party-downloads/closure-compiler.jar',
'--generate_exports',
'--compilation_level', 'ADVANCED_OPTIMIZATIONS',
'--dependency_mode=PRUNE',
'--externs', 'externs/gviz-externs.js',
'--externs', 'externs/interpreter-externs.js',
'--externs', 'externs/prettify-externs.js',
'--externs', 'externs/soundJS-externs.js',
'--externs', 'externs/storage-externs.js',
'--externs', 'appengine/third-party/blockly/externs/svg-externs.js',
'--language_in', 'ECMASCRIPT5_STRICT',
'--language_out', 'ECMASCRIPT5_STRICT',
'--entry_point=%s' % self.name.replace('/', '.').title(),
"--js='appengine/third-party/**.js'",
"--js='!appengine/third-party/base.js'",
"--js='!appengine/third-party/blockly/externs/**.js'",
"--js='appengine/generated/%s/*.js'" % self.lang,
"--js='appengine/js/*.js'",
'--warning_level', 'QUIET',
]
directory = self.name
while directory:
cmd.append("--js='appengine/%s/generated/%s/*.js'" %
(directory, self.lang))
cmd.append("--js='appengine/%s/js/*.js'" % directory)
(directory, sep, fragment) = directory.rpartition(os.path.sep)
try:
proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    except Exception:
      print("Failed to Popen: %s" % ' '.join(cmd))
      raise
script = readStdout(proc)
script = ''.join(script)
script = self.trim_licence(script)
print('Compressed to %d KB.' % (len(script) / 1024))
f = open('appengine/%s/generated/%s/compressed.js' %
(self.name, self.lang), 'w')
f.write(WARNING)
f.write(script)
f.close()
def trim_licence(self, code):
"""Strip out Google's and MIT's Apache licences.
JS Compiler preserves dozens of Apache licences in the Blockly code.
Remove these if they belong to Google or MIT.
MIT's permission to do this is logged in Blockly issue 2412.
Args:
code: Large blob of compiled source code.
Returns:
Code with Google's and MIT's Apache licences trimmed.
"""
apache2 = re.compile("""/\\*
(Copyright \\d+ (Google LLC|Massachusetts Institute of Technology))
( All rights reserved.
)? SPDX-License-Identifier: Apache-2.0
\\*/""")
return re.sub(apache2, '', code)
def readStdout(proc):
data = proc.stdout.readlines()
# Python 2 reads stdout as text.
# Python 3 reads stdout as bytes.
  return [line if isinstance(line, str) else str(line, 'utf-8') for line in data]
if __name__ == '__main__':
if len(sys.argv) == 2:
main(sys.argv[1], None)
elif len(sys.argv) == 3:
main(sys.argv[1], sys.argv[2])
else:
print('Format: %s <appname> [<language>]' % sys.argv[0])
sys.exit(2)
|
py
|
1a5d2e1f12ef0aa80e262c9b47c682d38faadf33
|
from keras.datasets import mnist, fashion_mnist
from models import load_model
import numpy as np
import os
import argparse
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import math
curdir = os.path.dirname(os.path.abspath(__file__))
parser = argparse.ArgumentParser()
parser.add_argument('--optimizer', choices=['adam','sgd','adagrad'], default='adam')
parser.add_argument('--loss', choices=['mean_squared_error', 'binary_crossentropy'], default='mean_squared_error')
parser.add_argument('--epochs', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=64)
parser.add_argument('--test_samples', type=int, default=50)
parser.add_argument('--result', default=os.path.join(curdir, 'result.png'))
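# Approach: train each autoencoder on normal data (MNIST) only, then compare
# per-sample reconstruction loss on held-out MNIST (normal) versus Fashion-MNIST
# (abnormal) samples; anomalous inputs should reconstruct with noticeably higher
# loss, which is what the resulting plot visualizes.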
def main(args):
    # prepare normal dataset (MNIST)
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train / 255. # normalize into [0,1]
x_test = x_test / 255.
    # prepare abnormal dataset (Fashion-MNIST)
(_, _), (x_abnormal, _) = fashion_mnist.load_data()
x_abnormal = x_abnormal / 255.
    # sample args.test_samples images from each of x_test and x_abnormal
    perm = np.random.permutation(len(x_test))[:args.test_samples]
    x_test = x_test[perm]
    x_abnormal = x_abnormal[perm]
    # train each model and test its anomaly-detection capability
model_names = ['autoencoder', 'deep_autoencoder', 'convolutional_autoencoder']
for model_name in model_names:
# instantiate model
model = load_model(model_name)
# reshape input data according to the model's input tensor
if model_name == 'convolutional_autoencoder':
x_train = x_train.reshape(-1,28,28,1)
x_test = x_test.reshape(-1,28,28,1)
x_abnormal = x_abnormal.reshape(-1,28,28,1)
elif model_name == 'autoencoder' or model_name == 'deep_autoencoder':
x_train = x_train.reshape(-1,28*28)
x_test = x_test.reshape(-1,28*28)
x_abnormal = x_abnormal.reshape(-1,28*28)
else:
raise ValueError('Unknown model_name %s was given' % model_name)
# compile model
model.compile(optimizer=args.optimizer, loss=args.loss)
# train on only normal training data
model.fit(
x=x_train,
y=x_train,
epochs=args.epochs,
batch_size=args.batch_size,
)
# test
x_concat = np.concatenate([x_test, x_abnormal], axis=0)
losses = []
for x in x_concat:
            # compute loss for each test sample
x = np.expand_dims(x, axis=0)
loss = model.test_on_batch(x, x)
losses.append(loss)
# plot
plt.plot(range(len(losses)), losses, linestyle='-', linewidth=1, label=model_name)
        # delete model to free memory
del model
# create graph
plt.legend(loc='best')
plt.grid()
plt.xlabel('sample index')
plt.ylabel('loss')
plt.savefig(args.result)
plt.clf()
if __name__ == '__main__':
args = parser.parse_args()
main(args)
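# Example invocation (the script file name is assumed here, not specified in this file):
#   python anomaly_detection.py --optimizer adam --loss mean_squared_error --epochs 10 --batch_size 64 --test_samples 50 --result result.png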
|