max_stars_repo_path (stringlengths 4–245) | max_stars_repo_name (stringlengths 7–115) | max_stars_count (int64 101–368k) | id (stringlengths 2–8) | content (stringlengths 6–1.03M) |
---|---|---|---|---|
RL/plot-single.py | JulianYu123456/icnn | 258 | 12629233 |
#!/usr/bin/env python3
import argparse
import os
import numpy as np
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.style.use('bmh')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('expDir', type=str)
parser.add_argument('--xmax', type=float)
parser.add_argument('--ymin', type=float, default=0.0)
parser.add_argument('--ymax', type=float)
args = parser.parse_args()
fig, ax = plt.subplots(1, 1, figsize=(6, 5))
plt.xlabel('Timestep')
plt.ylabel('Reward')
trainP = os.path.join(args.expDir, 'train.log')
trainData = np.loadtxt(trainP).reshape(-1, 2)
testP = os.path.join(args.expDir, 'test.log')
testData = np.loadtxt(testP).reshape(-1, 2)
if trainData.shape[0] > 1:
plt.plot(trainData[:,0], trainData[:,1], label='Train')
if testData.shape[0] > 1:
testI = testData[:,0]
testRew = testData[:,1]
plt.plot(testI, testRew, label='Test')
N = 10
testI_ = testI[N:]
testRew_ = [sum(testRew[i-N:i])/N for i in range(N, len(testRew))]
plt.plot(testI_, testRew_, label='Rolling Test')
plt.ylim([args.ymin, args.ymax])
plt.legend()
fname = os.path.join(args.expDir, 'reward.pdf')
plt.savefig(fname)
print('Created {}'.format(fname))
if __name__ == '__main__':
main()
|
orc8r/gateway/python/scripts/ctraced_cli.py | nstng/magma | 539 | 12629238 | #!/usr/bin/env python3
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import textwrap
from magma.common.rpc_utils import grpc_wrapper
from orc8r.protos import common_pb2
from orc8r.protos.ctraced_pb2 import StartTraceRequest
from orc8r.protos.ctraced_pb2_grpc import CallTraceServiceStub
@grpc_wrapper
def start_call_trace(client, args):
client.StartCallTrace(StartTraceRequest())
@grpc_wrapper
def end_call_trace(client, args):
res = client.EndCallTrace(common_pb2.Void())
print("Result of call trace: ", res)
def create_parser():
"""
Creates the argparse parser with all the arguments.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawTextHelpFormatter,
description=textwrap.dedent('''\
Management CLI for ctraced
--------------------------
Use to start and end call traces.
Options are provided for the type of trace to record.
Only a single trace can be captured at a time.
'''),
)
# Add subcommands
subparsers = parser.add_subparsers(title='subcommands', dest='cmd')
# Add StartCallTrace subcommand
parser_start_trace = subparsers.add_parser(
'start_call_trace',
help='Start a call trace',
)
trace_types = list(StartTraceRequest.TraceType.DESCRIPTOR.values_by_name)
supported_protocols =\
list(StartTraceRequest.ProtocolName.DESCRIPTOR.values_by_name)
supported_interfaces =\
list(StartTraceRequest.InterfaceName.DESCRIPTOR.values_by_name)
parser_start_trace.add_argument(
'--type', type=str, choices=trace_types,
help='Trace type', required=True,
)
    parser_start_trace.add_argument(
        '--imsi', type=str,
        help='IMSI of the subscriber to trace',
    )
parser_start_trace.add_argument(
'--protocol', type=str,
choices=supported_protocols,
)
parser_start_trace.add_argument(
'--interface', type=str,
choices=supported_interfaces,
)
parser_start_trace.set_defaults(func=start_call_trace)
# Add EndCallTrace subcommand
parser_end_trace = subparsers.add_parser(
'end_call_trace',
help='End a call trace',
)
parser_end_trace.set_defaults(func=end_call_trace)
return parser
def main():
parser = create_parser()
# Parse the args
args = parser.parse_args()
if not args.cmd:
parser.print_usage()
exit(1)
# Execute the subcommand function
args.func(args, CallTraceServiceStub, 'ctraced')
if __name__ == "__main__":
main()
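

# ---------------------------------------------------------------------------
# Illustrative invocations (added for clarity; not part of the original
# script). Subcommand and flag names come from the parser defined above;
# the concrete values are placeholders.
#   $ ./ctraced_cli.py start_call_trace --type <TRACE_TYPE> --interface <IFACE>
#   $ ./ctraced_cli.py end_call_trace
# ---------------------------------------------------------------------------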
|
anaconda_project/test/test_cli.py | kathatherine/anaconda-project | 188 | 12629268 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2016, Anaconda, Inc. All rights reserved.
#
# Licensed under the terms of the BSD 3-Clause License.
# The full license is in the file LICENSE.txt, distributed with this software.
# -----------------------------------------------------------------------------
from __future__ import absolute_import, print_function
import anaconda_project.cli as cli
def test_main(monkeypatch):
result = {}
def mock_main(*args, **kwargs):
result['args'] = args
result['kwargs'] = kwargs
monkeypatch.setattr('anaconda_project.internal.cli.main.main', mock_main)
cli.main()
assert dict(args=(), kwargs={}) == result
|
Tree/222_CountCompleteTreeNodes.py | cls1991/leetcode | 180 | 12629269 | # coding: utf8
"""
Problem link: https://leetcode.com/problems/count-complete-tree-nodes/description.
Problem description:
Given a complete binary tree, count the number of nodes.
Definition of a complete binary tree from Wikipedia:
In a complete binary tree every level, except possibly the last, is completely filled, and all nodes in the last
level are as far left as possible. It can have between 1 and 2h nodes inclusive at the last level h.
"""
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def countNodes(self, root):
"""
:type root: TreeNode
:rtype: int
"""
if not root:
return 0
        # If the subtree is a perfect binary tree, return its node count directly;
        # otherwise the large LeetCode test cases will not pass in time.
lh = self.left_tree_height(root)
rh = self.right_tree_height(root)
if lh == rh:
return pow(2, lh) - 1
return 1 + self.countNodes(root.left) + self.countNodes(root.right)
def left_tree_height(self, root):
if not root:
return 0
return 1 + self.left_tree_height(root.left)
def right_tree_height(self, root):
if not root:
return 0
return 1 + self.right_tree_height(root.right)
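

# A minimal usage sketch (added; not part of the original solution). The small
# complete tree below is an arbitrary example used only to exercise countNodes.
if __name__ == '__main__':
    root = TreeNode(1)
    root.left, root.right = TreeNode(2), TreeNode(3)
    root.left.left, root.left.right = TreeNode(4), TreeNode(5)
    print(Solution().countNodes(root))  # expected output: 5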
|
dtech_instagram/InstagramAPI/src/__init__.py | hideki-saito/InstagramAPP_Flask | 126 | 12629275 | from .Checkpoint import Checkpoint
from .Constants import Constants
from .Instagram import Instagram
from .InstagramException import InstagramException
from .InstagramRegistration import InstagramRegistration
from .SignatureUtils import SignatureUtils
from .Utils import *
from .http import *
__all__ = ["Constants", "Instagram", "Checkpoint", "InstagramException", "InstagramRegistration", "http", "Utils"]
|
det3d/models/necks/__init__.py | alsun-oven/CenterPoint | 1,124 | 12629277 |
from .rpn import RPN
__all__ = ["RPN"]
|
lib-python/io/proxies/reliability.py | geoffxy/tandem | 732 | 12629321 |
from tandem.shared.io.udp_gateway import UDPGateway
from tandem.shared.io.proxies.base import ProxyBase
from tandem.shared.utils.reliability import ReliabilityUtils
from tandem.shared.stores.reliability import ReliabilityStore
import logging
class ReliabilityProxy(ProxyBase):
def __init__(self, time_scheduler):
self._time_scheduler = time_scheduler
def _handle_ack_timeout(self, ack_id, io_data):
if ReliabilityUtils.should_resend_payload(ack_id):
logging.info("Timeout on ack {}, resending".format(ack_id))
self._interface._write_io_data([io_data])
self._time_scheduler.run_after(
ReliabilityUtils.ACK_TIMEOUT,
self._handle_ack_timeout,
ack_id,
io_data
)
def pre_write_io_data(self, params):
args, kwargs = params
io_datas, = args
should_ack = kwargs.get('reliability', False)
if not should_ack:
return params
new_io_datas = []
for io_data in io_datas:
new_io_data = io_data
new_raw_data, ack_id = ReliabilityUtils.serialize(
io_data.get_data(),
)
new_io_data = UDPGateway.data_class(
new_raw_data,
io_data.get_address(),
)
ReliabilityStore.get_instance().add_payload(ack_id, new_io_data)
self._time_scheduler.run_after(
ReliabilityUtils.ACK_TIMEOUT,
self._handle_ack_timeout,
ack_id,
new_io_data
)
new_io_datas.append(new_io_data)
new_args = (new_io_datas,)
return (new_args, kwargs)
def on_retrieve_io_data(self, params):
args, kwargs = params
raw_data, address = args
if ReliabilityUtils.is_ack(raw_data):
ack_id = ReliabilityUtils.parse_ack(raw_data)
ReliabilityStore.get_instance().remove_payload(ack_id)
return (None, None)
elif ReliabilityUtils.is_ackable(raw_data):
new_raw_data, ack_id = ReliabilityUtils.deserialize(raw_data)
ack_payload = ReliabilityUtils.generate_ack(ack_id)
self._interface.write_io_data([
self._interface.data_class(ack_payload, address),
])
new_args = new_raw_data, address
return (new_args, kwargs)
else:
return params
|
windows/auto_config_geo_for_multiple_wins_app.py | shuge/Qt-Python-Binding-Examples | 179 | 12629334 | #!/usr/bin/env python
"""
auto place secondary window next to primary window
Tested environment:
Mac OS X 10.6.8
http://www.pyside.org/docs/pyside/PySide/QtGui/QWidget.html
http://www.pyside.org/docs/pyside/PySide/QtCore/QRect.html
https://doc.qt.io/qt-5/qdesktopwidget.html
"""
import sys
try:
from PySide import QtCore
from PySide import QtGui
except ImportError:
from PyQt4 import QtCore
from PyQt4 import QtGui
class AnotherWindow(QtGui.QWidget):
def __init__(self, primary_win):
super(AnotherWindow, self).__init__()
self.setWindowTitle('Another Window')
w, h = 300, 400
self.resize(w, h)
self.primary_win = primary_win
def smart_place(self):
screen = QtGui.QApplication.desktop()
primary_win_pos = 'right'
if self.primary_win.x() < screen.width():
left_screen = QtCore.QRect(0, 0, screen.width() / 2, screen.height())
if left_screen.contains(self.primary_win.pos()) or left_screen.contains(self.primary_win.geometry().topRight()):
primary_win_pos = 'left'
y = (screen.height() - self.height() - 100) / 2
if primary_win_pos == 'left':
x = self.primary_win.x() + self.primary_win.width()
else:
x = self.primary_win.x() - self.width()
self.move(x, y)
def show(self):
self.smart_place()
super(AnotherWindow, self).show()
class Demo(QtGui.QMainWindow):
def __init__(self):
super(Demo, self).__init__()
self.resize(300, 300)
self.show_another_win_btn = QtGui.QPushButton("show", self)
self.show_another_win_btn.clicked.connect(self._show_another_win_btn_cb)
self.show_another_win_btn.move(10, 10)
self.another_win = None
def _show_another_win_btn_cb(self):
if not self.another_win:
self.another_win = AnotherWindow(primary_win = self)
self.another_win.show()
def show_and_raise(self):
self.show()
self.raise_()
if __name__ == "__main__":
app = QtGui.QApplication(sys.argv)
demo = Demo()
demo.show_and_raise()
sys.exit(app.exec_())
|
bin/viewer.py | jzw0025/fem-with-python | 148 | 12629348 |
#!/usr/bin/env python
import sys
import argparse
from os.path import dirname, realpath, join, isdir
D = realpath(join(dirname(realpath(__file__)), '../'))
assert isdir(join(D, 'femlib'))
sys.path.insert(0, D)
from viewer import launch_viewer
parser = argparse.ArgumentParser()
parser.add_argument("sources", nargs="*")
args = parser.parse_args(sys.argv[1:])
launch_viewer(args.sources)
|
crabageprediction/venv/Lib/site-packages/matplotlib/container.py | 13rianlucero/CrabAgePrediction | 603 | 12629405 |
from matplotlib.artist import Artist
import matplotlib.cbook as cbook
class Container(tuple):
"""
Base class for containers.
Containers are classes that collect semantically related Artists such as
the bars of a bar plot.
"""
def __repr__(self):
return ("<{} object of {} artists>"
.format(type(self).__name__, len(self)))
def __new__(cls, *args, **kwargs):
return tuple.__new__(cls, args[0])
def __init__(self, kl, label=None):
self._callbacks = cbook.CallbackRegistry()
self._remove_method = None
self.set_label(label)
def remove(self):
for c in cbook.flatten(
self, scalarp=lambda x: isinstance(x, Artist)):
if c is not None:
c.remove()
if self._remove_method:
self._remove_method(self)
def get_children(self):
return [child for child in cbook.flatten(self) if child is not None]
get_label = Artist.get_label
set_label = Artist.set_label
add_callback = Artist.add_callback
remove_callback = Artist.remove_callback
pchanged = Artist.pchanged
class BarContainer(Container):
"""
Container for the artists of bar plots (e.g. created by `.Axes.bar`).
The container can be treated as a tuple of the *patches* themselves.
Additionally, you can access these and further parameters by the
attributes.
Attributes
----------
patches : list of :class:`~matplotlib.patches.Rectangle`
The artists of the bars.
errorbar : None or :class:`~matplotlib.container.ErrorbarContainer`
A container for the error bar artists if error bars are present.
*None* otherwise.
datavalues : None or array-like
The underlying data values corresponding to the bars.
orientation : {'vertical', 'horizontal'}, default: None
If 'vertical', the bars are assumed to be vertical.
If 'horizontal', the bars are assumed to be horizontal.
"""
def __init__(self, patches, errorbar=None, *, datavalues=None,
orientation=None, **kwargs):
self.patches = patches
self.errorbar = errorbar
self.datavalues = datavalues
self.orientation = orientation
super().__init__(patches, **kwargs)
class ErrorbarContainer(Container):
"""
Container for the artists of error bars (e.g. created by `.Axes.errorbar`).
The container can be treated as the *lines* tuple itself.
Additionally, you can access these and further parameters by the
attributes.
Attributes
----------
lines : tuple
Tuple of ``(data_line, caplines, barlinecols)``.
- data_line : :class:`~matplotlib.lines.Line2D` instance of
x, y plot markers and/or line.
- caplines : tuple of :class:`~matplotlib.lines.Line2D` instances of
the error bar caps.
- barlinecols : list of :class:`~matplotlib.collections.LineCollection`
with the horizontal and vertical error ranges.
has_xerr, has_yerr : bool
``True`` if the errorbar has x/y errors.
"""
def __init__(self, lines, has_xerr=False, has_yerr=False, **kwargs):
self.lines = lines
self.has_xerr = has_xerr
self.has_yerr = has_yerr
super().__init__(lines, **kwargs)
class StemContainer(Container):
"""
Container for the artists created in a :meth:`.Axes.stem` plot.
The container can be treated like a namedtuple ``(markerline, stemlines,
baseline)``.
Attributes
----------
markerline : :class:`~matplotlib.lines.Line2D`
The artist of the markers at the stem heads.
stemlines : list of :class:`~matplotlib.lines.Line2D`
The artists of the vertical lines for all stems.
baseline : :class:`~matplotlib.lines.Line2D`
The artist of the horizontal baseline.
"""
def __init__(self, markerline_stemlines_baseline, **kwargs):
"""
Parameters
----------
markerline_stemlines_baseline : tuple
Tuple of ``(markerline, stemlines, baseline)``.
``markerline`` contains the `.LineCollection` of the markers,
``stemlines`` is a `.LineCollection` of the main lines,
``baseline`` is the `.Line2D` of the baseline.
"""
markerline, stemlines, baseline = markerline_stemlines_baseline
self.markerline = markerline
self.stemlines = stemlines
self.baseline = baseline
super().__init__(markerline_stemlines_baseline, **kwargs)
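

# Illustrative (commented-out) usage sketch, added for clarity; it is not part
# of container.py. The data values are arbitrary. In the pyplot API these
# containers are what the plotting calls return:
#   import matplotlib.pyplot as plt
#   bars = plt.bar([1, 2, 3], [4, 5, 6])            # BarContainer
#   print(len(bars.patches), bars.orientation)
#   err = plt.errorbar([1, 2], [3, 4], yerr=0.5)    # ErrorbarContainer
#   print(err.has_yerr)
#   stem = plt.stem([1, 2, 3], [1, 4, 9])           # StemContainer
#   print(stem.markerline, stem.baseline)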
|
scrypt/test_scrypt.py | sigmoid3/Dapper | 974 | 12629411 |
from ethereum import tester as t
import sys
def log_listener(x):
if x['_event_type'] == 'BlockMixInput':
bminputs.append(x['data'])
print len(bminputs), x
else:
print x
s = t.state()
bminputs = []
print 'Creating contract'
c = s.abi_contract('scrypt.se.py', log_listener=log_listener)
print 'Computing hash and getting blockmix inputs'
o = '\x00' * 32
i = 0
while o == '\x00' * 32:
o = c.scrypt("cow")
i += 1
print '%d transactions sent' % i
print 'Checking result correctness'
assert o == "1c989eff71803fb4c9b3e47e611330da1a7d153d2ab5f6bef57dc3253d51cc52".decode('hex'), o.encode('hex')
print 'Success'
|
test_frame/test_decorator_run_example/test_common_no_decorator_example.py | piaoxue1949/distributed_framework | 333 | 12629424 |
"""
Test the non-decorator approach; compare it with the decorator version in test_decorator_task_example.py.
"""
from function_scheduling_distributed_framework import get_consumer
def f(a, b):
print(a + b)
consumer = get_consumer('queue_test_f01', consuming_function=f, qps=0.2, broker_kind=0)
for i in range(10, 20):
consumer.publisher_of_same_queue.publish(dict(a=i, b=i * 2))
consumer.start_consuming_message()
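
# For comparison, a hedged sketch of the decorator-based form referenced in the
# docstring (test_decorator_task_example.py). ``task_deco`` and the push/consume
# helpers are assumptions about this framework's API and may differ:
#
#   from function_scheduling_distributed_framework import task_deco
#
#   @task_deco('queue_test_f01', qps=0.2, broker_kind=0)
#   def f(a, b):
#       print(a + b)
#
#   for i in range(10, 20):
#       f.push(i, i * 2)
#   f.consume()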
|
tests/test_utils/test_general_data.py | JustWeZero/mmdetection | 20,190 | 12629430 | import copy
import numpy as np
import pytest
import torch
from mmdet.core import GeneralData, InstanceData
def _equal(a, b):
if isinstance(a, (torch.Tensor, np.ndarray)):
return (a == b).all()
else:
return a == b
def test_general_data():
# test init
meta_info = dict(
img_size=[256, 256],
path='dadfaff',
scale_factor=np.array([1.5, 1.5]),
img_shape=torch.rand(4))
data = dict(
bboxes=torch.rand(4, 4),
labels=torch.rand(4),
masks=np.random.rand(4, 2, 2))
instance_data = GeneralData(meta_info=meta_info)
assert 'img_size' in instance_data
assert instance_data.img_size == [256, 256]
assert instance_data['img_size'] == [256, 256]
assert 'path' in instance_data
assert instance_data.path == 'dadfaff'
# test nice_repr
repr_instance_data = instance_data.new(data=data)
nice_repr = str(repr_instance_data)
for line in nice_repr.split('\n'):
if 'masks' in line:
assert 'shape' in line
assert '(4, 2, 2)' in line
if 'bboxes' in line:
assert 'shape' in line
assert 'torch.Size([4, 4])' in line
if 'path' in line:
assert 'dadfaff' in line
if 'scale_factor' in line:
assert '[1.5 1.5]' in line
instance_data = GeneralData(
meta_info=meta_info, data=dict(bboxes=torch.rand(5)))
assert 'bboxes' in instance_data
assert len(instance_data.bboxes) == 5
# data should be a dict
with pytest.raises(AssertionError):
GeneralData(data=1)
# test set data
instance_data = GeneralData()
instance_data.set_data(data)
assert 'bboxes' in instance_data
assert len(instance_data.bboxes) == 4
assert 'masks' in instance_data
assert len(instance_data.masks) == 4
# data should be a dict
with pytest.raises(AssertionError):
instance_data.set_data(data=1)
# test set_meta
instance_data = GeneralData()
instance_data.set_meta_info(meta_info)
assert 'img_size' in instance_data
assert instance_data.img_size == [256, 256]
assert instance_data['img_size'] == [256, 256]
assert 'path' in instance_data
assert instance_data.path == 'dadfaff'
# can skip same value when overwrite
instance_data.set_meta_info(meta_info)
# meta should be a dict
with pytest.raises(AssertionError):
instance_data.set_meta_info(meta_info='fjhka')
# attribute in `_meta_info_field` is immutable once initialized
instance_data.set_meta_info(meta_info)
# meta should be immutable
with pytest.raises(KeyError):
instance_data.set_meta_info(dict(img_size=[254, 251]))
with pytest.raises(KeyError):
duplicate_meta_info = copy.deepcopy(meta_info)
duplicate_meta_info['path'] = 'dada'
instance_data.set_meta_info(duplicate_meta_info)
with pytest.raises(KeyError):
duplicate_meta_info = copy.deepcopy(meta_info)
duplicate_meta_info['scale_factor'] = np.array([1.5, 1.6])
instance_data.set_meta_info(duplicate_meta_info)
# test new_instance_data
instance_data = GeneralData(meta_info)
new_instance_data = instance_data.new()
for k, v in instance_data.meta_info_items():
assert k in new_instance_data
_equal(v, new_instance_data[k])
instance_data = GeneralData(meta_info, data=data)
temp_meta = copy.deepcopy(meta_info)
temp_data = copy.deepcopy(data)
temp_data['time'] = '12212'
temp_meta['img_norm'] = np.random.random(3)
new_instance_data = instance_data.new(meta_info=temp_meta, data=temp_data)
for k, v in new_instance_data.meta_info_items():
if k in instance_data:
_equal(v, instance_data[k])
else:
assert _equal(v, temp_meta[k])
assert k == 'img_norm'
for k, v in new_instance_data.items():
if k in instance_data:
_equal(v, instance_data[k])
else:
assert k == 'time'
assert _equal(v, temp_data[k])
# test keys
instance_data = GeneralData(meta_info, data=dict(bboxes=10))
assert 'bboxes' in instance_data.keys()
instance_data.b = 10
assert 'b' in instance_data
# test meta keys
instance_data = GeneralData(meta_info, data=dict(bboxes=10))
assert 'path' in instance_data.meta_info_keys()
assert len(instance_data.meta_info_keys()) == len(meta_info)
instance_data.set_meta_info(dict(workdir='fafaf'))
assert 'workdir' in instance_data
assert len(instance_data.meta_info_keys()) == len(meta_info) + 1
# test values
instance_data = GeneralData(meta_info, data=dict(bboxes=10))
assert 10 in instance_data.values()
assert len(instance_data.values()) == 1
# test meta values
instance_data = GeneralData(meta_info, data=dict(bboxes=10))
# torch 1.3 eq() can not compare str and tensor
from mmdet import digit_version
if digit_version(torch.__version__) >= [1, 4]:
assert 'dadfaff' in instance_data.meta_info_values()
assert len(instance_data.meta_info_values()) == len(meta_info)
# test items
instance_data = GeneralData(data=data)
for k, v in instance_data.items():
assert k in data
assert _equal(v, data[k])
# test meta_info_items
instance_data = GeneralData(meta_info=meta_info)
for k, v in instance_data.meta_info_items():
assert k in meta_info
assert _equal(v, meta_info[k])
# test __setattr__
new_instance_data = GeneralData(data=data)
new_instance_data.mask = torch.rand(3, 4, 5)
new_instance_data.bboxes = torch.rand(2, 4)
assert 'mask' in new_instance_data
assert len(new_instance_data.mask) == 3
assert len(new_instance_data.bboxes) == 2
# test instance_data_field has been updated
assert 'mask' in new_instance_data._data_fields
assert 'bboxes' in new_instance_data._data_fields
for k in data:
assert k in new_instance_data._data_fields
# '_meta_info_field', '_data_fields' is immutable.
with pytest.raises(AttributeError):
new_instance_data._data_fields = None
with pytest.raises(AttributeError):
new_instance_data._meta_info_fields = None
with pytest.raises(AttributeError):
del new_instance_data._data_fields
with pytest.raises(AttributeError):
del new_instance_data._meta_info_fields
# key in _meta_info_field is immutable
new_instance_data.set_meta_info(meta_info)
with pytest.raises(KeyError):
del new_instance_data.img_size
with pytest.raises(KeyError):
del new_instance_data.scale_factor
for k in new_instance_data.meta_info_keys():
with pytest.raises(AttributeError):
new_instance_data[k] = None
# test __delattr__
# test key can be removed in instance_data_field
assert 'mask' in new_instance_data._data_fields
assert 'mask' in new_instance_data.keys()
assert 'mask' in new_instance_data
assert hasattr(new_instance_data, 'mask')
del new_instance_data.mask
assert 'mask' not in new_instance_data.keys()
assert 'mask' not in new_instance_data
assert 'mask' not in new_instance_data._data_fields
assert not hasattr(new_instance_data, 'mask')
# tset __delitem__
new_instance_data.mask = torch.rand(1, 2, 3)
assert 'mask' in new_instance_data._data_fields
assert 'mask' in new_instance_data
assert hasattr(new_instance_data, 'mask')
del new_instance_data['mask']
assert 'mask' not in new_instance_data
assert 'mask' not in new_instance_data._data_fields
assert 'mask' not in new_instance_data
assert not hasattr(new_instance_data, 'mask')
# test __setitem__
new_instance_data['mask'] = torch.rand(1, 2, 3)
assert 'mask' in new_instance_data._data_fields
assert 'mask' in new_instance_data.keys()
assert hasattr(new_instance_data, 'mask')
# test data_fields has been updated
assert 'mask' in new_instance_data.keys()
assert 'mask' in new_instance_data._data_fields
# '_meta_info_field', '_data_fields' is immutable.
with pytest.raises(AttributeError):
del new_instance_data['_data_fields']
with pytest.raises(AttributeError):
del new_instance_data['_meta_info_field']
# test __getitem__
new_instance_data.mask is new_instance_data['mask']
# test get
assert new_instance_data.get('mask') is new_instance_data.mask
assert new_instance_data.get('none_attribute', None) is None
assert new_instance_data.get('none_attribute', 1) == 1
# test pop
mask = new_instance_data.mask
assert new_instance_data.pop('mask') is mask
assert new_instance_data.pop('mask', None) is None
assert new_instance_data.pop('mask', 1) == 1
# '_meta_info_field', '_data_fields' is immutable.
with pytest.raises(KeyError):
new_instance_data.pop('_data_fields')
with pytest.raises(KeyError):
new_instance_data.pop('_meta_info_field')
# attribute in `_meta_info_field` is immutable
with pytest.raises(KeyError):
new_instance_data.pop('img_size')
# test pop attribute in instance_data_filed
new_instance_data['mask'] = torch.rand(1, 2, 3)
new_instance_data.pop('mask')
# test data_field has been updated
assert 'mask' not in new_instance_data
assert 'mask' not in new_instance_data._data_fields
assert 'mask' not in new_instance_data
# test_keys
new_instance_data.mask = torch.ones(1, 2, 3)
'mask' in new_instance_data.keys()
has_flag = False
for key in new_instance_data.keys():
if key == 'mask':
has_flag = True
assert has_flag
# test values
assert len(list(new_instance_data.keys())) == len(
list(new_instance_data.values()))
mask = new_instance_data.mask
has_flag = False
for value in new_instance_data.values():
if value is mask:
has_flag = True
assert has_flag
# test items
assert len(list(new_instance_data.keys())) == len(
list(new_instance_data.items()))
mask = new_instance_data.mask
has_flag = False
for key, value in new_instance_data.items():
if value is mask:
assert key == 'mask'
has_flag = True
assert has_flag
# test device
new_instance_data = GeneralData()
if torch.cuda.is_available():
newnew_instance_data = new_instance_data.new()
devices = ('cpu', 'cuda')
for i in range(10):
device = devices[i % 2]
newnew_instance_data[f'{i}'] = torch.rand(1, 2, 3, device=device)
newnew_instance_data = newnew_instance_data.cpu()
for value in newnew_instance_data.values():
assert not value.is_cuda
newnew_instance_data = new_instance_data.new()
devices = ('cuda', 'cpu')
for i in range(10):
device = devices[i % 2]
newnew_instance_data[f'{i}'] = torch.rand(1, 2, 3, device=device)
newnew_instance_data = newnew_instance_data.cuda()
for value in newnew_instance_data.values():
assert value.is_cuda
# test to
double_instance_data = instance_data.new()
double_instance_data.long = torch.LongTensor(1, 2, 3, 4)
double_instance_data.bool = torch.BoolTensor(1, 2, 3, 4)
double_instance_data = instance_data.to(torch.double)
for k, v in double_instance_data.items():
if isinstance(v, torch.Tensor):
assert v.dtype is torch.double
# test .cpu() .cuda()
if torch.cuda.is_available():
cpu_instance_data = double_instance_data.new()
cpu_instance_data.mask = torch.rand(1)
cuda_tensor = torch.rand(1, 2, 3).cuda()
cuda_instance_data = cpu_instance_data.to(cuda_tensor.device)
for value in cuda_instance_data.values():
assert value.is_cuda
cpu_instance_data = cuda_instance_data.cpu()
for value in cpu_instance_data.values():
assert not value.is_cuda
cuda_instance_data = cpu_instance_data.cuda()
for value in cuda_instance_data.values():
assert value.is_cuda
# test detach
grad_instance_data = double_instance_data.new()
grad_instance_data.mask = torch.rand(2, requires_grad=True)
grad_instance_data.mask_1 = torch.rand(2, requires_grad=True)
detach_instance_data = grad_instance_data.detach()
for value in detach_instance_data.values():
assert not value.requires_grad
# test numpy
tensor_instance_data = double_instance_data.new()
tensor_instance_data.mask = torch.rand(2, requires_grad=True)
tensor_instance_data.mask_1 = torch.rand(2, requires_grad=True)
numpy_instance_data = tensor_instance_data.numpy()
for value in numpy_instance_data.values():
assert isinstance(value, np.ndarray)
if torch.cuda.is_available():
tensor_instance_data = double_instance_data.new()
tensor_instance_data.mask = torch.rand(2)
tensor_instance_data.mask_1 = torch.rand(2)
tensor_instance_data = tensor_instance_data.cuda()
numpy_instance_data = tensor_instance_data.numpy()
for value in numpy_instance_data.values():
assert isinstance(value, np.ndarray)
instance_data['_c'] = 10000
instance_data.get('dad', None) is None
assert hasattr(instance_data, '_c')
del instance_data['_c']
assert not hasattr(instance_data, '_c')
instance_data.a = 1000
instance_data['a'] = 2000
assert instance_data['a'] == 2000
assert instance_data.a == 2000
assert instance_data.get('a') == instance_data['a'] == instance_data.a
instance_data._meta = 1000
assert '_meta' in instance_data.keys()
if torch.cuda.is_available():
instance_data.bbox = torch.ones(2, 3, 4, 5).cuda()
instance_data.score = torch.ones(2, 3, 4, 4)
else:
instance_data.bbox = torch.ones(2, 3, 4, 5)
assert len(instance_data.new().keys()) == 0
with pytest.raises(AttributeError):
instance_data.img_size = 100
for k, v in instance_data.items():
if k == 'bbox':
assert isinstance(v, torch.Tensor)
assert 'a' in instance_data
instance_data.pop('a')
assert 'a' not in instance_data
cpu_instance_data = instance_data.cpu()
for k, v in cpu_instance_data.items():
if isinstance(v, torch.Tensor):
assert not v.is_cuda
assert isinstance(cpu_instance_data.numpy().bbox, np.ndarray)
if torch.cuda.is_available():
        cuda_results = instance_data.cuda()
        for k, v in cuda_results.items():
if isinstance(v, torch.Tensor):
assert v.is_cuda
def test_instance_data():
meta_info = dict(
img_size=(256, 256),
path='dadfaff',
scale_factor=np.array([1.5, 1.5, 1, 1]))
data = dict(
bboxes=torch.rand(4, 4),
masks=torch.rand(4, 2, 2),
labels=np.random.rand(4),
size=[(i, i) for i in range(4)])
# test init
instance_data = InstanceData(meta_info)
assert 'path' in instance_data
instance_data = InstanceData(meta_info, data=data)
assert len(instance_data) == 4
instance_data.set_data(data)
assert len(instance_data) == 4
meta_info = copy.deepcopy(meta_info)
meta_info['img_name'] = 'flag'
# test newinstance_data
new_instance_data = instance_data.new(meta_info=meta_info)
for k, v in new_instance_data.meta_info_items():
if k in instance_data:
_equal(v, instance_data[k])
else:
assert _equal(v, meta_info[k])
assert k == 'img_name'
# meta info is immutable
with pytest.raises(KeyError):
meta_info = copy.deepcopy(meta_info)
meta_info['path'] = 'fdasfdsd'
instance_data.new(meta_info=meta_info)
# data fields should have same length
with pytest.raises(AssertionError):
temp_data = copy.deepcopy(data)
temp_data['bboxes'] = torch.rand(5, 4)
instance_data.new(data=temp_data)
temp_data = copy.deepcopy(data)
temp_data['scores'] = torch.rand(4)
new_instance_data = instance_data.new(data=temp_data)
for k, v in new_instance_data.items():
if k in instance_data:
_equal(v, instance_data[k])
else:
assert k == 'scores'
assert _equal(v, temp_data[k])
instance_data = instance_data.new()
# test __setattr__
# '_meta_info_field', '_data_fields' is immutable.
with pytest.raises(AttributeError):
instance_data._data_fields = dict()
with pytest.raises(AttributeError):
instance_data._data_fields = dict()
# all attribute in instance_data_field should be
# (torch.Tensor, np.ndarray, list))
with pytest.raises(AssertionError):
instance_data.a = 1000
# instance_data field should has same length
new_instance_data = instance_data.new()
new_instance_data.det_bbox = torch.rand(100, 4)
new_instance_data.det_label = torch.arange(100)
with pytest.raises(AssertionError):
new_instance_data.scores = torch.rand(101, 1)
new_instance_data.none = [None] * 100
with pytest.raises(AssertionError):
new_instance_data.scores = [None] * 101
new_instance_data.numpy_det = np.random.random([100, 1])
with pytest.raises(AssertionError):
new_instance_data.scores = np.random.random([101, 1])
# isinstance(str, slice, int, torch.LongTensor, torch.BoolTensor)
item = torch.Tensor([1, 2, 3, 4])
with pytest.raises(AssertionError):
new_instance_data[item]
len(new_instance_data[item.long()]) == 1
# when input is a bool tensor, The shape of
# the input at index 0 should equal to
# the value length in instance_data_field
with pytest.raises(AssertionError):
new_instance_data[item.bool()]
for i in range(len(new_instance_data)):
assert new_instance_data[i].det_label == i
assert len(new_instance_data[i]) == 1
# assert the index should in 0 ~ len(instance_data) -1
with pytest.raises(IndexError):
new_instance_data[101]
# assert the index should not be an empty tensor
new_new_instance_data = new_instance_data.new()
with pytest.raises(AssertionError):
new_new_instance_data[0]
# test str
with pytest.raises(AssertionError):
instance_data.img_size_dummmy = meta_info['img_size']
# test slice
    ten_results = new_instance_data[:10]
    assert len(ten_results) == 10
    for v in ten_results.values():
assert len(v) == 10
# test Longtensor
long_tensor = torch.randint(100, (50, ))
long_index_instance_data = new_instance_data[long_tensor]
assert len(long_index_instance_data) == len(long_tensor)
for key, value in long_index_instance_data.items():
if not isinstance(value, list):
assert (long_index_instance_data[key] == new_instance_data[key]
[long_tensor]).all()
else:
len(long_tensor) == len(value)
# test bool tensor
bool_tensor = torch.rand(100) > 0.5
bool_index_instance_data = new_instance_data[bool_tensor]
assert len(bool_index_instance_data) == bool_tensor.sum()
for key, value in bool_index_instance_data.items():
if not isinstance(value, list):
assert (bool_index_instance_data[key] == new_instance_data[key]
[bool_tensor]).all()
else:
assert len(value) == bool_tensor.sum()
num_instance = 1000
instance_data_list = []
# assert len(instance_lists) > 0
with pytest.raises(AssertionError):
instance_data.cat(instance_data_list)
for _ in range(2):
instance_data['bbox'] = torch.rand(num_instance, 4)
instance_data['label'] = torch.rand(num_instance, 1)
instance_data['mask'] = torch.rand(num_instance, 224, 224)
instance_data['instances_infos'] = [1] * num_instance
instance_data['cpu_bbox'] = np.random.random((num_instance, 4))
if torch.cuda.is_available():
instance_data.cuda_tensor = torch.rand(num_instance).cuda()
assert instance_data.cuda_tensor.is_cuda
cuda_instance_data = instance_data.cuda()
assert cuda_instance_data.cuda_tensor.is_cuda
assert len(instance_data[0]) == 1
with pytest.raises(IndexError):
return instance_data[num_instance + 1]
with pytest.raises(AssertionError):
instance_data.centerness = torch.rand(num_instance + 1, 1)
mask_tensor = torch.rand(num_instance) > 0.5
length = mask_tensor.sum()
assert len(instance_data[mask_tensor]) == length
index_tensor = torch.LongTensor([1, 5, 8, 110, 399])
length = len(index_tensor)
assert len(instance_data[index_tensor]) == length
instance_data_list.append(instance_data)
    cat_results = InstanceData.cat(instance_data_list)
    assert len(cat_results) == num_instance * 2
instances = InstanceData(data=dict(bboxes=torch.rand(4, 4)))
# cat only single instance
assert len(InstanceData.cat([instances])) == 4
|
nmt/model.py | ujos89/DualRL | 293 | 12629441 |
import tensorflow as tf
import opennmt as onmt
from utils import constants
from opennmt.layers.common import embedding_lookup
from utils import optim
class NMT(object):
"""A sequence-to-sequence model."""
def __init__(self, mode, params, src_vocab_size, tgt_vocab_size,
src_emb, tgt_emb, src_vocab_rev, tgt_vocab_rev, direction=''):
self.name = constants.NMT_VAR_SCOPE + direction
self.src_vocab_size = src_vocab_size
self.tgt_vocab_size = tgt_vocab_size
self.params = params
self.mode = mode
self.src_emb = src_emb
self.tgt_emb = tgt_emb
self.src_vocab_rev = src_vocab_rev
self.tgt_vocab_rev = tgt_vocab_rev
self.global_step = tf.Variable(0, trainable=False)
self.lr = self.params["learning_rate"]
self.sampling_probability = self.params.get("sampling_probability", 1.0)
self.input_ids = tf.placeholder(tf.int32, shape=(None, None), name=constants.INPUT_IDS)
self.input_length = tf.placeholder(tf.int32, shape=(None,), name=constants.INPUT_LENGTH)
self.target_ids_in = tf.placeholder(tf.int32, shape=(None, None), name=constants.LABEL_IDS_IN)
self.target_ids_out = tf.placeholder(tf.int32, shape=(None, None), name=constants.LABEL_IDS_OUT)
self.target_length = tf.placeholder(tf.int32, shape=(None,), name=constants.LABEL_LENGTH)
self.target_length_in_or_out = self.target_length + 1
self.reward = tf.placeholder(tf.float32, shape=(None,), name=constants.REWARD)
encoder_decoder_type = self.params.get("encoder_decoder_type", "bilstm")
print("Adopt {} as encoder and decoder".format(encoder_decoder_type))
if encoder_decoder_type.lower() == "bilstm":
self.encoder = onmt.encoders.BidirectionalRNNEncoder(params["n_layer"], params["encoder_units"])
if params["decoder_units"] == params["encoder_units"]:
print("RNN Decoder CopyBridge")
self.decoder = onmt.decoders.AttentionalRNNDecoder(params["n_layer"], params["decoder_units"],
bridge=onmt.layers.CopyBridge())
else:
print("RNN Decoder DenseBridge")
self.decoder = onmt.decoders.AttentionalRNNDecoder(params["n_layer"], params["decoder_units"],
bridge=onmt.layers.DenseBridge())
elif encoder_decoder_type.lower() == "transformer":
# Change to transformer: n_layer is 4 or 6, and encoder_units/decoder_units is 256 or 512
self.encoder = onmt.encoders.SelfAttentionEncoder(params["n_layer"], params["encoder_units"])
self.decoder = onmt.decoders.SelfAttentionDecoder(params["n_layer"], params["decoder_units"])
else:
raise ValueError("Unrecognized encoder_decoder_type: {}".format(encoder_decoder_type))
self.logits, self.predictions = self.build()
if mode != constants.INFER:
self.loss_per_sequence, self.loss = self.compute_loss()
self.lr_loss = self.compute_rl_loss()
if mode == constants.TRAIN:
with tf.variable_scope('train') as scope:
self.train_op = self.train(self.loss)
scope.reuse_variables()
self.retrain_op = self.train(self.lr_loss)
# only save NMT vars when dual training
var_list = [var for var in tf.global_variables() if self.name in var.name]
self.saver = tf.train.Saver(var_list=var_list, max_to_keep=10) # Must in the end of model define
def get_variable_initializer(self):
if self.params["initializer"] == "random_uniform":
return tf.random_uniform_initializer(1, -1)
elif self.params["initializer"] == "normal_unit_scaling":
return tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode="FAN_AVG", uniform=False)
elif self.params["initializer"] == "uniform_unit_scaling":
return tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode="FAN_AVG", uniform=True)
else:
raise ValueError("Unrecognized initializer: {}".format(self.params["initializer"]))
def build(self):
with tf.variable_scope(self.name, initializer=self.get_variable_initializer()):
encoder_outputs_tuple = self.encode()
logits, predictions = self.decode(encoder_outputs_tuple)
return logits, predictions
def encode(self, reuse=None):
input_ids, input_length = self.input_ids, self.input_length
input_ = embedding_lookup(self.src_emb, input_ids)
with tf.variable_scope("encoder", reuse=reuse):
return self.encoder.encode(input_, sequence_length=input_length, mode=self.mode)
def decode(self, encoder_outputs_tuple, output_layer=None, reuse=False):
(encoder_outputs, encoder_state, encoder_sequence_length) = encoder_outputs_tuple
self.encoder_outputs = encoder_outputs
input_ids, target_ids_in, target_length_in = self.input_ids, self.target_ids_in, self.target_length_in_or_out
with tf.variable_scope("decoder", reuse=reuse):
if output_layer is None:
output_layer = tf.layers.Dense(self.tgt_vocab_size)
output_layer.build([None, encoder_outputs.get_shape()[-1]])
predictions = None
logits = None
if self.mode != constants.INFER:
target_in = embedding_lookup(self.tgt_emb, target_ids_in)
logits, _, _ = self.decoder.decode(
target_in,
target_length_in,
vocab_size=self.tgt_vocab_size,
initial_state=encoder_state,
mode=self.mode,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length,
output_layer=output_layer)
else:
batch_size = tf.shape(encoder_sequence_length)[0]
maximum_iterations = self.params.get("maximum_iterations", 100)
start_tokens = tf.fill([batch_size], constants.START_OF_SENTENCE_ID)
end_token = constants.END_OF_SENTENCE_ID
decode_type = self.params.get("decode_type", constants.GREEDY)
decode_width = self.params.get("decode_width", 1)
if decode_type == constants.RANDOM:
print("random decode_width:", decode_width)
tile_start_tokens = tf.contrib.seq2seq.tile_batch(start_tokens, multiplier=decode_width)
tile_encoder_state = tf.contrib.seq2seq.tile_batch(encoder_state, multiplier=decode_width)
tile_encoder_outputs = tf.contrib.seq2seq.tile_batch(encoder_outputs, multiplier=decode_width)
tile_encoder_sequence_length = tf.contrib.seq2seq.tile_batch(encoder_sequence_length,
multiplier=decode_width)
sampled_ids, _, sampled_length, log_probs, alignment = self.decoder.dynamic_decode(
self.tgt_emb,
tile_start_tokens,
end_token,
vocab_size=self.tgt_vocab_size,
initial_state=tile_encoder_state,
output_layer=output_layer,
maximum_iterations=maximum_iterations,
mode=self.mode,
memory=tile_encoder_outputs,
memory_sequence_length=tile_encoder_sequence_length,
return_alignment_history=True,
sample_from=0,
# penalize_previous_words=True # True for Transformer
)
sampled_ids = tf.reshape(sampled_ids, (batch_size, decode_width, -1))
sampled_length = tf.reshape(sampled_length, (batch_size, decode_width))
log_probs = tf.reshape(log_probs, (batch_size, decode_width))
elif decode_type == constants.BEAM:
sampled_ids, _, sampled_length, log_probs, alignment = \
self.decoder.dynamic_decode_and_search(
self.tgt_emb,
start_tokens,
end_token,
vocab_size=self.tgt_vocab_size,
initial_state=encoder_state,
output_layer=output_layer,
beam_width=decode_width,
maximum_iterations=maximum_iterations,
mode=self.mode,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length,
return_alignment_history=True)
elif decode_type == constants.GREEDY or decode_width <= 1:
sampled_ids, _, sampled_length, log_probs, alignment = self.decoder.dynamic_decode(
self.tgt_emb,
start_tokens,
end_token,
vocab_size=self.tgt_vocab_size,
initial_state=encoder_state,
output_layer=output_layer,
maximum_iterations=maximum_iterations,
mode=self.mode,
memory=encoder_outputs,
memory_sequence_length=encoder_sequence_length,
return_alignment_history=True)
target_tokens = self.tgt_vocab_rev.lookup(tf.cast(sampled_ids, tf.int64))
predictions = {
"ids": sampled_ids,
"tokens": target_tokens,
"length": sampled_length,
"log_probs": log_probs}
return logits, predictions
def compute_loss(self):
max_time = tf.shape(self.logits)[1]
weights = tf.sequence_mask(self.target_length_in_or_out, maxlen=max_time, dtype=tf.float32)
loss_per_token = tf.contrib.seq2seq.sequence_loss(self.logits,
self.target_ids_out,
weights,
average_across_timesteps=False,
average_across_batch=False)
loss_per_sequence = tf.reduce_sum(loss_per_token, 1) / (tf.reduce_sum(weights + 1e-12, axis=1))
mean_loss = tf.reduce_mean(loss_per_sequence)
tf.summary.scalar("loss", mean_loss)
return loss_per_sequence, mean_loss
def train(self, loss):
vars_list = [var for var in tf.trainable_variables() if self.name in var.name]
params = self.params
train_op = optim.optimize(loss, params, trainable_varaibles=vars_list)
return train_op
def eval(self):
return self.compute_loss()
def infer(self):
return self.predictions
def apply_gradients(self, grads, var_list, optimizer=None):
if optimizer is None:
optimizer = tf.train.AdamOptimizer(self.lr)
train_op = optimizer.apply_gradients(zip(grads, var_list))
return train_op
def compute_rl_loss(self):
rl_loss = self.loss_per_sequence * self.reward
rl_loss = tf.reduce_mean(rl_loss)
return rl_loss
|
traffic/data/eurocontrol/aixm/navpoints.py | xoolive/traffic | 209 | 12629446 | import logging
import zipfile
from pathlib import Path
from typing import Any, Dict, List, Optional, Type, TypeVar, Union
from lxml import etree
import pandas as pd
from ...basic.navaid import Navaids
# https://github.com/python/mypy/issues/2511
T = TypeVar("T", bound="AIXMNavaidParser")
class AIXMNavaidParser(Navaids):
name: str = "aixm_navaids"
filename: Path
priority: int = 2
_extensions: Optional[pd.DataFrame] = None
@property
def available(self) -> bool:
if self.filename is None:
return False
dp_file = next(self.filename.glob("DesignatedPoint.BASELINE"), None)
navaid_file = next(self.filename.glob("Navaid.BASELINE"), None)
return dp_file is not None and navaid_file is not None
@property
def extensions(self) -> pd.DataFrame:
if self._extensions is not None:
return self._extensions
cache_file = self.cache_dir / (self.filename.stem + "_aixm_points.pkl")
extension_file = self.cache_dir / (
self.filename.stem + "_aixm_points_extensions.pkl"
)
if not extension_file.exists():
self.parse_data()
if self._data is not None:
self._data.to_pickle(cache_file)
if self._extensions is not None:
self._extensions.to_pickle(extension_file)
else:
logging.info("Loading aixm points database")
self._extensions = pd.read_pickle(extension_file)
return self._extensions
@property
def data(self) -> pd.DataFrame:
if self._data is not None:
return self._data
cache_file = self.cache_dir / (self.filename.stem + "_aixm_points.pkl")
extension_file = self.cache_dir / (
self.filename.stem + "_aixm_points_extensions.pkl"
)
if not cache_file.exists():
self.parse_data()
if self._data is not None:
self._data.to_pickle(cache_file)
if self._extensions is not None:
self._extensions.to_pickle(extension_file)
else:
logging.info("Loading aixm points database")
self._data = pd.read_pickle(cache_file)
return self._data
@classmethod
def from_file(
cls: Type[T], filename: Union[Path, str], **kwargs: Any
) -> Optional[T]:
instance = cls(None)
instance.filename = Path(filename)
return instance
def parse_data(self) -> None:
dirname = Path(self.filename)
all_points: Dict[str, Dict[str, Any]] = {}
extensions: List[Dict[str, Any]] = []
for filename in ["DesignatedPoint.BASELINE", "Navaid.BASELINE"]:
if not (dirname / filename).exists():
zippath = zipfile.ZipFile(
dirname.joinpath(f"{filename}.zip").as_posix()
)
zippath.extractall(dirname.as_posix())
ns: Dict[str, str] = dict()
# The versions for namespaces may be incremented and make everything
# fail just for that reason!
for _, (key, value) in etree.iterparse(
(dirname / "DesignatedPoint.BASELINE").as_posix(),
events=["start-ns"],
):
ns[key] = value
points = etree.parse((dirname / "DesignatedPoint.BASELINE").as_posix())
for point in points.findall(
"adrmsg:hasMember/aixm:DesignatedPoint", ns
):
identifier = point.find("gml:identifier", ns)
assert identifier is not None
assert identifier.text is not None
floats = point.find(
"aixm:timeSlice/aixm:DesignatedPointTimeSlice/"
"aixm:location/aixm:Point/gml:pos",
ns,
)
assert floats is not None
assert floats.text is not None
designator = point.find(
"aixm:timeSlice/aixm:DesignatedPointTimeSlice/aixm:designator",
ns,
)
type_ = point.find(
"aixm:timeSlice/aixm:DesignatedPointTimeSlice/aixm:type",
ns,
)
name = designator.text if designator is not None else None
type_str = type_.text if type_ is not None else None
coords = tuple(float(x) for x in floats.text.split())
all_points[identifier.text] = {
"latitude": coords[0],
"longitude": coords[1],
"name": name,
"type": type_str,
"id": identifier.text,
}
extension = point.find(
"aixm:timeSlice/aixm:DesignatedPointTimeSlice/"
"aixm:extension",
ns,
)
if extension is not None:
for point_usage in extension.findall(
"adrext:DesignatedPointExtension/"
"adrext:pointUsage/adrext:PointUsage",
ns,
):
role = point_usage.find("adrext:role", ns)
elt = dict(id=identifier.text, role=role.text)
airspace = point_usage.find("adrext:reference_airspace", ns)
if airspace is not None:
airspace_ref = airspace.attrib["{%s}href" % ns["xlink"]]
elt["airspace"] = airspace_ref.split(":")[-1]
reference_border = point_usage.find(
"adrext:reference_border", ns
)
if reference_border is not None:
path = "adrext:AirspaceBorderCrossingObject/"
path += "adrext:{}edAirspace".format(
"enter" if role.text == "FRA_ENTRY" else "exit"
)
airspace = reference_border.find(path, ns)
assert airspace is not None
airspace_ref = airspace.attrib["{%s}href" % ns["xlink"]]
elt["airspace"] = airspace_ref.split(":")[-1]
extensions.append(elt)
points = etree.parse((dirname / "Navaid.BASELINE").as_posix())
for point in points.findall("adrmsg:hasMember/aixm:Navaid", ns):
identifier = point.find("gml:identifier", ns)
assert identifier is not None
assert identifier.text is not None
floats = point.find(
"aixm:timeSlice/aixm:NavaidTimeSlice/"
"aixm:location/aixm:ElevatedPoint/gml:pos",
ns,
)
assert floats is not None
assert floats.text is not None
designator = point.find(
"aixm:timeSlice/aixm:NavaidTimeSlice/aixm:designator", ns
)
type_ = point.find(
"aixm:timeSlice/aixm:NavaidTimeSlice/aixm:type", ns
)
description = point.find(
"aixm:timeSlice/aixm:NavaidTimeSlice/aixm:name", ns
)
name = designator.text if designator is not None else None
type_str = type_.text if type_ is not None else None
description_str = (
description.text if description is not None else None
)
coords = tuple(float(x) for x in floats.text.split())
all_points[identifier.text] = {
"latitude": coords[0],
"longitude": coords[1],
"name": name,
"type": type_str,
"description": description_str,
"id": identifier.text,
}
extension = point.find(
"aixm:timeSlice/aixm:DesignatedPointTimeSlice/"
"aixm:extension",
ns,
)
if extension is not None:
for point_usage in extension.findall(
"adrext:DesignatedPointExtension/"
"adrext:pointUsage/adrext:PointUsage",
ns,
):
role = point_usage.find("adrext:role", ns)
elt = dict(id=identifier.text, role=role.text)
airspace = point_usage.find("adrext:reference_airspace", ns)
if airspace is not None:
airspace_ref = airspace.attrib["{%s}href" % ns["xlink"]]
elt["airspace"] = airspace_ref.split(":")[-1]
reference_border = point_usage.find(
"adrext:reference_border", ns
)
if reference_border is not None:
path = "adrext:AirspaceBorderCrossingObject/"
path += "adrext:{}edAirspace".format(
"enter" if role.text == "FRA_ENTRY" else "exit"
)
airspace = reference_border.find(path, ns)
assert airspace is not None
airspace_ref = airspace.attrib["{%s}href" % ns["xlink"]]
elt["airspace"] = airspace_ref.split(":")[-1]
path += "adrext:{}edAirspace".format(
"exit" if role.text == "FRA_ENTRY" else "enter"
)
airspace = reference_border.find(path, ns)
assert airspace is not None
airspace_ref = airspace.attrib["{%s}href" % ns["xlink"]]
elt["other"] = airspace_ref.split(":")[-1]
extensions.append(elt)
self._data = pd.DataFrame.from_records(
point for point in all_points.values()
)
self._extensions = pd.DataFrame.from_records(extensions)
return
|
data/transcoder_evaluation_gfg/python/LCS_FORMED_CONSECUTIVE_SEGMENTS_LEAST_LENGTH_K.py | mxl1n/CodeGen | 241 | 12629484 | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( k , s1 , s2 ) :
n = len ( s1 )
m = len ( s2 )
lcs = [ [ 0 for x in range ( m + 1 ) ] for y in range ( n + 1 ) ]
cnt = [ [ 0 for x in range ( m + 1 ) ] for y in range ( n + 1 ) ]
for i in range ( 1 , n + 1 ) :
for j in range ( 1 , m + 1 ) :
lcs [ i ] [ j ] = max ( lcs [ i - 1 ] [ j ] , lcs [ i ] [ j - 1 ] )
if ( s1 [ i - 1 ] == s2 [ j - 1 ] ) :
cnt [ i ] [ j ] = cnt [ i - 1 ] [ j - 1 ] + 1 ;
if ( cnt [ i ] [ j ] >= k ) :
for a in range ( k , cnt [ i ] [ j ] + 1 ) :
lcs [ i ] [ j ] = max ( lcs [ i ] [ j ] , lcs [ i - a ] [ j - a ] + a )
return lcs [ n ] [ m ]
#TOFILL
if __name__ == '__main__':
param = [
(4,'aggayxysdfa','aggajxaaasdfa',),
(2,'55571659965107','390286654154',),
(3,'01011011100','0000110001000',),
(5,'aggasdfa','aggajasdfaxy',),
(2,'5710246551','79032504084062',),
(3,'0100010','10100000',),
(3,'aabcaaaa','baaabcd',),
(1,'1219','3337119582',),
(2,'111000011','011',),
(2,'wiC oD','csiuGOUwE',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) |
core/run/profiler/pytroch.py | zhangzhengde0225/SwinTrack | 143 | 12629493 | import torch
from torch.profiler import profile, ProfilerActivity, tensorboard_trace_handler
class PytorchProfiler:
def __init__(self, output_path, device: str):
profile_activities = [ProfilerActivity.CPU]
if 'cuda' in device:
profile_activities += [ProfilerActivity.CUDA]
self.profile = profile(activities=profile_activities,
on_trace_ready=tensorboard_trace_handler(output_path),
record_shapes=True, with_stack=True, with_flops=True)
def __enter__(self):
self.profile.__enter__()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.profile.__exit__(exc_type, exc_val, exc_tb)
def step(self):
self.profile.step()
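

# A minimal usage sketch (added; not part of the original module). The output
# path, device string and training-loop names below are assumptions:
#   profiler = PytorchProfiler(output_path='./profiler_logs', device='cuda:0')
#   with profiler:
#       for batch in dataloader:   # `dataloader` / `train_step` are placeholders
#           train_step(batch)
#           profiler.step()        # advance the torch.profiler step counter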
|
deephyper/evaluator/_encoder.py | felixeperez/deephyper | 185 | 12629516 |
from inspect import isclass
import json
import types
import uuid
import ConfigSpace as cs
import ConfigSpace.hyperparameters as csh
import skopt
from ConfigSpace.read_and_write import json as cs_json
from numpy import bool_, floating, integer, ndarray
class Encoder(json.JSONEncoder):
"""
Enables JSON dump of numpy data, python functions.
"""
def default(self, obj):
if isinstance(obj, uuid.UUID):
return str(obj)
elif isinstance(obj, integer):
return int(obj)
elif isinstance(obj, floating):
return float(obj)
elif isinstance(obj, bool_):
return bool(obj)
elif isinstance(obj, ndarray):
return obj.tolist()
elif isinstance(obj, types.FunctionType) or isclass(obj):
return f"{obj.__module__}.{obj.__name__}"
elif isinstance(obj, skopt.space.Dimension):
return str(obj)
elif isinstance(obj, csh.Hyperparameter):
return str(obj)
elif isinstance(obj, cs.ConfigurationSpace):
return json.loads(cs_json.write(obj))
else:
return super(Encoder, self).default(obj)
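

if __name__ == "__main__":
    # Small usage sketch (added; not part of the original module): Encoder is
    # passed to ``json.dumps`` via ``cls`` so numpy scalars/arrays and UUIDs
    # serialize cleanly. The payload below is arbitrary.
    import numpy as np

    payload = {"lr": np.float32(1e-3), "layers": np.array([32, 64]), "run_id": uuid.uuid4()}
    print(json.dumps(payload, cls=Encoder))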
|
v3/as_demos/aledflash.py | Dilepa/micropython-async | 443 | 12629526 | # aledflash.py Demo/test program for MicroPython asyncio
# Author: <NAME>
# Copyright <NAME> 2020 Released under the MIT license
# Flashes the onboard LEDs, each at a different rate. Stops after ten seconds.
# Run on MicroPython board bare hardware
import pyb
import uasyncio as asyncio
async def toggle(objLED, time_ms):
while True:
await asyncio.sleep_ms(time_ms)
objLED.toggle()
# TEST FUNCTION
async def main(duration):
print("Flash LED's for {} seconds".format(duration))
leds = [pyb.LED(x) for x in range(1,4)] # Initialise three on board LED's
for x, led in enumerate(leds): # Create a task for each LED
t = int((0.2 + x/2) * 1000)
asyncio.create_task(toggle(leds[x], t))
await asyncio.sleep(duration)
def test(duration=10):
try:
asyncio.run(main(duration))
except KeyboardInterrupt:
print('Interrupted')
finally:
asyncio.new_event_loop()
print('as_demos.aledflash.test() to run again.')
test()
|
oscar/run_oscarplus_pretrain.py | ruotianluo/Oscar | 828 | 12629543 |
from __future__ import absolute_import, division, print_function
import argparse
import datetime
import json
import logging
import os
import random
import sys
import time
import math
import shutil
sys.path.insert(0, '.')
import numpy as np
import torch
from oscar.modeling.modeling_bert import BertImgForPreTraining
from transformers.pytorch_transformers import (WEIGHTS_NAME, BertConfig,
BertTokenizer)
from oscar.datasets.build import make_data_loader
from transformers.pytorch_transformers import AdamW, WarmupLinearSchedule
from oscar.utils.misc import mkdir, get_rank
from oscar.utils.metric_logger import TensorboardLogger
logger = logging.getLogger(__name__)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys()) for conf in (BertConfig,)), ())
MODEL_CLASSES = {
'bert': (BertConfig, BertImgForPreTraining, BertTokenizer),
}
""" ****** Pretraining ****** """
def main():
parser = argparse.ArgumentParser()
## Required parameters
parser.add_argument("--data_dir", default=None, type=str, required=False,
help="The input data dir. "
"Should contain the .yaml files for the task.")
parser.add_argument("--dataset_file", default=None, type=str, required=True,
help="The training dataset yaml file.")
parser.add_argument("--extra_dataset_file", default=None, type=str, required=False,
help="The extra training dataset yaml file.")
parser.add_argument("--bert_model", default=None, type=str, required=True,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
help="The output directory where the model checkpoints will be written.")
# image chunks
parser.add_argument("--chunk_start_id", default=-1, type=int,
help="Image Chunk Start ID")
parser.add_argument("--chunk_end_id", default=-1, type=int,
help="Image Chunk End ID")
## Image parameters
parser.add_argument("--max_img_seq_length", default=50, type=int,
help="The maximum total input image sequence length.")
parser.add_argument("--img_feature_dim", default=2054, type=int,
help="The Image Feature Dimension.")
parser.add_argument("--img_feature_type", default='faster_r-cnn', type=str,
help="faster_r-cnn or mask_r-cnn")
parser.add_argument("--use_layernorm", action='store_true',
help="use_layernorm")
parser.add_argument("--drop_out", default=0.1, type=float,
help="Drop out for BERT.")
parser.add_argument("--use_b", type=int, default=1, help="use_b")
parser.add_argument("--textb_sample_mode", type=int, default=0,
help="0: sample from both texta&textb, "
"1: sample from textb, "
"2: sample from QA answers")
parser.add_argument("--extra_textb_sample_mode", type=int, default=1)
parser.add_argument("--texta_false_prob", type=float, default=0.0,
help="the probality that we sample wrong texta, should in [0.0, 0.5]")
parser.add_argument("--model_name_or_path", default=None, type=str,
required=True,
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(
ALL_MODELS))
parser.add_argument("--config_name", default="", type=str,
help="Pretrained config name or path if not the same as model_name")
parser.add_argument("--tokenizer_name", default="", type=str,
help="Pretrained tokenizer name or path if not the same as model_name")
parser.add_argument("--cache_dir", default="", type=str,
help="Where do you want to store the pre-trained models downloaded from s3")
parser.add_argument("--max_seq_length", default=35, type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter than this will be padded.")
parser.add_argument("--do_train", action='store_true',
help="Whether to run training.")
parser.add_argument("--learning_rate", default=5e-5, type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--max_iters", default=2000000, type=int,
help="Maximal number of training iterations.")
parser.add_argument("--train_batch_size", default=1024, type=int,
help="Batch size for training.")
parser.add_argument("--num_workers", default=6, type=int,
help="Number of workers for dataset.")
parser.add_argument("--adam_epsilon", default=1e-8, type=float,
help="Epsilon for Adam optimizer.")
parser.add_argument("--optim", default='adamw', type=str,
help="The optimizer used for Bert, [adamw, lamb], default: adamw")
parser.add_argument("--max_grad_norm", default=-1.0, type=float, help="Max gradient norm.")
parser.add_argument("--warmup_steps", default=0, type=int,
help="Linear warmup over warmup_steps.")
parser.add_argument("--no_cuda", action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument("--on_memory", action='store_true',
help="Whether to load train samples into memory or use disk")
parser.add_argument("--do_lower_case", action='store_true',
help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument("--local_rank", type=int, default=-1,
help="local_rank for distributed training on gpus")
parser.add_argument('--seed', type=int, default=42,
help="random seed for initialization")
parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.")
parser.add_argument("--from_scratch", action='store_true',
help="train from scratch")
parser.add_argument("--use_img_layernorm", type=int, default=0,
help="Normalize image features with bertlayernorm")
parser.add_argument("--img_layer_norm_eps", default=1e-12, type=float,
help="The eps in image feature laynorm layer")
# distributed
parser.add_argument('--gpu_ids', type=str, default='-1')
parser.add_argument("--mask_loss_for_unmatched", type=int, default=1,
help="masked language model loss for unmatched triplets")
parser.add_argument("--extra_loss_weight", type=float, default=0.0,
help="the loss weight for the extra train data batch (should be in [0,1])")
parser.add_argument(
"--use_gtlabels",
type=int, default=1,
help="use groundtruth labels for text b or not"
)
# logging
parser.add_argument('--ckpt_period', type=int, default=10000,
help="Period for saving checkpoint")
parser.add_argument('--log_period', type=int, default=100,
help="Period for saving logging info")
args = parser.parse_args()
if args.gpu_ids != '-1':
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
args.num_gpus = int(
os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
args.distributed = args.num_gpus > 1
if args.gpu_ids != '-1':
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
logger.info("Output Directory Exists.")
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device(
"cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
args.n_gpu = torch.cuda.device_count()
    else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(
backend='nccl', init_method="env://"
)
args.n_gpu = 1
args.device = device
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s",
args.local_rank, device, args.n_gpu, bool(args.local_rank != -1)
)
if args.gradient_accumulation_steps < 1:
raise ValueError(
"Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.n_gpu > 0:
torch.cuda.manual_seed_all(args.seed)
if not args.do_train:
raise ValueError(
"Training is currently the only implemented execution option. Please set `do_train`.")
if not os.path.exists(args.output_dir):
mkdir(args.output_dir)
last_checkpoint_dir = None
arguments = {"iteration": 0}
if os.path.exists(args.output_dir):
save_file = os.path.join(args.output_dir, "last_checkpoint")
try:
with open(save_file, "r") as f:
last_saved = f.read()
last_saved = last_saved.strip()
except IOError:
# if file doesn't exist, maybe because it has just been
# deleted by a separate process
last_saved = ""
if last_saved:
folder_name = os.path.splitext(last_saved.split('/')[0])[0] # in the form of checkpoint-00001 or checkpoint-00001/pytorch_model.bin
last_checkpoint_dir = os.path.join(args.output_dir, folder_name)
arguments["iteration"] = int(folder_name.split('-')[-1])
assert os.path.isfile(os.path.join(last_checkpoint_dir, WEIGHTS_NAME)), "Last_checkpoint detected, but file not found!"
# model first
if get_rank() != 0:
torch.distributed.barrier()
config_class, model_class, tokenizer_class = MODEL_CLASSES[args.bert_model]
if last_checkpoint_dir is not None: # recovery
args.model_name_or_path = last_checkpoint_dir
logger.info(" -> Recovering model from {}".format(last_checkpoint_dir))
config = config_class.from_pretrained(
args.config_name if args.config_name else args.model_name_or_path,
)
config.img_layer_norm_eps = args.img_layer_norm_eps
config.use_img_layernorm = args.use_img_layernorm
# discrete code
config.img_feature_dim = args.img_feature_dim
config.img_feature_type = args.img_feature_type
config.hidden_dropout_prob = args.drop_out
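    # Three-way contrastive head when wrong-text-a sampling is enabled (or text-b is unused) and texta_false_prob < 0.5; otherwise a binary matched/unmatched head.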
if args.texta_false_prob < 0.5 and (args.texta_false_prob > 0 or not args.use_b):
args.num_contrast_classes = 3
else:
args.num_contrast_classes = 2
config.num_contrast_classes = args.num_contrast_classes
# Prepare model
# model = BertForPreTraining.from_pretrained(args.bert_model)
load_num = 0
while load_num < 10:
try:
model = BertImgForPreTraining.from_pretrained(
args.model_name_or_path,
from_tf=bool('.ckpt' in args.model_name_or_path),
config=config)
break
        except Exception:
load_num += 1
# train from scratch
if args.from_scratch:
if last_checkpoint_dir is None:
logger.info("Training from scratch ... ")
model.apply(model.init_weights)
total_params = sum(p.numel() for p in model.parameters())
logger.info(
'Total Parameters: {}'.format(total_params))
for key, val in vars(config).items():
setattr(args, key, val)
if get_rank() == 0 and args.local_rank != -1:
torch.distributed.barrier()
model.to(args.device)
logger.info("Training/evaluation parameters %s", args)
tb_log_dir = os.path.join(args.output_dir, 'train_logs')
meters = TensorboardLogger(
log_dir=tb_log_dir,
delimiter=" ",
)
# Prepare optimizer
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if
not any(nd in n for nd in no_decay)],
'weight_decay': 0.01},
{'params': [p for n, p in param_optimizer if
any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters,
lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(optimizer,
warmup_steps=args.warmup_steps,
t_total=args.max_iters)
if arguments['iteration'] > 0 and os.path.isfile(os.path.join(last_checkpoint_dir, 'optimizer.pth')): # recovery
logger.info(
"Load BERT optimizer from {}".format(last_checkpoint_dir))
optimizer_to_load = torch.load(
os.path.join(last_checkpoint_dir, 'optimizer.pth'),
map_location=torch.device("cpu"))
optimizer.load_state_dict(optimizer_to_load.pop("optimizer"))
scheduler.load_state_dict(optimizer_to_load.pop("scheduler"))
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], output_device=args.local_rank,
find_unused_parameters=True)
elif args.n_gpu > 1:
model = torch.nn.DataParallel(model)
# train_examples = None
train_dataloaders = make_data_loader(
args, is_distributed=args.distributed, arguments=arguments
)
if isinstance(train_dataloaders, list):
train_dataloader = train_dataloaders[0]
else:
train_dataloader = train_dataloaders
train_dataloader_extra = [None] * len(train_dataloader)
if isinstance(train_dataloaders, list) and len(train_dataloaders) > 1:
logger.info("Having two train dataloaders!")
train_dataloader_extra = train_dataloaders[1]
tokenizer = train_dataloader.dataset.tokenizer
# torch.backends.cudnn.benchmark = True
max_iter = len(train_dataloader)
start_iter = arguments["iteration"]
logger.info("***** Running training *****")
logger.info(" Num examples = {}".format(len(train_dataloader.dataset)))
logger.info(" Instantaneous batch size = %d",
args.train_batch_size // args.gradient_accumulation_steps)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size)
logger.info(" Gradient Accumulation steps = %d",
args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d",
max_iter // args.gradient_accumulation_steps)
log_json = {}
model.train()
model.zero_grad()
clock_started = False
# Every args.ckpt_period, report train_score and save model
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
for step, (batch, batch_extra) in enumerate(zip(train_dataloader, train_dataloader_extra), start_iter):
if not clock_started:
start_training_time = time.time()
end = time.time()
clock_started = True
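        # Unpack a mini-batch: image features plus per-sample targets (input ids, attention mask, segment ids, masked-LM labels, is-next and image-match labels), moved to the target device.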
def data_process(mini_batch):
images, targets, qa_inds = \
mini_batch[0], mini_batch[1], mini_batch[2]
targets_transposed = list(zip(*targets))
input_ids = torch.stack(targets_transposed[0]).to(args.device, non_blocking=True)
input_mask = torch.stack(targets_transposed[1]).to(args.device, non_blocking=True)
segment_ids = torch.stack(targets_transposed[2]).to(args.device, non_blocking=True)
lm_label_ids = torch.stack(targets_transposed[3]).to(args.device, non_blocking=True)
is_next = torch.stack(targets_transposed[4]).to(args.device, non_blocking=True)
is_img_match = torch.stack(targets_transposed[5]).to(args.device, non_blocking=True)
return images, input_ids, input_mask, segment_ids, lm_label_ids, is_next
images1, input_ids1, input_mask1, segment_ids1, lm_label_ids1, is_next1 \
= data_process(batch)
if batch_extra is not None:
images2, input_ids2, input_mask2, segment_ids2, lm_label_ids2, is_next2 \
= data_process(batch_extra)
data_time = time.time() - end
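        # One weighted forward/backward pass; returns the scaled loss value and the number of examples in the batch.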
def forward_backward(images, input_ids, input_mask, segment_ids,
lm_label_ids, is_next, loss_weight=1.0):
# feature as input
image_features = torch.stack(images).to(args.device, non_blocking=True)
outputs = model(input_ids, segment_ids, input_mask,
lm_label_ids, is_next, img_feats=image_features)
loss = loss_weight * outputs[0]
if args.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
loss.backward()
return loss.item(), input_ids.size(0)
start1 = time.time()
loss1, nb_tr_example1 = forward_backward(
images1, input_ids1, input_mask1,
segment_ids1, lm_label_ids1, is_next1,
loss_weight=1.0-args.extra_loss_weight
)
tr_loss += loss1
nb_tr_examples += nb_tr_example1
compute_time1 = time.time() - start1
loss2, nb_tr_example2 = 0.0, 0
compute_time2 = 0.0
if batch_extra is not None:
start2 = time.time()
loss2, nb_tr_example2 = forward_backward(
images2, input_ids2, input_mask2,
segment_ids2, lm_label_ids2, is_next2,
loss_weight=args.extra_loss_weight
)
tr_loss += loss2
nb_tr_examples += nb_tr_example2
compute_time2 = time.time() - start2
nb_tr_steps += 1
arguments["iteration"] = step + 1
if (step + 1) % args.gradient_accumulation_steps == 0:
# do gradient clipping
if args.max_grad_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
# do the optimization steps
optimizer.step()
scheduler.step() # Update learning rate schedule
optimizer.zero_grad()
# measure elapsed time
batch_time = time.time() - end
end = time.time()
metrics_to_log = {
'time_info': {'compute': batch_time, 'data': data_time,
'compute1': compute_time1,
'compute2': compute_time2},
'batch_metrics': {'loss': loss1+loss2}
}
params_to_log = {'params': {'bert_lr': optimizer.param_groups[0]["lr"]}}
meters.update_metrics(metrics_to_log)
meters.update_params(params_to_log)
if args.log_period > 0 and (step + 1) % args.log_period == 0:
avg_time = meters.meters['time_info']['compute'].global_avg
eta_seconds = avg_time * (max_iter - step - 1)
eta_string = str(
datetime.timedelta(seconds=int(eta_seconds)))
logger.info(
meters.delimiter.join(
[
"eta: {eta}",
"iter: {iter}",
"max mem: {memory:.0f}",
]
).format(
eta=eta_string,
iter=step + 1,
memory=torch.cuda.max_memory_allocated() / 1024.0 / 1024.0,
) + "\n " + meters.get_logs(step + 1)
)
if (step + 1) == max_iter or (step + 1) % args.ckpt_period == 0: # Save a trained model
log_json[step+1] = tr_loss
train_metrics_total = torch.Tensor([tr_loss, nb_tr_examples, nb_tr_steps]).to(args.device)
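            # Aggregate the loss/example/step counters across distributed workers before logging.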
torch.distributed.all_reduce(train_metrics_total)
# reset metrics
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
if get_rank() == 0:
# report metrics
train_score_gathered = train_metrics_total[0] / \
train_metrics_total[2]
logger.info("PROGRESS: {}%".format(
round(100 * (step + 1) / max_iter, 4)))
logger.info(
"EVALERR: {}%".format(train_score_gathered))
meters.update_metrics(
{
'epoch_metrics': {'ex_cnt': train_metrics_total[1],
'loss': train_score_gathered}
}
)
with open(os.path.join(args.output_dir, 'loss_logs.json'),
'w') as fp:
json.dump(log_json, fp)
# save checkpoint
output_dir = os.path.join(args.output_dir,
'checkpoint-{:07d}'.format(
step + 1))
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(
model,
'module') else model # Take care of distributed/parallel training
optimizer_to_save = {
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict()}
save_num = 0
while save_num < 10:
try:
model_to_save.save_pretrained(output_dir)
torch.save(args, os.path.join(output_dir,
'training_args.bin'))
tokenizer.save_pretrained(output_dir)
torch.save(optimizer_to_save,
os.path.join(output_dir,
'optimizer.pth'))
save_file = os.path.join(args.output_dir, "last_checkpoint")
with open(save_file, "w") as f:
f.write('checkpoint-{:07d}/pytorch_model.bin'.format(step + 1))
break
                    except Exception:
save_num += 1
logger.info(
"Saving model checkpoint {0} to {1}".format(
step + 1, output_dir))
if clock_started:
total_training_time = time.time() - start_training_time
else:
total_training_time = 0.0
total_time_str = str(datetime.timedelta(seconds=total_training_time))
logger.info(
"Total training time: {} ({:.4f} s / it)".format(
total_time_str, total_training_time / max_iter
)
)
# close the tb logger
meters.close()
if __name__ == "__main__":
main()
|
ydkgen/printer/printer_factory.py | YDK-Solutions/ydk | 125 | 12629544 | # ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
"""
printer_factory.py
Returns printer
"""
from ydkgen.printer.cpp.cpp_bindings_printer import CppBindingsPrinter
from ydkgen.printer.python.python_bindings_printer import PythonBindingsPrinter
from ydkgen.printer.go.go_bindings_printer import GoBindingsPrinter
class PrinterFactory(object):
def get_printer(self, language):
if language == 'cpp':
return CppBindingsPrinter
elif language == 'python':
return PythonBindingsPrinter
elif language == 'go':
return GoBindingsPrinter
else:
raise Exception('Language {0} not yet supported'.format(language))
|
classes/mediainfo.py | tmcdonagh/Autorippr | 162 | 12629545 | # -*- coding: utf-8 -*-
"""
Created on Mon Jan 09 11:20:23 2017
Dependencies:
System:
mediainfo
mkvtoolnix
Python (nonstandard library):
pymediainfo
For Windows, if mediainfo or mkvpropedit aren't in PATH, must give path to .dll (mediainfo)
or .exe (mkvpropedit) file
For *nix systems, use the path to the binary (although it's likely in PATH)
Takes an mkv file and analyzes it for foreign subtitle track. Assumes that foreign subtitle
track files are smaller in bit size but the same length as the main language track
@author: brodi
"""
import os
from pymediainfo import MediaInfo
from pipes import quote
import logger
import shlex
import subprocess
# main class that initializes settings for discovering/flagging a forced subtitle track
# edits python's os.environ in favor of putting full string when calling executables
class ForcedSubs(object):
def __init__(self, config):
self.log = logger.Logger('ForcedSubs', config['debug'], config['silent'])
self.lang = config['ForcedSubs']['language']
self.secsub_ratio = float(config['ForcedSubs']['ratio'])
self.mediainfoPath = config['ForcedSubs']['mediainfoPath']
self.mkvpropeditPath = config['ForcedSubs']['mkvpropeditPath']
if (self.mediainfoPath and
os.path.dirname(self.mediainfoPath) not in os.environ['PATH']):
os.environ['PATH'] = (os.path.dirname(config['ForcedSubs']['mediainfoPath']) + ';' +
os.environ['PATH'])
if (self.mkvpropeditPath and
os.path.dirname(self.mkvpropeditPath) not in os.environ['PATH']):
os.environ['PATH'] = (os.path.dirname(config['ForcedSubs']['mkvpropeditPath']) + ';' +
os.environ['PATH'])
def discover_forcedsubs(self, dbvideo):
"""
Attempts to find foreign subtitle track
Input:
dbvideo (Obj): Video database object
Output:
If successful, track number of forced subtitle
Else, None
"""
MEDIADIR = os.path.join(dbvideo.path, dbvideo.filename)
# wrapper class for mediainfo tool
media_info = MediaInfo.parse(MEDIADIR.encode('unicode-escape'))
subs = []
# Iterates though tracks and finds subtitles in preferred language, creates
# list of dictionaries
for track in media_info.tracks:
data = track.to_data()
if data['track_type'] == 'Text' and data['language']==self.lang:
subs.append(data)
        if len(subs) == 0:
self.log.info("No subtitle found, cannot determine foreign language track.")
return None
        if len(subs) == 1:
self.log.info("Only one {} subtitle found, cannot determine foreign language track."
.format(self.lang))
return None
# Sort list by size of track file
subs.sort(key=lambda sub: sub['stream_size'], reverse = True)
# Main language subtitle assumed to be largest
main_sub = subs[0]
main_subsize = main_sub['stream_size']
main_sublen = float(main_sub['duration'])
# Checks other subs for size, duration, and if forced flag is set
for sub in subs[1:]:
if (
sub['stream_size'] <= main_subsize*self.secsub_ratio
and main_sublen*.9 <= float(sub['duration']) <= main_sublen*1.1
and sub['forced']=='No'
):
secondary_sub = sub
else:
self.log.info("No foreign language subtitle found, try adjusting ratio.")
return None
return secondary_sub['track_id']
def flag_forced(self, dbvideo, track):
"""
Uses mkvpropedit to edit mkv header and flag the detected track as 'forced'
Input:
dbvideo (Obj): Video database object
track (int): Track number of foreign track to be flagged as 'forced'
Output:
Bool: Returns True of successful, returns False if not
"""
MEDIADIR = os.path.join(dbvideo.path, dbvideo.filename)
cmd_raw = 'mkvpropedit {} --edit track:{} --set flag-forced=1'.format(quote(MEDIADIR), track)
cmd = shlex.split(cmd_raw)
self.log.debug("mkpropedit cmd: {}".format(cmd))
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
(results, error) = proc.communicate()
        if proc.returncode != 0:
self.log.error(
"mkvpropedit (forced subtitles) returned status code {}".format(proc.returncode)
)
return False
        if len(results) != 0:
lines = results.split('\n')
for line in lines:
self.log.debug(line.strip())
return True
|
pvlib/tests/test_snow.py | JackKelly/pvlib-python | 695 | 12629554 | <gh_stars>100-1000
import numpy as np
import pandas as pd
from .conftest import assert_series_equal
from pvlib import snow
from pvlib.tools import sind
def test_fully_covered_nrel():
dt = pd.date_range(start="2019-1-1 12:00:00", end="2019-1-1 18:00:00",
freq='1h')
snowfall_data = pd.Series([1, 5, .6, 4, .23, -5, 19], index=dt)
expected = pd.Series([False, True, False, True, False, False, True],
index=dt)
fully_covered = snow.fully_covered_nrel(snowfall_data)
assert_series_equal(expected, fully_covered)
def test_coverage_nrel_hourly():
surface_tilt = 45
slide_amount_coefficient = 0.197
dt = pd.date_range(start="2019-1-1 10:00:00", end="2019-1-1 17:00:00",
freq='1h')
poa_irradiance = pd.Series([400, 200, 100, 1234, 134, 982, 100, 100],
index=dt)
temp_air = pd.Series([10, 2, 10, 1234, 34, 982, 10, 10], index=dt)
snowfall_data = pd.Series([1, .5, .6, .4, .23, -5, .1, .1], index=dt)
snow_coverage = snow.coverage_nrel(
snowfall_data, poa_irradiance, temp_air, surface_tilt,
threshold_snowfall=0.6)
slide_amt = slide_amount_coefficient * sind(surface_tilt)
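    # Expected coverage starts fully covered and decreases by slide_amt with each hourly step.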
covered = 1.0 - slide_amt * np.array([0, 1, 2, 3, 4, 5, 6, 7])
expected = pd.Series(covered, index=dt)
assert_series_equal(expected, snow_coverage)
def test_coverage_nrel_subhourly():
surface_tilt = 45
slide_amount_coefficient = 0.197
dt = pd.date_range(start="2019-1-1 11:00:00", end="2019-1-1 14:00:00",
freq='15T')
poa_irradiance = pd.Series([400, 200, 100, 1234, 134, 982, 100, 100, 100,
100, 100, 100, 0],
index=dt)
temp_air = pd.Series([10, 2, 10, 1234, 34, 982, 10, 10, 10, 10, -10, -10,
10], index=dt)
snowfall_data = pd.Series([1, .5, .6, .4, .23, -5, .1, .1, 0., 1., 0., 0.,
0.], index=dt)
snow_coverage = snow.coverage_nrel(
snowfall_data, poa_irradiance, temp_air, surface_tilt)
slide_amt = slide_amount_coefficient * sind(surface_tilt) * 0.25
covered = np.append(np.array([1., 1., 1., 1.]),
1.0 - slide_amt * np.array([1, 2, 3, 4, 5]))
covered = np.append(covered, np.array([1., 1., 1., 1. - slide_amt]))
expected = pd.Series(covered, index=dt)
assert_series_equal(expected, snow_coverage)
def test_fully_covered_nrel_irregular():
# test when frequency is not specified and can't be inferred
dt = pd.DatetimeIndex(["2019-1-1 11:00:00", "2019-1-1 14:30:00",
"2019-1-1 15:07:00", "2019-1-1 14:00:00"])
snowfall_data = pd.Series([1, .5, .6, .4], index=dt)
snow_coverage = snow.fully_covered_nrel(snowfall_data,
threshold_snowfall=0.5)
covered = np.array([False, False, True, False])
expected = pd.Series(covered, index=dt)
assert_series_equal(expected, snow_coverage)
def test_coverage_nrel_initial():
surface_tilt = 45
slide_amount_coefficient = 0.197
dt = pd.date_range(start="2019-1-1 10:00:00", end="2019-1-1 17:00:00",
freq='1h')
poa_irradiance = pd.Series([400, 200, 100, 1234, 134, 982, 100, 100],
index=dt)
temp_air = pd.Series([10, 2, 10, 1234, 34, 982, 10, 10], index=dt)
snowfall_data = pd.Series([0, .5, .6, .4, .23, -5, .1, .1], index=dt)
snow_coverage = snow.coverage_nrel(
snowfall_data, poa_irradiance, temp_air, surface_tilt,
initial_coverage=0.5, threshold_snowfall=1.)
slide_amt = slide_amount_coefficient * sind(surface_tilt)
covered = 0.5 - slide_amt * np.array([0, 1, 2, 3, 4, 5, 6, 7])
covered = np.where(covered < 0, 0., covered)
expected = pd.Series(covered, index=dt)
assert_series_equal(expected, snow_coverage)
def test_dc_loss_nrel():
num_strings = 8
snow_coverage = pd.Series([1, 1, .5, .6, .2, .4, 0])
expected = pd.Series([1, 1, .5, .625, .25, .5, 0])
actual = snow.dc_loss_nrel(snow_coverage, num_strings)
assert_series_equal(expected, actual)
|
test-crates/pyo3-ffi-pure/check_installed/check_installed.py | Contextualist/maturin | 135 | 12629562 | <reponame>Contextualist/maturin<filename>test-crates/pyo3-ffi-pure/check_installed/check_installed.py<gh_stars>100-1000
#!/usr/bin/env python3
import pyo3_ffi_pure
assert pyo3_ffi_pure.sum(2, 40) == 42
print("SUCCESS")
|
Language Skills/Python/Unit 02 Strings and Console Output/02 Date and Time/2-Getting the current date and time.py | rhyep/Python_tutorials | 346 | 12629567 | from datetime import datetime
now = datetime.now()
print now
|
api-inference-community/docker_images/generic/tests/test_api_text_to_speech.py | mlonaws/huggingface_hub | 362 | 12629573 | <reponame>mlonaws/huggingface_hub<filename>api-inference-community/docker_images/generic/tests/test_api_text_to_speech.py
import os
from unittest import TestCase, skipIf
from api_inference_community.validation import ffmpeg_read
from starlette.testclient import TestClient
from tests.test_api import TESTABLE_MODELS
@skipIf(
"text-to-speech" not in TESTABLE_MODELS,
"text-to-speech not implemented",
)
class TextToSpeechTestCase(TestCase):
def setUp(self):
model_id = TESTABLE_MODELS["text-to-speech"]
self.old_model_id = os.getenv("MODEL_ID")
self.old_task = os.getenv("TASK")
os.environ["MODEL_ID"] = model_id
os.environ["TASK"] = "text-to-speech"
from app.main import app
self.app = app
@classmethod
def setUpClass(cls):
from app.main import get_pipeline
get_pipeline.cache_clear()
def tearDown(self):
if self.old_model_id is not None:
os.environ["MODEL_ID"] = self.old_model_id
else:
del os.environ["MODEL_ID"]
if self.old_task is not None:
os.environ["TASK"] = self.old_task
else:
del os.environ["TASK"]
def test_simple(self):
with TestClient(self.app) as client:
response = client.post("/", json={"inputs": "This is some text"})
self.assertEqual(
response.status_code,
200,
)
self.assertEqual(response.headers["content-type"], "audio/flac")
audio = ffmpeg_read(response.content)
self.assertEqual(len(audio.shape), 1)
self.assertGreater(audio.shape[0], 1000)
def test_malformed_input(self):
with TestClient(self.app) as client:
response = client.post("/", data=b"\xc3\x28")
self.assertEqual(
response.status_code,
400,
)
self.assertEqual(
response.content,
b'{"error":"\'utf-8\' codec can\'t decode byte 0xc3 in position 0: invalid continuation byte"}',
)
|
tests/test_basic.py | tolomea/django-auto-prefetch | 127 | 12629582 | <filename>tests/test_basic.py<gh_stars>100-1000
import gc
import pickle
import pytest
from django.core.exceptions import ObjectDoesNotExist
import auto_prefetch
from . import models
def test_check_meta_inheritance_fail():
class TestModelBase(auto_prefetch.Model):
class Meta:
abstract = True
class TestModel1(TestModelBase):
pass
errors = TestModel1.check()
assert len(errors) == 1
assert errors[0].id == "auto_prefetch.E001"
assert errors[0].obj is TestModel1
assert errors[0].msg == (
"TestModel1 inherits from auto_prefetch.Model, but its Meta class does"
+ " not inherit from auto_prefetch.Model.Meta"
)
def test_check_meta_inheritance_success():
class TestModel2(auto_prefetch.Model):
class Meta(auto_prefetch.Model.Meta):
verbose_name = "My model"
errors = TestModel2.check()
assert errors == []
@pytest.mark.parametrize(
"Model,queries",
[
(models.Vanilla, 4),
(models.Prefetch, 2),
(models.MixedModel, 4),
(models.MixedField, 4),
],
)
@pytest.mark.django_db
def test_basic(django_assert_num_queries, Model, queries):
friend = models.Friend.objects.create()
[Model.objects.create(friend=friend) for _ in range(3)]
with django_assert_num_queries(queries):
for obj in Model.objects.all():
print(obj.pk, obj.friend.pk)
@pytest.mark.parametrize(
"Model,queries",
[
(models.Vanilla, 2),
(models.Prefetch, 2),
(models.MixedModel, 2),
(models.MixedField, 2),
],
)
@pytest.mark.django_db
def test_no_peers(django_assert_num_queries, Model, queries):
friend = models.Friend.objects.create()
Model.objects.create(friend=friend)
with django_assert_num_queries(queries):
for obj in Model.objects.all():
print(obj.pk, obj.friend.pk)
@pytest.mark.parametrize(
"Model,queries",
[
(models.Vanilla, 1),
(models.Prefetch, 1),
(models.MixedModel, 1),
(models.MixedField, 1),
(models.VanillaForward, 1),
(models.PrefetchForward, 1),
(models.VanillaReverse, 4),
(models.PrefetchReverse, 2),
],
)
@pytest.mark.django_db
def test_null(django_assert_num_queries, Model, queries):
[Model.objects.create() for _ in range(3)]
with django_assert_num_queries(queries):
for obj in Model.objects.all():
try:
print(obj.pk, obj.friend)
except ObjectDoesNotExist:
pass
@pytest.mark.parametrize(
"Model,queries",
[
(models.Vanilla, 1),
(models.Prefetch, 1),
(models.MixedModel, 1),
(models.MixedField, 1),
],
)
@pytest.mark.django_db
def test_values(django_assert_num_queries, Model, queries):
friend = models.Friend.objects.create()
[Model.objects.create(friend=friend) for _ in range(3)]
with django_assert_num_queries(queries):
for obj_pk, friend_pk in Model.objects.values_list("pk", "friend__pk"):
print(obj_pk, friend_pk)
@pytest.mark.parametrize(
"Model,queries",
[
(models.Vanilla, 7),
(models.Prefetch, 2),
(models.MixedModel, 7),
(models.MixedField, 7),
],
)
@pytest.mark.django_db
def test_multiples(django_assert_num_queries, Model, queries):
friend = models.Friend.objects.create()
associates = [models.Associate.objects.create(number=6) for _ in range(2)]
for _ in range(3):
obj = Model.objects.create(friend=friend)
obj.associates.set(associates)
with django_assert_num_queries(queries):
objs = list(Model.objects.filter(associates__number__gt=1))
assert len(objs) == 6
for obj in objs:
print(obj.pk, obj.friend)
@pytest.mark.django_db
def test_garbage_collection():
def check_instances(num):
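        # Force a garbage-collection pass and assert how many Prefetch instances are still alive.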
gc.collect()
objs = [o for o in gc.get_objects() if isinstance(o, models.Prefetch)]
assert len(objs) == num
friend = models.Friend.objects.create()
[models.Prefetch.objects.create(friend=friend) for _ in range(3)]
del friend
check_instances(0)
objs = list(models.Prefetch.objects.all())
check_instances(3)
obj = objs[0]
del objs
check_instances(1)
print(obj.pk, obj.friend)
@pytest.mark.parametrize(
"Model,Model2,queries",
[(models.Vanilla, models.Vanilla2, 7), (models.Prefetch, models.Prefetch2, 3)],
)
@pytest.mark.django_db
def test_cascading(django_assert_num_queries, Model, Model2, queries):
friend = models.Friend.objects.create()
for _ in range(3):
obj = Model.objects.create(friend=friend)
Model2.objects.create(other=obj)
with django_assert_num_queries(queries):
for obj in Model2.objects.all():
print(obj.pk, obj.other.pk, obj.other.friend.pk)
@pytest.mark.parametrize(
"Model,FriendModel,queries",
[
(models.VanillaForward, models.VanillaReverse, 4),
(models.PrefetchForward, models.PrefetchReverse, 2),
],
)
@pytest.mark.django_db
def test_basic_one2one(django_assert_num_queries, Model, FriendModel, queries):
for _ in range(3):
friend = FriendModel.objects.create()
Model.objects.create(friend=friend)
with django_assert_num_queries(queries):
for obj in Model.objects.all():
print(obj.pk, obj.friend.pk)
with django_assert_num_queries(queries):
for obj in FriendModel.objects.all():
print(obj.pk, obj.friend.pk)
@pytest.mark.parametrize(
"Model,FriendModel,queries",
[
(models.VanillaForward, models.VanillaReverse, 2),
(models.PrefetchForward, models.PrefetchReverse, 2),
],
)
@pytest.mark.django_db
def test_one2one_no_peers(django_assert_num_queries, Model, FriendModel, queries):
friend = FriendModel.objects.create()
Model.objects.create(friend=friend)
with django_assert_num_queries(queries):
for obj in Model.objects.all():
print(obj.pk, obj.friend.pk)
with django_assert_num_queries(queries):
for obj in FriendModel.objects.all():
print(obj.pk, obj.friend.pk)
@pytest.mark.parametrize(
"Model,queries",
[
(models.Vanilla, 4),
(models.Prefetch, 4),
(models.MixedModel, 4),
(models.MixedField, 4),
],
)
@pytest.mark.django_db
def test_pickle(django_assert_num_queries, Model, queries):
friend = models.Friend.objects.create()
[Model.objects.create(friend=friend) for _ in range(3)]
with django_assert_num_queries(queries):
for obj in Model.objects.all():
obj = pickle.loads(pickle.dumps(obj))
print(obj.pk, obj.friend.pk)
|
ufora/util/PythonCodeUtils.py | ufora/ufora | 571 | 12629583 | <filename>ufora/util/PythonCodeUtils.py
# Copyright 2015 Ufora Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import dis
import opcode
LOAD_FAST = opcode.opmap['LOAD_FAST']
LOAD_ATTR = opcode.opmap['LOAD_ATTR']
RETURN_VALUE = opcode.opmap['RETURN_VALUE']
def isSimpleFunction(x):
"""if a function looks like lambda self: self.a.b.c...,
    this function returns (True, ['a', 'b', 'c', ...]), otherwise (False, ())
"""
if not isinstance(x, type(lambda:0)):
return False, ()
c = x.func_code.co_code
vals = [ord(op) for op in c]
if x.func_code.co_argcount != 1:
return False, ()
if not vals:
return False, ()
if vals[0] != LOAD_FAST:
return False, ()
if vals[1] != 0 or vals[2] != 0:
return False, ()
if vals[-1] != RETURN_VALUE:
return False, ()
if (len(vals) - 1) % 3 != 0:
return False, ()
loads = (len(vals) - 1)/3
if vals[3:-1:3] != [LOAD_ATTR] * (loads-1):
return False, ()
if vals[2::3] != [0] * loads:
return False, ()
lookups = []
varnames = x.func_code.co_names
lookups = [varnames[l] for l in vals[4::3]]
return True, lookups
|
PyObjCTest/test_nsmetadata.py | Khan/pyobjc-framework-Cocoa | 132 | 12629636 | from Foundation import *
from PyObjCTools.TestSupport import *
try:
unicode
except NameError:
unicode = str
class TestNSMetaData (TestCase):
def testConstants(self):
self.assertIsInstance(NSMetadataQueryDidStartGatheringNotification, unicode)
self.assertIsInstance(NSMetadataQueryGatheringProgressNotification, unicode)
self.assertIsInstance(NSMetadataQueryDidFinishGatheringNotification, unicode)
self.assertIsInstance(NSMetadataQueryDidUpdateNotification, unicode)
self.assertIsInstance(NSMetadataQueryResultContentRelevanceAttribute, unicode)
self.assertIsInstance(NSMetadataQueryUserHomeScope, unicode)
self.assertIsInstance(NSMetadataQueryLocalComputerScope, unicode)
self.assertIsInstance(NSMetadataQueryNetworkScope, unicode)
@min_os_level('10.7')
def testConstants10_7(self):
self.assertIsInstance(NSMetadataQueryLocalDocumentsScope, unicode)
self.assertIsInstance(NSMetadataQueryUbiquitousDocumentsScope, unicode)
self.assertIsInstance(NSMetadataQueryUbiquitousDataScope, unicode)
self.assertIsInstance(NSMetadataItemFSNameKey, unicode)
self.assertIsInstance(NSMetadataItemDisplayNameKey, unicode)
self.assertIsInstance(NSMetadataItemURLKey, unicode)
self.assertIsInstance(NSMetadataItemPathKey, unicode)
self.assertIsInstance(NSMetadataItemFSSizeKey, unicode)
self.assertIsInstance(NSMetadataItemFSCreationDateKey, unicode)
self.assertIsInstance(NSMetadataItemFSContentChangeDateKey, unicode)
self.assertIsInstance(NSMetadataItemIsUbiquitousKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemHasUnresolvedConflictsKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemIsDownloadedKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemIsDownloadingKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemIsUploadedKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemIsUploadingKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemPercentDownloadedKey, unicode)
self.assertIsInstance(NSMetadataUbiquitousItemPercentUploadedKey, unicode)
def testMethods(self):
self.assertResultIsBOOL(NSMetadataQuery.startQuery)
self.assertResultIsBOOL(NSMetadataQuery.isStarted)
self.assertResultIsBOOL(NSMetadataQuery.isGathering)
self.assertResultIsBOOL(NSMetadataQuery.isStopped)
if __name__ == "__main__":
main()
|
common/utils/manopth/mano/webuser/posemapper.py | Alan-delete/I2L-MeshNet_RELEASE | 544 | 12629682 | '''
Copyright 2017 <NAME>, <NAME>, <NAME> and the Max Planck Gesellschaft. All rights reserved.
This software is provided for research purposes only.
By using this software you agree to the terms of the MANO/SMPL+H Model license here http://mano.is.tue.mpg.de/license
More information about MANO/SMPL+H is available at http://mano.is.tue.mpg.de.
For comments or questions, please email us at: <EMAIL>
About this file:
================
This file defines a wrapper for the loading functions of the MANO model.
Modules included:
- load_model:
loads the MANO model from a given file location (i.e. a .pkl file location),
or a dictionary object.
'''
import chumpy as ch
import numpy as np
import cv2
class Rodrigues(ch.Ch):
dterms = 'rt'
def compute_r(self):
return cv2.Rodrigues(self.rt.r)[0]
def compute_dr_wrt(self, wrt):
if wrt is self.rt:
return cv2.Rodrigues(self.rt.r)[1].T
def lrotmin(p):
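    # Maps pose parameters (excluding the first, global rotation) to concatenated, flattened (R - I) matrices computed via the Rodrigues formula.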
if isinstance(p, np.ndarray):
p = p.ravel()[3:]
return np.concatenate(
[(cv2.Rodrigues(np.array(pp))[0] - np.eye(3)).ravel()
for pp in p.reshape((-1, 3))]).ravel()
if p.ndim != 2 or p.shape[1] != 3:
p = p.reshape((-1, 3))
p = p[1:]
return ch.concatenate([(Rodrigues(pp) - ch.eye(3)).ravel()
for pp in p]).ravel()
def posemap(s):
if s == 'lrotmin':
return lrotmin
else:
raise Exception('Unknown posemapping: %s' % (str(s), ))
|
notebook/opencv_videocapture_play_cam.py | vhn0912/python-snippets | 174 | 12629699 | import cv2
import sys
camera_id = 0
delay = 1
window_name = 'frame'
cap = cv2.VideoCapture(camera_id)
if not cap.isOpened():
sys.exit()
while True:
ret, frame = cap.read()
cv2.imshow(window_name, frame)
if cv2.waitKey(delay) & 0xFF == ord('q'):
break
cv2.destroyWindow(window_name)
|
app/config/__init__.py | kaczmarj/grand-challenge.org | 101 | 12629703 | <gh_stars>100-1000
from django.conf import settings
from config.celery import celery_app
__all__ = ["celery_app"]
def toolbar_callback(*_, **__):
return settings.DEBUG and settings.ENABLE_DEBUG_TOOLBAR
|
renderer/od_renderer.py | archonic/frankmocap | 1,612 | 12629746 | # Copyright (c) Facebook, Inc. and its affiliates.
"""
Renders mesh using OpenDr / Pytorch-3d for visualization.
Part of code is modified from https://github.com/akanazawa/hmr
"""
import sys
import numpy as np
import cv2
import pdb
from PIL import Image, ImageDraw
from opendr.camera import ProjectPoints
from opendr.renderer import ColoredRenderer
from opendr.lighting import LambertianPointLight
class OpendrRenderer(object):
def __init__(self,
img_size=224,
mesh_color=np.array([0.5, 0.5, 0.5]),):
self.w = img_size
self.h = img_size
self.color = mesh_color
self.img_size = img_size
self.flength = 500.
def render(self, verts, faces, bg_img):
verts = verts.copy()
faces = faces.copy()
input_size = 500
f = 10
verts[:, 0] = (verts[:, 0] - input_size) / input_size
verts[:, 1] = (verts[:, 1] - input_size) / input_size
verts[:, 2] /= (5 * 112)
verts[:, 2] += f
cam_for_render = np.array([f, 1, 1]) * input_size
rend_img = self.__call__(
img=bg_img, cam=cam_for_render,
verts=verts, faces=faces, color=self.color)
return rend_img
def __call__(self,
verts,
faces,
cam=None,
img=None,
do_alpha=False,
far=None,
near=None,
color = np.array([0, 0, 255]),
img_size=None):
"""
cam is 3D [f, px, py]
"""
if img is not None:
h, w = img.shape[:2]
elif img_size is not None:
h = img_size[0]
w = img_size[1]
else:
h = self.h
w = self.w
if cam is None:
cam = [self.flength, w / 2., h / 2.]
use_cam = ProjectPoints(
f=cam[0] * np.ones(2),
rt=np.zeros(3),
t=np.zeros(3),
k=np.zeros(5),
c=cam[1:3])
if near is None:
near = np.maximum(np.min(verts[:, 2]) - 25, 0.1)
if far is None:
far = np.maximum(np.max(verts[:, 2]) + 25, 25)
return_value = render_model(
verts,
faces,
w,
h,
use_cam,
do_alpha=do_alpha,
img=img,
far=far,
near=near,
color=color)
imtmp = return_value
image = (imtmp * 255).astype('uint8')
return image
def _create_renderer(w=640,
h=480,
rt=np.zeros(3),
t=np.zeros(3),
f=None,
c=None,
k=None,
near=.5,
far=10.):
f = np.array([w, w]) / 2. if f is None else f
c = np.array([w, h]) / 2. if c is None else c
k = np.zeros(5) if k is None else k
rn = ColoredRenderer()
rn.camera = ProjectPoints(rt=rt, t=t, f=f, c=c, k=k)
rn.frustum = {'near': near, 'far': far, 'height': h, 'width': w}
return rn
def _rotateY(points, angle):
"""Rotate the points by a specified angle."""
ry = np.array([[np.cos(angle), 0., np.sin(angle)], [0., 1., 0.],
[-np.sin(angle), 0., np.cos(angle)]])
return np.dot(points, ry)
def simple_renderer(rn,
verts,
faces,
yrot=np.radians(70),
color=np.array([0, 0, 255])
):
# Rendered model color
rn.set(v=verts, f=faces, vc=color, bgcolor=np.ones(3))
albedo = rn.vc
# Construct Back Light (on back right corner)
rn.vc = LambertianPointLight(
f=rn.f,
v=rn.v,
num_verts=len(rn.v),
light_pos=_rotateY(np.array([-200, -100, -100]), yrot),
vc=albedo,
light_color=np.array([1, 1, 1]))
# Construct Left Light
rn.vc += LambertianPointLight(
f=rn.f,
v=rn.v,
num_verts=len(rn.v),
# light_pos=_rotateY(np.array([800, 10, 300]), yrot),
light_pos=_rotateY(np.array([800, 10, 300]), yrot),
vc=albedo,
light_color=np.array([1, 1, 1]))
# Construct Right Light
rn.vc += LambertianPointLight(
f=rn.f,
v=rn.v,
num_verts=len(rn.v),
light_pos=_rotateY(np.array([-500, 500, 1000]), yrot),
# light_pos=_rotateY(np.array([-500, 500, 1000]), yrot),
vc=albedo,
light_color=np.array([.7, .7, .7]))
return rn.r
def get_alpha(imtmp, bgval=1.):
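    # Alpha is 1 wherever any channel differs from the background value, 0 on pure-background pixels.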
h, w = imtmp.shape[:2]
alpha = (~np.all(imtmp == bgval, axis=2)).astype(imtmp.dtype)
b_channel, g_channel, r_channel = cv2.split(imtmp)
im_RGBA = cv2.merge((b_channel, g_channel, r_channel, alpha.astype(
imtmp.dtype)))
return im_RGBA
def append_alpha(imtmp):
alpha = np.ones_like(imtmp[:, :, 0]).astype(imtmp.dtype)
if np.issubdtype(imtmp.dtype, np.uint8):
alpha = alpha * 255
b_channel, g_channel, r_channel = cv2.split(imtmp)
im_RGBA = cv2.merge((b_channel, g_channel, r_channel, alpha))
return im_RGBA
def render_model(verts,
faces,
w,
h,
cam,
near=0.5,
far=25,
img=None,
do_alpha=False,
color=None):
rn = _create_renderer(
w=w, h=h, near=near, far=far, rt=cam.rt, t=cam.t, f=cam.f, c=cam.c)
# Uses img as background, otherwise white background.
if img is not None:
rn.background_image = img / 255. if img.max() > 1.1 else img
imtmp = simple_renderer(rn, verts, faces, color=color)
# If white bg, make transparent.
if img is None and do_alpha:
imtmp = get_alpha(imtmp)
elif img is not None and do_alpha:
imtmp = append_alpha(imtmp)
return imtmp |
h2o-py/tests/testdir_algos/glm/pyunit_solvers_glm.py | ahmedengu/h2o-3 | 6,098 | 12629754 | from __future__ import print_function
import sys
from h2o.exceptions import H2OResponseError
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
def glm_solvers():
predictors = ["displacement","power","weight","acceleration","year"]
for solver in ["AUTO", "IRLSM", "L_BFGS", "COORDINATE_DESCENT_NAIVE", "COORDINATE_DESCENT"]:
print("Solver = {0}".format(solver))
for family in ["binomial", "gaussian", "poisson", "tweedie", "gamma"]:
if family == 'binomial': response_col = "economy_20mpg"
elif family == 'gaussian': response_col = "economy"
else: response_col = "cylinders"
print("Family = {0}".format(family))
training_data = h2o.import_file(pyunit_utils.locate("smalldata/junit/cars_20mpg.csv"))
if family == 'binomial': training_data[response_col] = training_data[response_col].asfactor()
else: training_data[response_col] = training_data[response_col].asnumeric()
model = H2OGeneralizedLinearEstimator(family=family, alpha=0, Lambda=1e-5, solver=solver)
model.train(x=predictors, y=response_col, training_frame=training_data)
h2o.remove(training_data)
if __name__ == "__main__":
pyunit_utils.standalone_test(glm_solvers)
else:
glm_solvers()
|
housekeeping/filter_compile_commands.py | Alexey-N-Chernyshov/cpp-libp2p | 135 | 12629792 | <gh_stars>100-1000
#!/usr/bin/env python3
import sys
import json
import argparse
from pathlib import Path
import re
def remove_non_whitelist(wl):
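    # Builds a predicate that keeps only compile commands whose 'file' path contains one of the whitelisted substrings.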
def f(x):
for w in wl:
if w in x['file']:
print("selected for analysis: {}".format(w))
return True
return False
return f
def make_backup(src: Path):
dest = Path(src.absolute().__str__() + ".backup")
if dest.exists():
return # backup is already there
dest.touch()
dest.write_text(src.read_text()) # for text files
assert dest.exists()
def do(args) -> None:
builddir = args.p
if not builddir.exists():
raise Exception("build dir {} does not exist".format(builddir))
if not builddir.is_dir():
raise Exception("build dir {} is not dir".format(builddir))
p = Path(builddir, "compile_commands.json")
if not p.exists():
raise Exception("build dir {} does not contain compile_commands.json".format(builddir))
make_backup(p)
with p.open("r") as f:
data = f.read()
j = json.loads(data)
wl = read_whitelist()
j = filter(remove_non_whitelist(wl), j)
j = list(j)
s = json.dumps(j, indent=4)
with p.open("w") as w:
w.write(s)
print("success")
def read_whitelist():
whitelist = []
print("provide whitelisted files, one path on a single line")
for line in sys.stdin:
        lines = re.split(r"[\s\n]+", line)
for l in lines:
if len(l) > 0:
whitelist.append(l)
return whitelist
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog="filter "
)
parser.add_argument("-p", help="path to build dir",
type=Path)
args = parser.parse_args()
do(args)
|
src/python/nimbusml/examples/PrefixColumnConcatenator.py | montehoover/NimbusML | 134 | 12629800 | <filename>src/python/nimbusml/examples/PrefixColumnConcatenator.py<gh_stars>100-1000
###############################################################################
# PrefixColumnConcatenator
import numpy as np
import pandas as pd
from nimbusml.preprocessing.schema import PrefixColumnConcatenator
data = pd.DataFrame(
data=dict(
PrefixA=[2.5, np.nan, 2.1, 1.0],
PrefixB=[.75, .9, .8, .76],
AnotherColumn=[np.nan, 2.5, 2.6, 2.4]))
# transform usage
xf = PrefixColumnConcatenator(columns={'combined': 'Prefix'})
# fit and transform
features = xf.fit_transform(data)
# print features
print(features.head())
# PrefixA PrefixB AnotherColumn combined.PrefixA combined.PrefixB
#0 2.5 0.75 NaN 2.5 0.75
#1 NaN 0.90 2.5 NaN 0.90
#2 2.1 0.80 2.6 2.1 0.80
#3 1.0 0.76 2.4 1.0 0.76 |
evennia/web/utils/general_context.py | Jaykingamez/evennia | 1,544 | 12629816 | #
# This file defines global variables that will always be
# available in a view context without having to repeatedly
# include it. For this to work, this file is included in
# the settings file, in the TEMPLATE_CONTEXT_PROCESSORS
# tuple.
#
import os
from django.conf import settings
from evennia.utils.utils import get_evennia_version
# Determine the site name and server version
def set_game_name_and_slogan():
"""
Sets global variables GAME_NAME and GAME_SLOGAN which are used by
general_context.
Notes:
This function is used for unit testing the values of the globals.
"""
global GAME_NAME, GAME_SLOGAN, SERVER_VERSION
try:
GAME_NAME = settings.SERVERNAME.strip()
except AttributeError:
GAME_NAME = "Evennia"
SERVER_VERSION = get_evennia_version()
try:
GAME_SLOGAN = settings.GAME_SLOGAN.strip()
except AttributeError:
GAME_SLOGAN = SERVER_VERSION
set_game_name_and_slogan()
# Setup lists of the most relevant apps so
# the adminsite becomes more readable.
ACCOUNT_RELATED = ["Accounts"]
GAME_ENTITIES = ["Objects", "Scripts", "Comms", "Help"]
GAME_SETUP = ["Permissions", "Config"]
CONNECTIONS = ["Irc"]
WEBSITE = ["Flatpages", "News", "Sites"]
def set_webclient_settings():
"""
As with set_game_name_and_slogan above, this sets global variables pertaining
to webclient settings.
Notes:
Used for unit testing.
"""
global WEBCLIENT_ENABLED, WEBSOCKET_CLIENT_ENABLED, WEBSOCKET_PORT, WEBSOCKET_URL
WEBCLIENT_ENABLED = settings.WEBCLIENT_ENABLED
WEBSOCKET_CLIENT_ENABLED = settings.WEBSOCKET_CLIENT_ENABLED
# if we are working through a proxy or uses docker port-remapping, the webclient port encoded
# in the webclient should be different than the one the server expects. Use the environment
# variable WEBSOCKET_CLIENT_PROXY_PORT if this is the case.
WEBSOCKET_PORT = int(
os.environ.get("WEBSOCKET_CLIENT_PROXY_PORT", settings.WEBSOCKET_CLIENT_PORT)
)
# this is determined dynamically by the client and is less of an issue
WEBSOCKET_URL = settings.WEBSOCKET_CLIENT_URL
set_webclient_settings()
# The main context processor function
def general_context(request):
"""
Returns common Evennia-related context stuff, which
is automatically added to context of all views.
"""
account = None
if request.user.is_authenticated:
account = request.user
puppet = None
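    # Resolve the puppeted character stored in the session (by primary key) back to one of the account's characters.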
if account and request.session.get("puppet"):
pk = int(request.session.get("puppet"))
puppet = next((x for x in account.characters if x.pk == pk), None)
return {
"account": account,
"puppet": puppet,
"game_name": GAME_NAME,
"game_slogan": GAME_SLOGAN,
"evennia_userapps": ACCOUNT_RELATED,
"evennia_entityapps": GAME_ENTITIES,
"evennia_setupapps": GAME_SETUP,
"evennia_connectapps": CONNECTIONS,
"evennia_websiteapps": WEBSITE,
"webclient_enabled": WEBCLIENT_ENABLED,
"websocket_enabled": WEBSOCKET_CLIENT_ENABLED,
"websocket_port": WEBSOCKET_PORT,
"websocket_url": WEBSOCKET_URL,
}
|
tests/components/mythicbeastsdns/__init__.py | domwillcode/home-assistant | 30,023 | 12629820 | <gh_stars>1000+
"""Tests for the mythicbeastsdns component."""
|
src/websockets/http.py | m-novikov/websockets | 3,909 | 12629858 | <reponame>m-novikov/websockets<filename>src/websockets/http.py
from __future__ import annotations
import sys
from .imports import lazy_import
from .version import version as websockets_version
# For backwards compatibility:
lazy_import(
globals(),
# Headers and MultipleValuesError used to be defined in this module.
aliases={
"Headers": ".datastructures",
"MultipleValuesError": ".datastructures",
},
deprecated_aliases={
"read_request": ".legacy.http",
"read_response": ".legacy.http",
},
)
__all__ = ["USER_AGENT"]
PYTHON_VERSION = "{}.{}".format(*sys.version_info)
USER_AGENT = f"Python/{PYTHON_VERSION} websockets/{websockets_version}"
|
setup.py | salesforce/provis | 244 | 12629862 | <reponame>salesforce/provis
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
with open('requirements.txt', 'r') as reqs:
requirements = reqs.read().split()
setup(
name='provis',
packages=["protein_attention"],
version='0.0.1',
install_requires=requirements,
) |
lib-other/deterministiclib/CustomMath_test.py | endolith/Truthcoin | 161 | 12629886 | <reponame>endolith/Truthcoin
def test_GetWeight():
print(GetWeight([1,1,1,1]))
# [1] 0.25 0.25 0.25 0.25
print(GetWeight([10,10,10,10]))
# [1] 0.25 0.25 0.25 0.25
print(GetWeight([4,5,6,7]))
# [1] 0.1818182 0.2272727 0.2727273 0.3181818
print(GetWeight([4,5,6,7], True))
# [1] 0.2159091 0.2386364 0.2613636 0.2840909
def catch_test():
print(Catch(Decimal('.4')))#0
print(Catch(Decimal('.5')))#0.5
print(Catch(Decimal('.6')))#1
print(Catch(Decimal('.6'), Tolerance=Decimal('0.1')))#1
print(Catch(Decimal('.6'), Tolerance=Decimal('0.2')))#0.5
def MeanNATest():
v=[3,4,6,7,8]
print(MeanNA(v))
# [1] 3 4 6 7 8
v=[3,'NA',6,7,8]
print(MeanNA(v))
# [1] 3 6 6 7 8
v=[0,0,0,1,'NA']
print(MeanNA(v))
# [1] 0.00 0.00 0.00 1.00 0.25
v=[0,0,'NA',1,'NA']
print(MeanNA(v))
# [1] 0.0000000 0.0000000 0.3333333 1.0000000 0.3333333
def rescale_test():
m=[[1, 1, 0, 0],#, 233, Decimal(16027.59)],
[1, 0, 0, 0],# 199, 'NA'],
[1, 1, 0, 0],# 233, 16027.59],
[1, 1, 1, 0],# 250, 'NA'],
[0, 0, 1, 1],# 435, 8001.00],
[0, 0, 1, 1]]#, 435, 19999.00]]
print(Rescale(m))
def influence_test():
print(Influence([Decimal('0.25')]*4))#[1,1,1,1]
print(Influence([Decimal('0.3')]*2+[Decimal('0.4')]))#[0.9,0.9,1.2]
print(Influence([Decimal('0.99')]+[Decimal('0.0025')]*4))
## [1] 4.9500 0.0125 0.0125 0.0125 0.0125
def reweight_test():
print(ReWeight([1,1,1,1]))#[.25, .25, .25, .25]
print(ReWeight(['NA',1,'NA',1]))#[0, .5, 0, .5]
print(ReWeight([2,4,6,12]))## [1] 0.08333333 0.16666667 0.25000000 0.50000000
print(ReWeight([2,4,'NA',12]))# [1] 0.1111111 0.2222222 0.0000000 0.6666667
def test_weighted_sample_mean():
m=[[1,0,1],[1,1,1],[1,0,1]]
c=[1,1,1]
Mat=numpy.ma.masked_array(m)
Coins=numpy.array(map(lambda x: [x], c))
print(weighted_sample_mean(m, c))
print(numpy.ma.average(Mat, axis=0, weights=numpy.hstack(Coins))) # Computing the weighted sample mean (fast, efficient and precise)
def test_subtract_vector():
m=[[1,0,1],[1,1,1],[1,0,1]]
Mat=numpy.ma.masked_array(m)
v=[.5, .5, 0]
#print(Mat)
print(numpy.matrix(Mat-numpy.array(v)))
print(subtract_vector(m, v))
def test_matrix_multiply():
m=[[1,0,1],[1,1,1],[1,0,1]]
Mat=numpy.ma.masked_array(m)
coins=numpy.ma.masked_array([[1],[1],[2]])
print(matrix_multiply(Mat, coins))
print(numpy.ma.multiply(Mat, coins))
def dot_test():
m=[[1,2,3],[1,0,0],[0,4,0]]
m2=[[1],[1],[0]]
Mat=numpy.ma.masked_array(m)
Mat2=numpy.ma.masked_array(m2)
a=numpy.dot(Mat, Mat2)
b=dot(m, m2)
print(a)
print(b)
def v_average_test():
import numpy.ma as ma
M=[[1,1,0],[0,0,1],[0,1,0]]
Coins=[100000,200000,300000]
Mat=numpy.matrix(M)
Mean = ma.average(Mat, axis=0, weights=numpy.hstack(Coins))
print(Mean)
print(v_average(M, ReWeight(Coins)))
def b_test():
from numpy import ma as ma
td=0.33333333333333333
XM=[[-td, -td, 2*td],
[2*td, 2*td, -td],
[-td, -td, -td]]
Coins=[1000000]*3
print(ma.multiply(XM, Coins).T.dot(XM))
print(dot(switch_row_cols(matrix_multiply(XM, Coins)), XM))
def weighted_cov_test():
Mat=[[0,0,1],[1,1,0],[0,0,0]]
print(WeightedCov(Mat, [1,1,1]))
def weighted_median_test():
print(weighted_median([3,4,5],[Decimal('.2'),Decimal('.2'),Decimal('.6')]))
print(weighted_median([3,4,5],[Decimal('.2'),Decimal('.2'),Decimal('.5')]))
print(weighted_median([3,4,5],[Decimal('.2'),Decimal('.2'),Decimal('.4')]))
def dot_test():
a=[[1,0],
[0,1]]
n=[[2,0],
[0,1]]
c=[[-33333.33333333, 66666.66666667, -33333.33333333],
[-33333.33333333, 66666.66666667, -33333.33333333],
[ 66666.66666667, -33333.33333333, -33333.33333333]]
XM=[[-0.33333333, -0.33333333, 0.66666667],
[ 0.66666667, 0.66666667, -0.33333333],
[-0.33333333, -0.33333333, -0.33333333]]
print(dot(c, XM))
import numpy
print(numpy.dot(c, XM))
def ma_multiply_test():
XM=[[1,0,1,0],[0,1,0,0],[0,0,100, 0]]
Coins=[[1],[2],[3]]
from numpy import ma
print(ma.multiply(XM, Coins))
print(ma_multiply(XM, Coins))
Coins=[1,2,3,4]
print(ma.multiply(XM, Coins))
print(ma_multiply(XM, Coins))
def weightedprincomp_test():
import pprint
M=[[0,0,1],[1,1,0],[0,0,0]]
V=[Decimal('.1')]*3#]#, [Decimal('.1')], [Decimal('.8')], [Decimal('.1')]]
a=WeightedPrinComp(M,V)
pprint.pprint(WeightedPrinComp(M,V))
|
tests/test_mdp.py | bgalbraith/macarico | 121 | 12629910 | <reponame>bgalbraith/macarico<gh_stars>100-1000
from __future__ import division, generators, print_function
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable as Var
import macarico.util
macarico.util.reseed()
from macarico.annealing import ExponentialAnnealing, stochastic
from macarico.lts.maximum_likelihood import MaximumLikelihood
from macarico.lts.dagger import DAgger, TwistedDAgger
from macarico.lts.aggrevate import AggreVaTe
from macarico.lts.lols import lols
from macarico.annealing import EWMA
from macarico.features.actor import TransitionRNN, TransitionBOW
from macarico.features.sequence import AttendAt
from macarico.policies.linear import LinearPolicy
from macarico.tasks import mdp
class LearnerOpts:
MAXLIK = 'MaximumLikelihood'
DAGGER = 'DAgger'
TWISTED = 'TwistedDAgger'
AGGREVATE = 'AggreVaTe'
LOLS = 'LOLS'
def make_ross_mdp(T=100, reset_prob=0):
initial = [(0, 1/3), (1, 1/3)]
# s a s' p()
half_rp = reset_prob/2
default = 1-reset_prob
transitions = { 0: { 0: [(1, default), (0, half_rp), (2, half_rp)],
1: [(2, default), (0, half_rp), (1, half_rp)] },
1: { 0: [(2, default), (0, half_rp), (1, half_rp)],
1: [(1, default), (0, half_rp), (2, half_rp)] },
2: { 0: [(1, default), (1, half_rp), (2, half_rp)],
1: [(2, default), (0, half_rp), (2, half_rp)] } }
def pi_ref(s):
if isinstance(s, mdp.MDP):
s = s.s
# expert: s0->a0 s1->a1 s2->a0
if s == 0: return 0
if s == 1: return 1
if s == 2: return 0
assert False
def costs(s, a, s1):
# this is just Cmax=1 whenever we disagree with expert, and c=0 otherwise
return 0 if a == pi_ref(s) else 1
return mdp.MDPExample(initial, transitions, costs, T), \
mdp.DeterministicReference(pi_ref)
def test1(LEARNER=LearnerOpts.DAGGER):
    print()
    print('Running test 1 with learner=%s' % LEARNER)
    print('=======================================================')
n_states = 3
n_actions = 2
tRNN = TransitionRNN(
[mdp.MDPFeatures(n_states, noise_rate=0.5)],
[AttendAt(lambda _: 0, 's')],
n_actions)
policy = LinearPolicy(tRNN, n_actions)
p_rollin_ref = stochastic(ExponentialAnnealing(0.99))
p_rollout_ref = stochastic(ExponentialAnnealing(1))
optimizer = torch.optim.Adam(policy.parameters(), lr=0.01)
test_mdp, pi_ref = make_ross_mdp()
if LEARNER == LearnerOpts.DAGGER:
learner = lambda: DAgger(pi_ref, policy, p_rollin_ref)
elif LEARNER == LearnerOpts.TWISTED:
learner = lambda: TwistedDAgger(pi_ref, policy, p_rollin_ref)
elif LEARNER == LearnerOpts.MAXLIK:
learner = lambda: MaximumLikelihood(pi_ref, policy)
elif LEARNER == LearnerOpts.AGGREVATE:
learner = lambda: AggreVaTe(pi_ref, policy, p_rollin_ref)
elif LEARNER == LearnerOpts.LOLS:
learner = None
losses = []
    for epoch in range(101):
optimizer.zero_grad()
if learner is not None:
l = learner()
env = test_mdp.mk_env()
res = env.run_episode(l)
loss = mdp.MDPLoss()(test_mdp, env)
l.update(loss)
elif LEARNER == LearnerOpts.LOLS:
lols(test_mdp, mdp.MDPLoss, pi_ref, policy, p_rollin_ref, p_rollout_ref)
optimizer.step()
p_rollin_ref.step()
p_rollout_ref.step()
env = test_mdp.mk_env()
res = env.run_episode(policy)
loss = mdp.MDPLoss()(test_mdp, env)
losses.append(loss)
if epoch % 20 == 0:
            print(epoch, sum(losses[-100:]) / len(losses[-100:]), '\t', res)
if __name__ == '__main__':
test1(LearnerOpts.MAXLIK)
test1(LearnerOpts.DAGGER)
test1(LearnerOpts.AGGREVATE)
test1(LearnerOpts.LOLS)
|
parallelformers/policies/blenderbot_small.py | Oaklight/parallelformers | 454 | 12629921 | <reponame>Oaklight/parallelformers
# Copyright 2021 TUNiB inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers.models.blenderbot_small.modeling_blenderbot_small import (
BlenderbotSmallDecoderLayer,
BlenderbotSmallEncoderLayer,
)
from parallelformers.policies.base import Layer, Policy
from parallelformers.transformers.modeling_bart import BartAttention_
from parallelformers.utils.dist_utils import AllReduceLinear
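# Tensor-parallel policies for BlenderbotSmall: the per-layer hidden size and
# attention-head count are divided by the world size so each rank holds only a
# shard of the weights, while the output projections (attention out_proj and
# fc2) are replaced with AllReduceLinear so the sharded partial results are
# summed back together across ranks.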
class BlenderbotSmallEncoderPolicy(Policy):
@staticmethod
def replace_arguments(config, world_size):
return {
# 1. reduce hidden size
"self_attn.embed_dim": config.d_model // world_size,
# 2. reduce number of heads
"self_attn.num_heads": config.encoder_attention_heads // world_size,
}
@staticmethod
def replace_modules():
return {
"BlenderbotSmallAttention": BartAttention_,
}
@staticmethod
def attn_qkv():
return [
Layer(
weight="self_attn.q_proj.weight",
bias="self_attn.q_proj.bias",
),
Layer(
weight="self_attn.k_proj.weight",
bias="self_attn.k_proj.bias",
),
Layer(
weight="self_attn.v_proj.weight",
bias="self_attn.v_proj.bias",
),
]
@staticmethod
def attn_out():
return [
Layer(
weight="self_attn.out_proj.weight",
bias="self_attn.out_proj.bias",
replace=AllReduceLinear,
),
]
@staticmethod
def mlp_in():
return [
Layer(
weight="fc1.weight",
bias="fc1.bias",
),
]
@staticmethod
def mlp_out():
return [
Layer(
weight="fc2.weight",
bias="fc2.bias",
replace=AllReduceLinear,
),
]
@staticmethod
def original_layer_class():
return BlenderbotSmallEncoderLayer
class BlenderbotSmallDecoderPolicy(Policy):
@staticmethod
def replace_arguments(config, world_size):
return {
# 1. reduce hidden size
"self_attn.embed_dim": config.d_model // world_size,
"encoder_attn.embed_dim": config.d_model // world_size,
# 2. reduce number of heads
"self_attn.num_heads": config.decoder_attention_heads // world_size,
"encoder_attn.num_heads": config.decoder_attention_heads // world_size,
}
@staticmethod
def replace_modules():
return {
"BlenderbotSmallAttention": BartAttention_,
}
@staticmethod
def attn_qkv():
return [
Layer(
weight="self_attn.q_proj.weight",
bias="self_attn.q_proj.bias",
),
Layer(
weight="self_attn.k_proj.weight",
bias="self_attn.k_proj.bias",
),
Layer(
weight="self_attn.v_proj.weight",
bias="self_attn.v_proj.bias",
),
Layer(
weight="encoder_attn.q_proj.weight",
bias="encoder_attn.q_proj.bias",
),
Layer(
weight="encoder_attn.k_proj.weight",
bias="encoder_attn.k_proj.bias",
),
Layer(
weight="encoder_attn.v_proj.weight",
bias="encoder_attn.v_proj.bias",
),
]
@staticmethod
def attn_out():
return [
Layer(
weight="self_attn.out_proj.weight",
bias="self_attn.out_proj.bias",
replace=AllReduceLinear,
),
Layer(
weight="encoder_attn.out_proj.weight",
bias="encoder_attn.out_proj.bias",
replace=AllReduceLinear,
),
]
@staticmethod
def mlp_in():
return [
Layer(
weight="fc1.weight",
bias="fc1.bias",
),
]
@staticmethod
def mlp_out():
return [
Layer(
weight="fc2.weight",
bias="fc2.bias",
replace=AllReduceLinear,
),
]
@staticmethod
def original_layer_class():
return BlenderbotSmallDecoderLayer
|
jirafs/commands/debug.py | coddingtonbear/jirafs | 119 | 12629927 | from jirafs.plugin import CommandPlugin
try:
import ipdb as pdb # noqa
except ImportError:
import pdb
class Command(CommandPlugin):
""" Open a debug console """
MIN_VERSION = "2.0.0"
MAX_VERSION = "3.0.0"
def main(self, folder, **kwargs):
return pdb.set_trace()
|
rdkit/ML/Neural/NetNode.py | kazuyaujihara/rdkit | 1,609 | 12629930 | #
# Copyright (C) 2000-2008 <NAME>
#
""" Contains the class _NetNode_ which is used to represent nodes in neural nets
**Network Architecture:**
A tacit assumption in all of this stuff is that we're dealing with
feedforward networks.
The network itself is stored as a list of _NetNode_ objects. The list
is ordered in the sense that nodes in earlier/later layers than a
given node are guaranteed to come before/after that node in the list.
This way we can easily generate the values of each node by moving
sequentially through the list, we're guaranteed that every input for a
node has already been filled in.
Each node stores a list (_inputNodes_) of indices of its inputs in the
main node list.
"""
import numpy
from . import ActFuncs
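# A minimal usage sketch (illustrative values only, not from the original
# docs): nodes 0 and 1 act as inputs whose activations are pre-filled in the
# value vector, and node 2 combines them through the default sigmoid.
#
#   nodeList = [None, None, None]
#   nodeList[0] = NetNode(0, nodeList)
#   nodeList[1] = NetNode(1, nodeList)
#   nodeList[2] = NetNode(2, nodeList, inputNodes=[0, 1], weights=[0.5, -0.25])
#   vals = numpy.array([0.3, 0.8, 0.0])
#   out = nodeList[2].Eval(vals)  # sigmoid(0.5*0.3 - 0.25*0.8); also stored in vals[2]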
# FIX: this class has not been updated to new-style classes
# (RD Issue380) because that would break all of our legacy pickled
# data. Until a solution is found for this breakage, an update is
# impossible.
class NetNode:
""" a node in a neural network
"""
def Eval(self, valVect):
"""Given a set of inputs (valVect), returns the output of this node
**Arguments**
- valVect: a list of inputs
**Returns**
the result of running the values in valVect through this node
"""
if self.inputNodes and len(self.inputNodes) != 0:
# grab our list of weighted inputs
inputs = numpy.take(valVect, self.inputNodes)
# weight them
inputs = self.weights * inputs
# run that through the activation function
val = self.actFunc(sum(inputs))
else:
val = 1
# put our value in the list and return it (just in case)
valVect[self.nodeIndex] = val
return val
def SetInputs(self, inputNodes):
""" Sets the input list
**Arguments**
- inputNodes: a list of _NetNode_s which are to be used as inputs
**Note**
If this _NetNode_ already has weights set and _inputNodes_ is a different length,
this will bomb out with an assertion.
"""
if self.weights is not None:
assert len(self.weights) == len(inputNodes), \
'lengths of weights and nodes do not match'
self.inputNodes = inputNodes[:]
def GetInputs(self):
""" returns the input list
"""
return self.inputNodes
def SetWeights(self, weights):
""" Sets the weight list
**Arguments**
- weights: a list of values which are to be used as weights
**Note**
If this _NetNode_ already has _inputNodes_ and _weights_ is a different length,
this will bomb out with an assertion.
"""
if self.inputNodes:
assert len(weights) == len(self.inputNodes),\
'lengths of weights and nodes do not match'
self.weights = numpy.array(weights)
def GetWeights(self):
""" returns the weight list
"""
return self.weights
def __init__(self, nodeIndex, nodeList, inputNodes=None, weights=None, actFunc=ActFuncs.Sigmoid,
actFuncParms=()):
""" Constructor
**Arguments**
- nodeIndex: the integer index of this node in _nodeList_
- nodeList: the list of other _NetNodes_ already in the network
- inputNodes: a list of this node's inputs
- weights: a list of this node's weights
- actFunc: the activation function to be used here. Must support the API
of _ActFuncs.ActFunc_.
- actFuncParms: a tuple of extra arguments to be passed to the activation function
constructor.
**Note**
There should be only one copy of _inputNodes_, every _NetNode_ just has a pointer
to it so that changes made at one node propagate automatically to the others.
"""
if inputNodes and weights:
assert (len(weights) == len(inputNodes))
if weights:
self.weights = numpy.array(weights)
else:
self.weights = None
if inputNodes:
self.inputNodes = inputNodes[:]
else:
self.inputNodes = None
self.nodeIndex = nodeIndex
# there's only one of these, everybody has a pointer to it.
self.nodeList = nodeList
self.actFunc = actFunc(*actFuncParms)
|
scripts/seastar-json2code.py | liubangchen/seastar | 6,526 | 12629935 | #!/usr/bin/env python3
# C++ Code generation utility from Swagger definitions.
# This utility support Both the swagger 1.2 format
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/1.2.md
# And the 2.0 format
# https://github.com/OAI/OpenAPI-Specification/blob/master/versions/2.0.md
#
# Swagger 2.0 is not only different in its structure (apis have moved, and
# models are now under definitions); it also moved from a multiple-file
# structure to a single file.
# To keep the multiple-file support, each group of APIs will be placed in a
# single file. Each group can have a .def.json file with its definitions
# (what used to be models).
# Because the APIs in definitions are snippets, they are not legal json
# objects and need to be formatted as such so that a json parser would work.
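# For instance, an illustrative swagger 2.0 path snippet such as
#
#     "/system/uptime": { "get": { "operationId": "get_uptime" } }
#
# is not a legal JSON document on its own; format_as_json_object() below
# wraps such snippets in braces before handing them to the json parser.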
import json
import sys
import re
import glob
import argparse
import os
from string import Template
parser = argparse.ArgumentParser(description="""Generate C++ class for json
handling from swagger definition""")
parser.add_argument('--outdir', help='the output directory', default='autogen')
parser.add_argument('-o', help='Output file', default='')
parser.add_argument('-f', help='input file', default='api-java.json')
parser.add_argument('-ns', help="""namespace; when set, the generated structs
will be created under the given namespace""", default='')
parser.add_argument('-jsoninc', help='relative path to the jsaon include',
default='json/')
parser.add_argument('-jsonns', help='set the json namespace', default='json')
parser.add_argument('-indir', help="""when set, all json files in the given
directory will be parsed; do not use with -f""", default='')
parser.add_argument('-debug', help='debug level: 0 - quiet, 1 - error, 2 - verbose',
default='1', type=int)
parser.add_argument('-combined', help='set the name of the combined file',
                    default='autogen/pathautogen.cc')
parser.add_argument('--create-cc', dest='create_cc', action='store_true', default=False,
help='Put global variables in a .cc file')
config = parser.parse_args()
valid_vars = {'string': 'sstring', 'int': 'int', 'double': 'double',
'float': 'float', 'long': 'long', 'boolean': 'bool', 'char': 'char',
'datetime': 'json::date_time'}
current_file = ''
spacing = " "
def getitem(d, key, name):
if key in d:
return d[key]
else:
raise Exception("'" + key + "' not found in " + name)
def fprint(f, *args):
for arg in args:
f.write(arg)
def fprintln(f, *args):
for arg in args:
f.write(arg)
f.write('\n')
def open_namespace(f, ns=config.ns):
fprintln(f, "namespace ", ns , ' {\n')
def close_namespace(f):
fprintln(f, '}')
def add_include(f, includes):
for include in includes:
fprintln(f, '#include ', include)
fprintln(f, "")
def trace_verbose(*params):
if config.debug > 1:
print(''.join(params))
def trace_err(*params):
if config.debug > 0:
print(current_file + ':' + ''.join(params))
def valid_type(param):
if param in valid_vars:
return valid_vars[param]
trace_err("Type [", param, "] not defined")
return param
def type_change(param, member):
if param == "array":
if "items" not in member:
trace_err("array without item declaration in ", param)
return ""
item = member["items"]
if "type" in item:
t = item["type"]
elif "$ref" in item:
t = item["$ref"]
else:
trace_err("array items with no type or ref declaration ", param)
return ""
return "json_list< " + valid_type(t) + " >"
return "json_element< " + valid_type(param) + " >"
def print_ind_comment(f, ind, *params):
fprintln(f, ind, "/**")
for s in params:
fprintln(f, ind, " * ", s)
fprintln(f, ind, " */")
def print_comment(f, *params):
print_ind_comment(f, spacing, *params)
def print_copyrights(f):
fprintln(f, "/*")
fprintln(f, "* Copyright (C) 2014 Cloudius Systems, Ltd.")
fprintln(f, "*")
fprintln(f, "* This work is open source software, licensed under the",
" terms of the")
fprintln(f, "* BSD license as described in the LICENSE f in the top-",
"level directory.")
fprintln(f, "*")
fprintln(f, "* This is an Auto-Generated-code ")
fprintln(f, "* Changes you do in this file will be erased on next",
" code generation")
fprintln(f, "*/\n")
def print_h_file_headers(f, name):
print_copyrights(f)
fprintln(f, "#ifndef __JSON_AUTO_GENERATED_" + name)
fprintln(f, "#define __JSON_AUTO_GENERATED_" + name + "\n")
def clean_param(param):
match = re.match(r"^\{\s*([^\}]+)\s*}", param)
if match:
return [match.group(1), False]
return [param, True]
def get_parameter_by_name(obj, name):
for p in obj["parameters"]:
if p["name"] == name:
return p
trace_err ("No Parameter declaration found for ", name)
def clear_path_ending(path):
if not path or path[-1] != '/':
return path
return path[0:-1]
# check if a parameter is query required.
# It will return true if the required flag is set
# and if it is a query parameter, both swagger 1.2 'paramType' and swagger 2.0 'in' attributes
# are supported
def is_required_query_param(param):
return "required" in param and param["required"] and ("paramType" in param and param["paramType"] == "query" or "in" in param and param["in"] == "query")
def add_path(f, path, details):
if "summary" in details:
print_comment(f, details["summary"])
param_starts = path.find("{")
if param_starts >= 0:
path_reminder = path[param_starts:]
vals = path.split("/")
vals.reverse()
fprintln(f, spacing, 'path_description::add_path("', clear_path_ending(vals.pop()),
'",', details["method"], ',"', details["nickname"], '")')
while vals:
param, is_url = clean_param(vals.pop())
if is_url:
fprintln(f, spacing, ' ->pushurl("', param, '")')
else:
param_type = get_parameter_by_name(details, param)
if ("allowMultiple" in param_type and
param_type["allowMultiple"] == True):
fprintln(f, spacing, ' ->pushparam("', param, '",true)')
else:
fprintln(f, spacing, ' ->pushparam("', param, '")')
else:
fprintln(f, spacing, 'path_description::add_path("', clear_path_ending(path), '",',
details["method"], ',"', details["nickname"], '")')
if "parameters" in details:
for param in details["parameters"]:
if is_required_query_param(param):
fprintln(f, spacing, ' ->pushmandatory_param("', param["name"], '")')
fprintln(f, spacing, ";")
def get_base_name(param):
return os.path.basename(param)
def is_model_valid(name, model):
if name in valid_vars:
return ""
properties = getitem(model[name], "properties", name)
for var in properties:
type = getitem(properties[var], "type", name + ":" + var)
if type == "array":
items = getitem(properties[var], "items", name + ":" + var);
try :
type = getitem(items, "type", name + ":" + var + ":items")
except Exception as e:
try:
type = getitem(items, "$ref", name + ":" + var + ":items")
except:
raise e;
if type not in valid_vars:
if type not in model:
raise Exception("Unknown type '" + type + "' in Model '" + name + "'")
return type
valid_vars[name] = name
return ""
def resolve_model_order(data):
res = []
models = set()
for model_name in data:
visited = set(model_name)
missing = is_model_valid(model_name, data)
resolved = missing == ''
if not resolved:
stack = [model_name]
while not resolved:
if missing in visited:
raise Exception("Cyclic dependency found: " + missing)
missing_depends = is_model_valid(missing, data)
if missing_depends == '':
if missing not in models:
res.append(missing)
models.add(missing)
resolved = len(stack) == 0
if not resolved:
missing = stack.pop()
else:
stack.append(missing)
missing = missing_depends
elif model_name not in models:
res.append(model_name)
models.add(model_name)
return res
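# Emits C++ source for an `enum class <model>_<name>` together with a
# json-serializable `<name>_wrapper` struct that converts to and from other
# enum types and can be iterated over via all_items().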
def create_enum_wrapper(model_name, name, values):
enum_name = model_name + "_" + name
res = " enum class " + enum_name + " {"
for enum_entry in values:
res = res + " " + enum_entry + ", "
res = res + "NUM_ITEMS};\n"
wrapper = name + "_wrapper"
res = res + Template(""" struct $wrapper : public json::jsonable {
$wrapper() = default;
virtual std::string to_json() const {
switch(v) {
""").substitute({'wrapper' : wrapper})
for enum_entry in values:
res = res + " case " + enum_name + "::" + enum_entry + ": return \"\\\"" + enum_entry + "\\\"\";\n"
res = res + Template(""" default: return \"\\\"Unknown\\\"\";
}
}
template<class T>
$wrapper (const T& _v) {
switch(_v) {
""").substitute({'wrapper' : wrapper})
for enum_entry in values:
res = res + " case T::" + enum_entry + ": v = " + enum_name + "::" + enum_entry + "; break;\n"
res = res + Template(""" default: v = $enum_name::NUM_ITEMS;
}
}
template<class T>
operator T() const {
switch(v) {
""").substitute({'enum_name': enum_name})
for enum_entry in values:
res = res + " case " + enum_name + "::" + enum_entry + ": return T::" + enum_entry + ";\n"
return res + Template(""" default: return T::$value;
}
}
typedef typename std::underlying_type<$enum_name>::type pos_type;
$wrapper& operator++() {
v = static_cast<$enum_name>(static_cast<pos_type>(v) + 1);
return *this;
}
$wrapper & operator++(int) {
return ++(*this);
}
bool operator==(const $wrapper& c) const {
return v == c.v;
}
bool operator!=(const $wrapper& c) const {
return v != c.v;
}
bool operator<=(const $wrapper& c) const {
return static_cast<pos_type>(v) <= static_cast<pos_type>(c.v);
}
static $wrapper begin() {
return $wrapper ($enum_name::$value);
}
static $wrapper end() {
return $wrapper ($enum_name::NUM_ITEMS);
}
static boost::integer_range<$wrapper> all_items() {
return boost::irange(begin(), end());
}
$enum_name v;
};
""").substitute({'enum_name': enum_name, 'wrapper' : wrapper, 'value':values[0]})
def to_operation(opr, data):
data["method"] = opr.upper()
data["nickname"] = data["operationId"]
return data
def to_path(path, data):
data["operations"] = [to_operation(k, data[k]) for k in data]
data["path"] = path
return data
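# Generates the output header (and, with --create-cc, a companion .cc file):
# json_base structs for every model definition plus path_description objects
# registering each API path with its URL components and mandatory query
# parameters.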
def create_h_file(data, hfile_name, api_name, init_method, base_api):
if config.o != '':
final_hfile_name = config.o
else:
final_hfile_name = config.outdir + "/" + hfile_name
hfile = open(final_hfile_name, "w")
if config.create_cc:
        ccfile = open(re.sub(r'\.hh$', '', final_hfile_name) + ".cc", "w")
add_include(ccfile, ['"{}"'.format(final_hfile_name)])
open_namespace(ccfile, "seastar")
open_namespace(ccfile, "httpd")
open_namespace(ccfile, api_name)
else:
ccfile = hfile
print_h_file_headers(hfile, api_name)
add_include(hfile, ['<seastar/core/sstring.hh>',
'<seastar/json/json_elements.hh>',
'<seastar/http/json_path.hh>'])
add_include(hfile, ['<iostream>', '<boost/range/irange.hpp>'])
open_namespace(hfile, "seastar")
open_namespace(hfile, "httpd")
open_namespace(hfile, api_name)
if "models" in data:
models_order = resolve_model_order(data["models"])
for model_name in models_order:
model = data["models"][model_name]
if 'description' in model:
print_ind_comment(hfile, "", model["description"])
fprintln(hfile, "struct ", model_name, " : public json::json_base {")
member_init = ''
member_assignment = ''
member_copy = ''
for member_name in model["properties"]:
member = model["properties"][member_name]
if "description" in member:
print_comment(hfile, member["description"])
if "enum" in member:
enum_name = model_name + "_" + member_name
fprintln(hfile, create_enum_wrapper(model_name, member_name, member["enum"]))
fprintln(hfile, " ", config.jsonns, "::json_element<",
member_name, "_wrapper> ",
member_name, ";\n")
else:
fprintln(hfile, " ", config.jsonns, "::",
type_change(member["type"], member), " ",
member_name, ";\n")
member_init += " add(&" + member_name + ',"'
member_init += member_name + '");\n'
member_assignment += " " + member_name + " = " + "e." + member_name + ";\n"
member_copy += " e." + member_name + " = " + member_name + ";\n"
fprintln(hfile, "void register_params() {")
fprintln(hfile, member_init)
fprintln(hfile, '}')
fprintln(hfile, model_name, '() {')
fprintln(hfile, ' register_params();')
fprintln(hfile, '}')
fprintln(hfile, model_name, '(const ' + model_name + ' & e) {')
fprintln(hfile, ' register_params();')
fprintln(hfile, member_assignment)
fprintln(hfile, '}')
fprintln(hfile, "template<class T>")
fprintln(hfile, model_name, "& operator=(const ", "T& e) {")
fprintln(hfile, member_assignment)
fprintln(hfile, " return *this;")
fprintln(hfile, "}")
fprintln(hfile, model_name, "& operator=(const ", model_name, "& e) {")
fprintln(hfile, member_assignment)
fprintln(hfile, " return *this;")
fprintln(hfile, "}")
fprintln(hfile, "template<class T>")
fprintln(hfile, model_name, "& update(T& e) {")
fprintln(hfile, member_copy)
fprintln(hfile, " return *this;")
fprintln(hfile, "}")
fprintln(hfile, "};\n\n")
# print_ind_comment(hfile, "", "Initialize the path")
# fprintln(hfile, init_method + "(const std::string& description);")
fprintln(hfile, 'static const sstring name = "', base_api, '";')
for item in data["apis"]:
path = item["path"]
if "operations" in item:
for oper in item["operations"]:
if "summary" in oper:
print_comment(hfile, oper["summary"])
param_starts = path.find("{")
base_url = path
vals = []
if param_starts >= 0:
vals = path[param_starts:].split("/")
vals.reverse()
base_url = path[:param_starts]
varname = getitem(oper, "nickname", oper)
if config.create_cc:
fprintln(hfile, 'extern const path_description ', varname, ';')
maybe_static = ''
else:
maybe_static = 'static '
fprintln(ccfile, maybe_static, 'const path_description ', varname, '("', clear_path_ending(base_url),
'",', oper["method"], ',"', oper["nickname"], '",')
fprint(ccfile, '{')
first = True
while vals:
path_param, is_url = clean_param(vals.pop())
if path_param == "":
continue
if first == True:
first = False
else:
fprint(ccfile, "\n,")
if is_url:
fprint(ccfile, '{', '"/', path_param , '", path_description::url_component_type::FIXED_STRING', '}')
else:
path_param_type = get_parameter_by_name(oper, path_param)
if ("allowMultiple" in path_param_type and
path_param_type["allowMultiple"] == True):
fprint(ccfile, '{', '"', path_param , '", path_description::url_component_type::PARAM_UNTIL_END_OF_PATH', '}')
else:
fprint(ccfile, '{', '"', path_param , '", path_description::url_component_type::PARAM', '}')
fprint(ccfile, '}')
fprint(ccfile, ',{')
first = True
enum_definitions = ""
if "enum" in oper:
enum_definitions = ("namespace ns_" + oper["nickname"] + " {\n" +
create_enum_wrapper(oper["nickname"], "return_type", oper["enum"]) +
"}\n")
funcs = ""
if "parameters" in oper:
for param in oper["parameters"]:
if is_required_query_param(param):
if first == True:
first = False
else:
fprint(ccfile, "\n,")
fprint(ccfile, '"', param["name"], '"')
if "enum" in param:
enum_definitions = enum_definitions + 'namespace ns_' + oper["nickname"] + '{\n'
enm = param["name"]
enum_definitions = enum_definitions + 'enum class ' + enm + ' {'
for val in param["enum"]:
enum_definitions = enum_definitions + val + ", "
enum_definitions = enum_definitions + 'NUM_ITEMS};\n'
enum_definitions = enum_definitions + enm + ' str2' + enm + '(const sstring& str);'
funcs = funcs + enm + ' str2' + enm + '(const sstring& str) {\n'
funcs = funcs + ' static const sstring arr[] = {"' + '","'.join(param["enum"]) + '"};\n'
funcs = funcs + ' int i;\n'
funcs = funcs + ' for (i=0; i < ' + str(len(param["enum"])) + '; i++) {\n'
funcs = funcs + ' if (arr[i] == str) {return (' + enm + ')i;}\n}\n'
funcs = funcs + ' return (' + enm + ')i;\n'
funcs = funcs + '}\n'
enum_definitions = enum_definitions + '}\n'
fprintln(ccfile, '});')
fprintln(hfile, enum_definitions)
open_namespace(ccfile, 'ns_' + oper["nickname"])
fprintln(ccfile, funcs)
close_namespace(ccfile)
close_namespace(hfile)
close_namespace(hfile)
close_namespace(hfile)
if config.create_cc:
close_namespace(ccfile)
close_namespace(ccfile)
close_namespace(ccfile)
hfile.write("#endif //__JSON_AUTO_GENERATED_HEADERS\n")
hfile.close()
def remove_leading_comma(data):
return re.sub(r'^\s*,','', data)
def format_as_json_object(data):
return "{" + remove_leading_comma(data) + "}"
def check_for_models(data, param):
model_name = param.replace(".json", ".def.json")
if not os.path.isfile(model_name):
return
try:
with open(model_name) as myfile:
json_data = myfile.read()
def_data = json.loads(format_as_json_object(json_data))
data["models"] = def_data
except Exception as e:
type, value, tb = sys.exc_info()
print("Bad formatted JSON definition file '" + model_name + "' error ", value.message)
sys.exit(-1)
def set_apis(data):
return {"apis": [to_path(p, data[p]) for p in data]}
def parse_file(param, combined):
global current_file
trace_verbose("parsing ", param, " file")
with open(param) as myfile:
json_data = myfile.read()
try:
data = json.loads(json_data)
except Exception as e:
try:
# the passed data is not a valid json, so maybe its a swagger 2.0
# snippet, format it as json and try again
# set_apis and check_for_models will create an object with a similiar format
# to a swagger 1.2 so the code generation would work
data = set_apis(json.loads(format_as_json_object(json_data)))
check_for_models(data, param)
except:
# The problem is with the file,
# just report the error and exit.
type, value, tb = sys.exc_info()
print("Bad formatted JSON file '" + param + "' error ", value.message)
sys.exit(-1)
try:
base_file_name = get_base_name(param)
current_file = base_file_name
hfile_name = base_file_name + ".hh"
api_name = base_file_name.replace('.', '_')
base_api = base_file_name.replace('.json', '')
init_method = "void " + api_name + "_init_path"
trace_verbose("creating ", hfile_name)
if (combined):
fprintln(combined, '#include "', base_file_name, ".cc", '"')
create_h_file(data, hfile_name, api_name, init_method, base_api)
except:
type, value, tb = sys.exc_info()
print("Error while parsing JSON file '" + param + "' error ", value.message)
sys.exit(-1)
if "indir" in config and config.indir != '':
combined = open(config.combined, "w")
for f in glob.glob(os.path.join(config.indir, "*.json")):
parse_file(f, combined)
else:
parse_file(config.f, None)
|
cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_SUBSCRIBER_SESSION_TC_MIB.py | Maikor/ydk-py | 177 | 12629977 | <filename>cisco-ios-xe/ydk/models/cisco_ios_xe/CISCO_SUBSCRIBER_SESSION_TC_MIB.py
""" CISCO_SUBSCRIBER_SESSION_TC_MIB
This MIB module defines textual conventions describing
subscriber sessions.
"""
from collections import OrderedDict
from ydk.types import Entity, EntityPath, Identity, Enum, YType, YLeaf, YLeafList, YList, LeafDataList, Bits, Empty, Decimal64
from ydk.filters import YFilter
from ydk.errors import YError, YModelError
from ydk.errors.error_handler import handle_type_error as _handle_type_error
class SubSessionRedundancyMode(Enum):
"""
SubSessionRedundancyMode (Enum Class)
An enumerated integer\-value describing the redundancy mode in
which a subscriber session is operating\:
'none'
The subscriber session is not part of a redundant
configuration.
'other'
The subscriber session is part of a redundant
configuration and is in a state not recognized by this
MIB module.
'active'
The subscriber session is part of a redundant
configuration and is forwarding traffic from the
subscriber.
'standby'
The subscriber session is part of a redundant
configuration and ready to become active in the case of
a fail\-over event (e.g., linecard failure).
.. data:: none = 1
.. data:: other = 2
.. data:: active = 3
.. data:: standby = 4
"""
none = Enum.YLeaf(1, "none")
other = Enum.YLeaf(2, "other")
active = Enum.YLeaf(3, "active")
standby = Enum.YLeaf(4, "standby")
class SubSessionState(Enum):
"""
SubSessionState (Enum Class)
An enumerated integer\-value describing the state of a
subscriber session\:
'other'
The subscriber session is in a state not recognized by
the implementation of a MIB module using this textual
convention.
'pending'
The subscriber session is in the PENDING state;
that is, the subscriber session has been initiated and
the system is in the process of establishing the
subscriber session.
'up'
The subscriber session is in the UP state; that is, the
system has established the subscriber session.
.. data:: other = 1
.. data:: pending = 2
.. data:: up = 3
"""
other = Enum.YLeaf(1, "other")
pending = Enum.YLeaf(2, "pending")
up = Enum.YLeaf(3, "up")
class SubSessionType(Enum):
"""
SubSessionType (Enum Class)
An enumerated integer\-value describing the type of subscriber
session. This value has the intent of refining the interface
type assigned to a subscriber session. The interface type
assigned to a subscriber session groups those types of
subscriber sessions with similar interface semantics.
A PPP subscriber session consists of a PPP connection
(RFC\-1661)
and has an interface type of 'ppp'. The following subscriber
types refine PPP subscriber sessions\:
'pppSubscriber'
A PPP connection initiated over a circuit (e.g., an
ISDN
line or ATM VC) using the LCP (RFC\-1661).
'pppoeSubscriber'
A PPP connection over Ethernet (RFC\-2516), initiated
by a PADI (PPPoE Active Discovery Initiation) packet.
'l2tpSubscriber'
A PPP connection over an L2TP tunnel (RFC\-2661),
initiated by an Incoming\-Call\-Request control message.
'l2fSubscriber'
A PPP connection over an L2F tunnel (RFC\-2341),
initiated by a L2F\_OPEN message with a non\-zero MID.
An IP subscriber session consists of the routed traffic
associated with a subscriber IP address having an interface
type of 'ipSubscriber'. Routed traffic describes IP traffic
that transits at least one router. If a subscriber's IP
address
is not unique to the system, further distinguishing
characteristics, such as VRF or MAC address, form part of the
subscriber's identity. The following subscriber types refine
IP
subscriber sessions\:
'ipInterfaceSubscriber'
    An IP subscriber session provisioned by the system's
configuration which consists of all traffic received by
the interface to which the provisioning applies.
'ipPktSubscriber'
An IP subscriber session initiated by the receipt of
the
first packet received with an unclassified source IP
address.
'ipDhcpv4Subscriber'
An IP subscriber session initiated by the receipt of a
DHCPv4 DISCOVER packet (RFC\-2131).
'ipRadiusSubscriber'
An IP subscriber session initiated by the receipt of a
RADIUS Access\-Request packet (RFC\-2865).
An L2 subscriber session consists of the non\-routed traffic
associated with a subscriber IP address having an interface
type of 'l2Subscriber'. Non\-routed traffic describes IP
traffic
that doesn't transit a router, meaning that the subscriber must
be directly connected to the system or have a connection
through
an L2 access network (e.g., bridges, switches, and tunnels).
The
following subscriber types refine L2 subscriber sessions\:
'l2MacPacketSubscriber'
An L2 subscriber session initiated by the receipt of
the
first layer 2 packet with an unclassified source MAC
address.
'l2Dhcpv4Subscriber'
An L2 subscriber session initiated by the receipt of a
DHCPv4 DISCOVER packet (RFC\-2131).
    'l2RadiusSubscriber'
An L2 subscriber session initiated by the receipt of a
RADIUS Access\-Request packet (RFC\-2865).
The system should assign the value 'other' to any subscriber
session not recognized by the implementation of a MIB module
using this textual convention.
The value 'all' represents a special value used to indicate all
subscriber session types. For example, a scope of aggregation
that includes all subscriber session types uses this value to
indicate this fact.
.. data:: all = 1
.. data:: other = 2
.. data:: pppSubscriber = 3
.. data:: pppoeSubscriber = 4
.. data:: l2tpSubscriber = 5
.. data:: l2fSubscriber = 6
.. data:: ipInterfaceSubscriber = 7
.. data:: ipPktSubscriber = 8
.. data:: ipDhcpv4Subscriber = 9
.. data:: ipRadiusSubscriber = 10
.. data:: l2MacSubscriber = 11
.. data:: l2Dhcpv4Subscriber = 12
.. data:: l2RadiusSubscriber = 13
"""
all = Enum.YLeaf(1, "all")
other = Enum.YLeaf(2, "other")
pppSubscriber = Enum.YLeaf(3, "pppSubscriber")
pppoeSubscriber = Enum.YLeaf(4, "pppoeSubscriber")
l2tpSubscriber = Enum.YLeaf(5, "l2tpSubscriber")
l2fSubscriber = Enum.YLeaf(6, "l2fSubscriber")
ipInterfaceSubscriber = Enum.YLeaf(7, "ipInterfaceSubscriber")
ipPktSubscriber = Enum.YLeaf(8, "ipPktSubscriber")
ipDhcpv4Subscriber = Enum.YLeaf(9, "ipDhcpv4Subscriber")
ipRadiusSubscriber = Enum.YLeaf(10, "ipRadiusSubscriber")
l2MacSubscriber = Enum.YLeaf(11, "l2MacSubscriber")
l2Dhcpv4Subscriber = Enum.YLeaf(12, "l2Dhcpv4Subscriber")
l2RadiusSubscriber = Enum.YLeaf(13, "l2RadiusSubscriber")
|
examples/BERT/mlm_task_adaptdl.py | jessezbj/adaptdl | 294 | 12629978 | import argparse
import time
import math
import torch
import torch.nn as nn
from model import MLMTask
from utils import run_demo, run_ddp, wrap_up
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter # Added for tensorboard
from torch.utils.data import DataLoader
import adaptdl # Changed in step 1
import adaptdl.torch # Changed in step 1
import os # Added for tensorboard
def collate_batch(batch_data, args, mask_id, cls_id):
if len(batch_data) % args.bptt != 0:
# print(len(batch_data))
batch_data = batch_data[:len(batch_data)//args.bptt*args.bptt]
batch_data = \
torch.tensor(batch_data).long().view(args.bptt, -1).t().contiguous()
# Generate masks with args.mask_frac
data_len = batch_data.size(0)
ones_num = int(data_len * args.mask_frac)
zeros_num = data_len - ones_num
lm_mask = torch.cat([torch.zeros(zeros_num), torch.ones(ones_num)])
lm_mask = lm_mask[torch.randperm(data_len)]
batch_data = \
torch.cat((torch.tensor([[cls_id] * batch_data.size(1)]).long(),
batch_data))
lm_mask = torch.cat((torch.tensor([0.0]), lm_mask))
targets = torch.stack(
[batch_data[i] for i in range(lm_mask.size(0)) if lm_mask[i]]).view(-1)
batch_data = batch_data.masked_fill(lm_mask.bool().unsqueeze(1), mask_id)
return batch_data, lm_mask, targets
def process_raw_data(raw_data, args):
_num = raw_data.size(0) // (args.batch_size * args.bptt)
raw_data = raw_data[:(_num * args.batch_size * args.bptt)]
return raw_data
def evaluate(data_source, model, vocab, ntokens, criterion, args, device,
test=False, epoch=None, writer=None):
# Turn on evaluation mode which disables dropout.
model.eval()
if test:
total_loss = 0.
else:
stats = adaptdl.torch.Accumulator()
mask_id = vocab.stoi['<MASK>']
cls_id = vocab.stoi['<cls>']
dataloader = DataLoader(
data_source, batch_size=args.batch_size * args.bptt,
shuffle=False,
collate_fn=lambda b: collate_batch(b, args, mask_id, cls_id))
with torch.no_grad():
for batch, (data, lm_mask, targets) in enumerate(dataloader):
if args.parallel == 'DDP':
data = data.to(device[0])
targets = targets.to(device[0])
else:
data = data.to(device)
targets = targets.to(device)
data = data.transpose(0, 1) # Wrap up by DDP or DataParallel
output = model(data)
output = torch.stack(
[output[i] for i in range(lm_mask.size(0)) if lm_mask[i]])
output_flat = output.view(-1, ntokens)
if test:
total_loss += criterion(output_flat, targets).item()
else:
stats['test_loss'] += criterion(output_flat, targets).item()
stats['total'] += targets.size(0)
if test:
return total_loss / ((len(data_source) - 1) / args.bptt / 32)
with stats.synchronized():
test_loss = (stats['test_loss'] / ((len(data_source) - 1) / args.bptt / args.batch_size) /
adaptdl.env.num_replicas())
writer.add_scalar("Loss/valid", test_loss, epoch)
return test_loss
def train(model, vocab, train_loss_log, train_data,
optimizer, criterion, ntokens, epoch, scheduler,
args, device, rank=None, batch_size_log=None, writer=None):
# TODO: reduce number of args for this function
model.train()
total_loss = 0
start_time = time.time()
mask_id = vocab.stoi['<MASK>']
cls_id = vocab.stoi['<cls>']
train_loss_log.append(0.0)
base_bsz = args.batch_size * args.bptt
dataloader = adaptdl.torch.AdaptiveDataLoader(
train_data, drop_last=True, batch_size=base_bsz, shuffle=False,
collate_fn=lambda b: collate_batch(b, args, mask_id, cls_id))
dataloader.autoscale_batch_size(
128 * base_bsz,
local_bsz_bounds=(max(base_bsz / 4, 2048), min(2 * base_bsz, 4096*2)),
gradient_accumulation=args.gradient_accumulation)
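    # Let AdaptDL grow the effective batch size up to 128x the base size while
    # keeping each replica's local batch within the bounds above; with
    # --gradient-accumulation, part of that scaling is realized by accumulating
    # gradients over several passes before each optimizer step (see
    # is_accumulation_step() below).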
for batch, (data, lm_mask, targets) in enumerate(dataloader):
optimizer.zero_grad()
if args.parallel == 'DDP':
print("DDP")
data = data.to(device[0])
targets = targets.to(device[0])
else:
data = data.to(device)
targets = targets.to(device)
data = data.transpose(0, 1) # Wrap up by DDP or DataParallel
output = model(data)
output = torch.stack(
[output[i] for i in range(lm_mask.size(0)) if lm_mask[i]])
loss = criterion(output.view(-1, ntokens), targets)
loss.backward()
if not model.adascale.is_accumulation_step():
torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
optimizer.step()
total_loss += loss.item()
if batch % args.log_interval == 0 and batch > 0:
batch = batch // (dataloader.accumulation_steps + 1)
cur_loss = total_loss / args.log_interval
elapsed = time.time() - start_time
if (rank is None) or rank == 0:
train_loss_log[-1] = cur_loss
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:05.5f} | '
'ms/batch {:5.2f} | loss {:5.2f} | '
'ppl {:8.2f} | batch_size {:5d}'.format(
epoch, batch,
len(train_data) // dataloader.current_batch_size,
scheduler.get_last_lr()[0],
elapsed * 1000 / args.log_interval,
cur_loss, math.exp(cur_loss),
dataloader.current_batch_size))
total_loss = 0
start_time = time.time()
dataloader.to_tensorboard(writer, epoch, tag_prefix="AdaptDL/Data/")
model.to_tensorboard(writer, epoch, tag_prefix="AdaptDL/Model/")
batch_size_log.append(dataloader.current_batch_size)
def run_main(args, rank=None):
torch.manual_seed(args.seed)
if args.parallel == 'DDP':
n = torch.cuda.device_count() // args.world_size
device = list(range(rank * n, (rank + 1) * n))
else:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
import torchtext
if args.dataset == 'WikiText103':
from torchtext.experimental.datasets import WikiText103 as WLMDataset
elif args.dataset == 'WikiText2':
from torchtext.experimental.datasets import WikiText2 as WLMDataset
elif args.dataset == 'WMTNewsCrawl':
from data import WMTNewsCrawl as WLMDataset
elif args.dataset == 'EnWik9':
from torchtext.datasets import EnWik9
elif args.dataset == 'BookCorpus':
from data import BookCorpus
else:
print("dataset for MLM task is not supported")
try:
vocab = torch.load(args.save_vocab)
except: # noqa: E722
train_dataset, test_dataset, valid_dataset = WLMDataset()
old_vocab = train_dataset.vocab
vocab = torchtext.vocab.Vocab(counter=old_vocab.freqs,
specials=['<unk>', '<pad>', '<MASK>'])
with open(args.save_vocab, 'wb') as f:
torch.save(vocab, f)
if args.dataset == 'WikiText103' or args.dataset == 'WikiText2':
train_dataset, test_dataset, valid_dataset = WLMDataset(vocab=vocab)
elif args.dataset == 'WMTNewsCrawl':
from torchtext.experimental.datasets import WikiText2
test_dataset, valid_dataset = WikiText2(
vocab=vocab, data_select=('test', 'valid'))
train_dataset, = WLMDataset(vocab=vocab, data_select='train')
elif args.dataset == 'EnWik9':
enwik9 = EnWik9()
idx1, idx2 = int(len(enwik9) * 0.8), int(len(enwik9) * 0.9)
train_data = torch.tensor([vocab.stoi[_id]
for _id in enwik9[0:idx1]]).long()
val_data = torch.tensor([vocab.stoi[_id]
for _id in enwik9[idx1:idx2]]).long()
test_data = torch.tensor([vocab.stoi[_id]
for _id in enwik9[idx2:]]).long()
from torchtext.experimental.datasets import LanguageModelingDataset
train_dataset = LanguageModelingDataset(train_data, vocab)
valid_dataset = LanguageModelingDataset(val_data, vocab)
test_dataset = LanguageModelingDataset(test_data, vocab)
elif args.dataset == 'BookCorpus':
train_dataset, test_dataset, valid_dataset = BookCorpus(vocab)
train_data = process_raw_data(train_dataset.data, args)
# if rank is not None:
# # Chunk training data by rank for different gpus
# chunk_len = len(train_data) // args.world_size
# train_data = train_data[(rank * chunk_len):((rank + 1) * chunk_len)]
val_data = process_raw_data(valid_dataset.data, args)
test_data = process_raw_data(test_dataset.data, args)
ntokens = len(train_dataset.get_vocab())
if args.checkpoint != 'None':
model = torch.load(args.checkpoint)
else:
model = MLMTask(
ntokens, args.emsize, args.nhead,
args.nhid, args.nlayers, args.dropout)
if args.parallel == 'DDP':
model = model.to(device[0])
model = DDP(model, device_ids=device)
else:
model = model.to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, 10.0, gamma=0.1)
adaptdl.torch.init_process_group(
"nccl" if torch.cuda.is_available() else "gloo")
adl_model = adaptdl.torch.AdaptiveDataParallel(model, optimizer, scheduler)
best_val_loss = None
train_loss_log, val_loss_log, batch_size_log = [], [], []
tensorboard_dir = os.path.join(
os.getenv("ADAPTDL_TENSORBOARD_LOGDIR", "/tmp")
if adaptdl.env.replica_rank() == 0 else "/tmp",
adaptdl.env.job_id())
writer = SummaryWriter(tensorboard_dir)
for epoch in adaptdl.torch.remaining_epochs_until(args.epochs):
epoch_start_time = time.time()
train(adl_model, train_dataset.vocab, train_loss_log, train_data,
optimizer, criterion, ntokens, epoch, scheduler, args,
device, rank, batch_size_log, writer)
val_loss = evaluate(
val_data, model, train_dataset.vocab, ntokens, criterion, args,
device, test=False, epoch=epoch, writer=writer)
if (rank is None) or (rank == 0):
val_loss_log.append(val_loss)
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | '
'valid loss {:5.2f} | valid ppl {:8.2f}'.format(
epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
print('-' * 89)
if not best_val_loss or val_loss < best_val_loss:
if rank is None:
with open(args.save, 'wb') as f:
torch.save(adl_model.module, f)
elif rank == 0:
with open(args.save, 'wb') as f:
torch.save(adl_model.module.state_dict(), f)
best_val_loss = val_loss
else:
scheduler.step()
print("SCHEDULER_STEP")
if args.parallel == 'DDP':
dist.barrier()
rank0_devices = [x - rank * len(device) for x in device]
device_pairs = zip(rank0_devices, device)
map_location = {'cuda:%d' % x: 'cuda:%d' % y for x, y in device_pairs}
model.load_state_dict(
torch.load(args.save, map_location=map_location))
adl_model = adaptdl.torch.AdaptiveDataParallel(model, optimizer, scheduler)
test_loss = evaluate(test_data, model, train_dataset.vocab, ntokens,
criterion, args, device)
if rank == 0:
wrap_up(train_loss_log, val_loss_log, test_loss, args,
adl_model.module, 'mlm_loss.txt', 'full_mlm_model.pt',
batch_size_log)
else:
with open(args.save, 'rb') as f:
adl_model.module.load_state_dict(torch.load(f))
test_loss = evaluate(test_data, model, train_dataset.vocab,
ntokens, criterion, args, device, True)
wrap_up(train_loss_log, val_loss_log, test_loss, args, adl_model.module,
'mlm_loss.txt', 'full_mlm_model.pt')
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='PyTorch Wikitext-2 Transformer Language Model')
parser.add_argument('--emsize', type=int, default=768,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=3072,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=12,
help='number of layers')
parser.add_argument('--nhead', type=int, default=12,
                        help='the number of heads in the encoder/decoder of '
'the transformer model')
parser.add_argument('--lr', type=float, default=6,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.1,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=8,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=32, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=128,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--seed', type=int, default=5431916812,
help='random seed')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='report interval')
parser.add_argument('--checkpoint', type=str, default='None',
help='path to load the checkpoint')
parser.add_argument('--save', type=str, default='mlm_bert.pt',
help='path to save the final model')
parser.add_argument('--save-vocab', type=str,
default='torchtext_bert_vocab.pt',
help='path to save the vocab')
parser.add_argument('--mask_frac', type=float, default=0.15,
help='the fraction of masked tokens')
parser.add_argument('--dataset', type=str, default='WikiText2',
help='dataset used for MLM task')
parser.add_argument('--parallel', type=str, default='None',
help='Use DataParallel to train model')
parser.add_argument('--world_size', type=int, default=8,
help='the world size to initiate DPP')
parser.add_argument('--gradient-accumulation',
dest='gradient_accumulation',
default=False, action='store_true',
help='Enable gradient accumulation')
args = parser.parse_args()
if args.parallel == 'DDP':
run_demo(run_ddp, run_main, args)
else:
run_main(args, adaptdl.env.replica_rank())
time.sleep(100)
|
evaluation/lfw-classification-unknown.py | ammogcoder/openface | 15,684 | 12629979 | #!/usr/bin/env python2
#
# This files can be used to benchmark different classifiers
# on lfw dataset with known and unknown dataset.
# More info at: https://github.com/cmusatyalab/openface/issues/144
# <NAME> & <NAME>
# 2016/06/28
#
# Copyright 2015-2016 Carnegie Mellon University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
start = time.time()
import argparse
import cv2
import os
import pickle
import shutil # For copy images
import errno
import sys
import operator
from operator import itemgetter
import numpy as np
np.set_printoptions(precision=2)
import pandas as pd
import openface
from sklearn.pipeline import Pipeline
from sklearn.lda import LDA
from sklearn.preprocessing import LabelEncoder
from sklearn.svm import SVC
from sklearn.mixture import GMM
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from nolearn.dbn import DBN
import multiprocessing
fileDir = os.path.dirname(os.path.realpath(__file__))
modelDir = os.path.join(fileDir, '..', 'models')
dlibModelDir = os.path.join(modelDir, 'dlib')
openfaceModelDir = os.path.join(modelDir, 'openface')
sys.path.append('./util/')
align_dlib = __import__('align-dlib')
# The list of available classifiers. The list is used in train() and
# inferFromTest() functions.
clfChoices = [
'LinearSvm',
'GMM',
'RadialSvm',
'DecisionTree',
'GaussianNB',
'DBN']
def train(args):
start = time.time()
for clfChoice in clfChoices:
print("Loading embeddings.")
fname = "{}/labels.csv".format(args.workDir)
labels = pd.read_csv(fname, header=None).as_matrix()[:, 1]
labels = map(itemgetter(1),
map(os.path.split,
map(os.path.dirname, labels))) # Get the directory.
fname = "{}/reps.csv".format(args.workDir)
embeddings = pd.read_csv(fname, header=None).as_matrix()
le = LabelEncoder().fit(labels)
labelsNum = le.transform(labels)
nClasses = len(le.classes_)
print("Training for {} classes.".format(nClasses))
if clfChoice == 'LinearSvm':
clf = SVC(C=1, kernel='linear', probability=True)
elif clfChoice == 'GMM': # Doesn't work best
clf = GMM(n_components=nClasses)
# ref:
# http://scikit-learn.org/stable/auto_examples/classification/plot_classifier_comparison.html#example-classification-plot-classifier-comparison-py
elif clfChoice == 'RadialSvm': # Radial Basis Function kernel
# works better with C = 1 and gamma = 2
clf = SVC(C=1, kernel='rbf', probability=True, gamma=2)
elif clfChoice == 'DecisionTree': # Doesn't work best
clf = DecisionTreeClassifier(max_depth=20)
elif clfChoice == 'GaussianNB':
clf = GaussianNB()
# ref: https://jessesw.com/Deep-Learning/
elif clfChoice == 'DBN':
if args.verbose:
verbose = 1
else:
verbose = 0
clf = DBN([embeddings.shape[1], 500, labelsNum[-1:][0] + 1], # i/p nodes, hidden nodes, o/p nodes
learn_rates=0.3,
# Smaller steps mean a possibly more accurate result, but the
# training will take longer
learn_rate_decays=0.9,
# a factor the initial learning rate will be multiplied by
# after each iteration of the training
epochs=300, # no of iternation
# dropouts = 0.25, # Express the percentage of nodes that
# will be randomly dropped as a decimal.
verbose=verbose)
if args.ldaDim > 0:
clf_final = clf
clf = Pipeline([('lda', LDA(n_components=args.ldaDim)),
('clf', clf_final)])
clf.fit(embeddings, labelsNum)
fName = os.path.join(args.workDir, clfChoice + ".pkl")
print("Saving classifier to '{}'".format(fName))
with open(fName, 'w') as f:
pickle.dump((le, clf), f)
if args.verbose:
print(
"Training and saving the classifiers took {} seconds.".format(
time.time() - start))
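# Computes the OpenFace embedding for a single image: load it, detect the
# largest face with dlib, align it to the outer eyes and nose, and run the
# aligned crop through the Torch network.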
def getRep(imgPath):
start = time.time()
bgrImg = cv2.imread(imgPath)
if bgrImg is None:
raise Exception("Unable to load image: {}".format(imgPath))
rgbImg = cv2.cvtColor(bgrImg, cv2.COLOR_BGR2RGB)
if args.verbose:
print(" + Original size: {}".format(rgbImg.shape))
if args.verbose:
print("Loading the image took {} seconds.".format(time.time() - start))
start = time.time()
bb = align.getLargestFaceBoundingBox(rgbImg)
if (bb is None):
raise Exception("Unable to find a face: {}".format(imgPath))
if args.verbose:
print("Face detection took {} seconds.".format(time.time() - start))
start = time.time()
alignedFace = align.align(
args.imgDim,
rgbImg,
bb,
landmarkIndices=openface.AlignDlib.OUTER_EYES_AND_NOSE)
if alignedFace is None:
raise Exception("Unable to align image: {}".format(imgPath))
if args.verbose:
print("Alignment took {} seconds.".format(time.time() - start))
start = time.time()
rep = net.forward(alignedFace)
if args.verbose:
print(
"Neural network forward pass took {} seconds.".format(
time.time() - start))
return rep
def inferFromTest(args):
for clfChoice in clfChoices:
print ("===============")
print ("Using the classifier: " + clfChoice)
with open(os.path.join(args.featureFolder[0], clfChoice + ".pkl"), 'r') as f_clf:
if sys.version_info[0] < 3:
(le, clf) = pickle.load(f_clf)
else:
(le, clf) = pickle.load(f_clf, encoding='latin1')
correctPrediction = 0
inCorrectPrediction = 0
sumConfidence = 0.0
testSet = [
os.path.join(
args.testFolder[0], f) for f in os.listdir(
args.testFolder[0]) if not f.endswith('.DS_Store')]
for personSet in testSet:
personImages = [os.path.join(personSet, f) for f in os.listdir(
personSet) if not f.endswith('.DS_Store')]
for img in personImages:
if args.verbose:
print("\n=== {} ===".format(img.split('/')[-1:][0]))
try:
rep = getRep(img).reshape(1, -1)
except Exception as e:
print (e)
continue
start = time.time()
predictions = clf.predict_proba(rep).ravel()
maxI = np.argmax(predictions)
person = le.inverse_transform(maxI)
confidence = predictions[maxI]
if args.verbose:
print(
"Prediction took {} seconds.".format(
time.time() - start))
if args.verbose:
print(
"Predict {} with {:.2f} confidence.".format(
person.decode('utf-8'), confidence))
sumConfidence += confidence
if confidence <= args.threshold and args.unknown:
person = "_unknown"
if (img.split('/')[-1:][0].split('.')[0][:-5] == person and not args.unknown) or (person == "_unknown" and args.unknown):
correctPrediction += 1
else:
inCorrectPrediction += 1
if isinstance(clf, GMM) and args.verbose:
dist = np.linalg.norm(rep - clf.means_[maxI])
print(" + Distance from the mean: {}".format(dist))
print ("Results for the classifier: " + clfChoice)
print ("Correct Prediction :" + str(correctPrediction))
print ("In-correct Prediction: " + str(inCorrectPrediction))
print ("Accuracy :" + str(float(correctPrediction) / (correctPrediction + inCorrectPrediction)))
print ("Avg Confidence: " + str(float(sumConfidence) / (correctPrediction + inCorrectPrediction)))
def preprocess(args):
start = time.time()
lfwPath = args.lfwDir
destPath = args.featuresDir
fullFaceDirectory = [os.path.join(lfwPath, f) for f in os.listdir(
lfwPath) if not f.endswith('.DS_Store')] # .DS_Store for the OS X
noOfImages = []
folderName = []
for folder in fullFaceDirectory:
try:
noOfImages.append(len(os.listdir(folder)))
folderName.append(folder.split('/')[-1:][0])
# print (folder.split('/')[-1:][0] +": " +
# str(len(os.listdir(folder))))
except:
pass
# Sorting
noOfImages_sorted, folderName_sorted = zip(
*sorted(zip(noOfImages, folderName), key=operator.itemgetter(0), reverse=True))
with open(os.path.join(destPath, "List_of_folders_and_number_of_images.txt"), "w") as text_file:
for f, n in zip(folderName_sorted, noOfImages_sorted):
text_file.write("{} : {} \n".format(f, n))
if args.verbose:
print ("Sorting lfw dataset took {} seconds.".format(time.time() - start))
start = time.time()
# Copy known train dataset
for i in range(int(args.rangeOfPeople.split(':')[0]), int(
args.rangeOfPeople.split(':')[1])):
src = os.path.join(lfwPath, folderName_sorted[i])
try:
destFolder = os.path.join(
destPath, 'train_known_raw', folderName_sorted[i])
shutil.copytree(src, destFolder)
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
shutil.copy(src, destFolder)
else:
if args.verbose:
print('Directory not copied. Error: %s' % e)
if args.verbose:
print ("Copying train dataset from lfw took {} seconds.".format(time.time() - start))
start = time.time()
# Take 10% images from train dataset as test dataset for known
train_known_raw = [
os.path.join(
os.path.join(
destPath,
'train_known_raw'),
f) for f in os.listdir(
os.path.join(
destPath,
'train_known_raw')) if not f.endswith('.DS_Store')] # .DS_Store for the OS X
for folder in train_known_raw:
images = [os.path.join(folder, f) for f in os.listdir(
folder) if not f.endswith('.DS_Store')]
if not os.path.exists(os.path.join(
destPath, 'test_known_raw', folder.split('/')[-1:][0])):
os.makedirs(os.path.join(destPath, 'test_known_raw',
folder.split('/')[-1:][0]))
# print ("Created {}".format(os.path.join(destPath,
# 'test_known_raw', folder.split('/')[-1:][0])))
for i in range(int(0.9 * len(images)), len(images)):
destFile = os.path.join(destPath, 'test_known_raw', folder.split(
'/')[-1:][0], images[i].split('/')[-1:][0])
try:
shutil.move(images[i], destFile)
except:
pass
if args.verbose:
print ("Spliting lfw dataset took {} seconds.".format(time.time() - start))
start = time.time()
# Copy unknown test dataset
for i in range(int(args.rangeOfPeople.split(':')
[1]), len(folderName_sorted)):
src = os.path.join(lfwPath, folderName_sorted[i])
try:
destFolder = os.path.join(
destPath, 'test_unknown_raw', folderName_sorted[i])
shutil.copytree(src, destFolder)
except OSError as e:
# If the error was caused because the source wasn't a directory
if e.errno == errno.ENOTDIR:
shutil.copy(src, destFolder)
else:
if args.verbose:
print('Directory not copied. Error: %s' % e)
if args.verbose:
print ("Copying test dataset from lfw took {} seconds.".format(time.time() - start))
start = time.time()
class Args():
"""
This class is created to pass arguments to ./util/align-dlib.py
"""
def __init__(self, inputDir, outputDir, verbose):
self.inputDir = inputDir
self.dlibFacePredictor = os.path.join(
dlibModelDir, "shape_predictor_68_face_landmarks.dat")
self.mode = 'align'
self.landmarks = 'outerEyesAndNose'
self.size = 96
self.outputDir = outputDir
self.skipMulti = True
self.verbose = verbose
self.fallbackLfw = False
argsForAlign = Args(
os.path.join(
destPath,
'train_known_raw'),
os.path.join(
destPath,
'train_known_aligned'),
args.verbose)
jobs = []
for i in range(8):
p = multiprocessing.Process(
target=align_dlib.alignMain, args=(
argsForAlign,))
jobs.append(p)
p.start()
for p in jobs:
p.join()
if args.verbose:
print ("Aligning the raw train data took {} seconds.".format(time.time() - start))
start = time.time()
os.system(
'./batch-represent/main.lua -outDir ' +
os.path.join(
destPath,
'train_known_features') +
' -data ' +
os.path.join(
destPath,
'train_known_aligned'))
if args.verbose:
print ("Extracting features from aligned train data took {} seconds.".format(time.time() - start))
start = time.time()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--dlibFacePredictor',
type=str,
help="Path to dlib's face predictor.",
default=os.path.join(
dlibModelDir,
"shape_predictor_68_face_landmarks.dat"))
parser.add_argument(
'--networkModel',
type=str,
help="Path to Torch network model.",
default=os.path.join(
openfaceModelDir,
'nn4.small2.v1.t7'))
parser.add_argument('--imgDim', type=int,
help="Default image dimension.", default=96)
parser.add_argument('--cuda', action='store_true')
parser.add_argument('--verbose', action='store_true')
subparsers = parser.add_subparsers(dest='mode', help="Mode")
trainParser = subparsers.add_parser('train',
help="Train a new classifier.")
trainParser.add_argument('--ldaDim', type=int, default=-1)
trainParser.add_argument(
'--classifier',
type=str,
choices=[
'LinearSvm',
'GMM',
'RadialSvm',
'DecisionTree'],
help='The type of classifier to use.',
default='LinearSvm')
trainParser.add_argument(
'workDir',
type=str,
help="The input work directory containing 'reps.csv' and 'labels.csv'. Obtained from aligning a directory with 'align-dlib' and getting the representations with 'batch-represent'.")
inferParser = subparsers.add_parser(
'infer', help='Predict who an image contains from a trained classifier.')
inferParser.add_argument(
'classifierModel',
type=str,
help='The Python pickle representing the classifier. This is NOT the Torch network model, which can be set with --networkModel.')
inferParser.add_argument('imgs', type=str, nargs='+',
help="Input image.")
inferFromTestParser = subparsers.add_parser(
'inferFromTest',
help='Predict who an image contains from a trained classifier.')
# inferFromTestParser.add_argument('--classifierModel', type=str,
# help='The Python pickle representing the classifier. This is NOT the
# Torch network model, which can be set with --networkModel.')
inferFromTestParser.add_argument(
'featureFolder',
type=str,
nargs='+',
help="Input the fratures folder which has the classifiers.")
inferFromTestParser.add_argument(
'testFolder',
type=str,
nargs='+',
help="Input the test folder. It can be either known test dataset or unknown test dataset.")
inferFromTestParser.add_argument(
'--threshold',
type=float,
nargs='+',
help="Threshold of the confidence to classify a prediction as unknown person. <threshold will be predicted as unknown person.",
default=0.0)
inferFromTestParser.add_argument(
'--unknown',
action='store_true',
help="Use this flag if you are testing on unknown dataset. Make sure you set thresold value")
preprocessParser = subparsers.add_parser(
'preprocess',
help='Before benchmarking, preprocess divides the dataset into train and test splits, aligns the train dataset, and extracts features from it.')
preprocessParser.add_argument('--lfwDir', type=str,
help="Enter the lfw face directory")
preprocessParser.add_argument(
'--rangeOfPeople',
type=str,
help="Range of the people you would like to take as known person group. Not that the input is a list starts with 0 and the people are sorted in decending order of number of images. Eg: 0:10 ")
preprocessParser.add_argument(
'--featuresDir',
type=str,
help="Enter the directory location where the aligned images, features, and classifer model will be saved.")
args = parser.parse_args()
if args.verbose:
print("Argument parsing and import libraries took {} seconds.".format(
time.time() - start))
start = time.time()
align = openface.AlignDlib(args.dlibFacePredictor)
net = openface.TorchNeuralNet(args.networkModel, imgDim=args.imgDim,
cuda=args.cuda)
if args.verbose:
print("Loading the dlib and OpenFace models took {} seconds.".format(
time.time() - start))
start = time.time()
if args.mode == 'train':
train(args)
elif args.mode == 'infer':
# infer(args)
raise Exception("Use ./demo/classifier.py")
elif args.mode == 'inferFromTest':
inferFromTest(args)
elif args.mode == 'preprocess':
preprocess(args)
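# Example invocations (added for illustration; the script name and paths are placeholders,
# not taken from the original project):
#   python lfw_classification.py preprocess --lfwDir ./lfw --rangeOfPeople 0:10 --featuresDir ./features
#   python lfw_classification.py train ./features/train_known_features
#   python lfw_classification.py inferFromTest ./features/train_known_features ./features/test_known_raw --verbose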
|
examples/serialized.py | wyfo/apimodel | 118 | 12630014 | from dataclasses import dataclass
from apischema import serialize, serialized
from apischema.json_schema import serialization_schema
@dataclass
class Foo:
@serialized
@property
def bar(self) -> int:
return 0
# Serialized methods can have default arguments
@serialized
def baz(self, some_arg_with_default: int = 1) -> int:
return some_arg_with_default
@serialized("aliased")
@property
def with_alias(self) -> int:
return 2
# A serialized method can also be defined outside the class,
# but its first parameter must be annotated
@serialized
def function(foo: Foo) -> int:
return 3
assert serialize(Foo, Foo()) == {"bar": 0, "baz": 1, "aliased": 2, "function": 3}
assert serialization_schema(Foo) == {
"$schema": "http://json-schema.org/draft/2020-12/schema#",
"type": "object",
"properties": {
"aliased": {"type": "integer"},
"bar": {"type": "integer"},
"baz": {"type": "integer"},
"function": {"type": "integer"},
},
"required": ["bar", "baz", "aliased", "function"],
"additionalProperties": False,
}
|
2021/05/24/Using Async Functions Inside of Flask Routes/flaskasync/auth.py | Ujjawal-Rajput/yes | 492 | 12630023 | APIKEY = 'YOUR API KEY'
auth=("live_yourapikey", "") |
hikyuu/draw/drawplot/matplotlib_draw.py | kknet/hikyuu | 1,283 | 12630029 | # -*- coding: utf8 -*-
# cp936
"""
Chart drawing helpers for interactive use, e.g. candlestick (K-line) charts and American-style OHLC charts.
"""
import sys
import datetime
import numpy as np
import matplotlib
from pylab import Rectangle, gca, figure, ylabel, axes, draw
from matplotlib.lines import Line2D, TICKLEFT, TICKRIGHT
from matplotlib.ticker import FuncFormatter, FixedLocator
from hikyuu import *
from .common import get_draw_title
def create_one_axes_figure(figsize=(10, 6)):
"""生成一个仅含有1个坐标轴的figure,并返回其坐标轴对象
:param figsize: (宽, 高)
:return: ax
"""
rect1 = [0.05, 0.05, 0.9, 0.90]
fg = figure(figsize=figsize)
ax1 = fg.add_axes(rect1)
return ax1
def create_two_axes_figure(figsize=(10, 8)):
"""生成一个含有2个坐标轴的figure,并返回坐标轴列表
:param figsize: (宽, 高)
:return: (ax1, ax2)
"""
rect1 = [0.05, 0.35, 0.9, 0.60]
rect2 = [0.05, 0.05, 0.9, 0.30]
fg = figure(figsize=figsize)
ax1 = fg.add_axes(rect1)
ax2 = fg.add_axes(rect2, sharex=ax1)
return ax1, ax2
def create_three_axes_figure(figsize=(10, 8)):
"""生成一个含有3个坐标轴的figure,并返回坐标轴列表
:param figsize: (宽, 高)
:return: (ax1, ax2, ax3)
"""
rect1 = [0.05, 0.45, 0.9, 0.50]
rect2 = [0.05, 0.25, 0.9, 0.20]
rect3 = [0.05, 0.05, 0.9, 0.20]
fg = figure(figsize=figsize)
ax1 = fg.add_axes(rect1)
ax2 = fg.add_axes(rect2, sharex=ax1)
ax3 = fg.add_axes(rect3, sharex=ax1)
return ax1, ax2, ax3
def create_four_axes_figure(figsize=(10, 8)):
"""生成一个含有4个坐标轴的figure,并返回坐标轴列表
:param figsize: (宽, 高)
:return: (ax1, ax2, ax3, ax4)
"""
rect1 = [0.05, 0.50, 0.9, 0.45]
rect2 = [0.05, 0.35, 0.9, 0.15]
rect3 = [0.05, 0.20, 0.9, 0.15]
rect4 = [0.05, 0.05, 0.9, 0.15]
fg = figure(figsize=figsize)
ax1 = fg.add_axes(rect1)
ax2 = fg.add_axes(rect2, sharex=ax1)
ax3 = fg.add_axes(rect3, sharex=ax1)
ax4 = fg.add_axes(rect4, sharex=ax1)
return ax1, ax2, ax3, ax4
def create_figure(n=1, figsize=(10, 8)):
"""生成含有指定坐标轴数量的窗口,最大只支持4个坐标轴。
:param int n: 坐标轴数量
:param figsize: (宽, 高)
:return: (ax1, ax2, ...) 根据指定的坐标轴数量而定,超出[1,4]个坐标轴时,返回None
"""
if n == 1:
return create_one_axes_figure(figsize)
elif n == 2:
return create_two_axes_figure(figsize)
elif n == 3:
return create_three_axes_figure(figsize)
elif n == 4:
return create_four_axes_figure(figsize)
else:
print("Max support axes number is 4!")
return None
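# Illustrative sketch (added for clarity, not part of the original hikyuu API): a typical way
# to combine create_figure() with the plotting helpers defined below. The kdata argument is
# assumed to be a hikyuu KData object obtained elsewhere.
def _example_price_and_macd(kdata):
    """Draw candlesticks on the upper axes and MACD bars on the lower axes (illustration only)."""
    ax1, ax2 = create_figure(2)
    kplot(kdata, axes=ax1)
    ax_draw_macd(ax2, kdata)
    return ax1, ax2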
class StockFuncFormatter(object):
"""用于坐标轴显示日期
关于matplotlib中FuncFormatter的使用方法,请参见:
http://matplotlib.sourceforge.net/examples/api/date_index_formatter.html
"""
def __init__(self, ix2date):
self.__ix2date = ix2date
def __call__(self, x, pos=None): #IGNORE:W0613
result = ''
ix = int(x)
if ix in self.__ix2date:
result = self.__ix2date[ix]
return result
def getDayLocatorAndFormatter(dates):
"""获取显示日线时使用的Major Locator和Major Formatter"""
sep = len(dates) / 8
loc = [
(
i, str(d) if
(i !=
(len(dates) - 1)) and (i % sep != 0) else "{}-{}-{}".format(d.year, d.month, d.day)
) for i, d in enumerate(dates)
]
fixed_loc = [
i for i in range(len(dates)) if (i == (len(dates) - 1)) or (i != 0 and i % sep == 0)
]
month_loc = FixedLocator(fixed_loc)
month_fm = FuncFormatter(StockFuncFormatter(dict(loc)))
return month_loc, month_fm
def getMinLocatorAndFormatter(dates):
"""获取显示分钟线时使用的Major Locator和Major Formatter"""
sep = len(dates) / 5
loc = [
(
i, str(d)
if i % sep != 0 else "{}-{}-{} {}:{}".format(d.year, d.month, d.day, d.hour, d.minute)
) for i, d in enumerate(dates)
]
fixed_loc = [i for i in range(len(dates)) if i != 0 and i % sep == 0]
month_loc = FixedLocator(fixed_loc)
month_fm = FuncFormatter(StockFuncFormatter(dict(loc)))
return month_loc, month_fm
def ax_set_locator_formatter(axes, dates, typ):
""" 设置指定坐标轴的日期显示,根据指定的K线类型优化X轴坐标显示
:param axes: 指定的坐标轴
:param dates: Datetime构成可迭代序列
:param Query.KType typ: K线类型
"""
major_loc, major_fm = None, None
if typ == Query.DAY:
major_loc, major_fm = getDayLocatorAndFormatter(dates)
elif typ == Query.WEEK:
major_loc, major_fm = getDayLocatorAndFormatter(dates)
elif typ == Query.MONTH:
major_loc, major_fm = getDayLocatorAndFormatter(dates)
elif typ == Query.QUARTER:
major_loc, major_fm = getDayLocatorAndFormatter(dates)
elif typ == Query.HALFYEAR:
major_loc, major_fm = getDayLocatorAndFormatter(dates)
elif typ == Query.YEAR:
major_loc, major_fm = getDayLocatorAndFormatter(dates)
else:
major_loc, major_fm = getMinLocatorAndFormatter(dates)
axes.xaxis.set_major_locator(major_loc)
axes.xaxis.set_major_formatter(major_fm)
def adjust_axes_show(axeslist):
"""用于调整上下紧密相连的坐标轴显示时,其上一坐标轴最小值刻度和下一坐标轴最大值刻度
显示重叠的问题。
:param axeslist: 上下相连的坐标轴列表 (ax1,ax2,...)
"""
for ax in axeslist[:-1]:
for label in ax.get_xticklabels():
label.set_visible(False)
ylabels = ax.get_yticklabels()
ylabels[0].set_visible(False)
def kplot(kdata, new=True, axes=None, colorup='r', colordown='g'):
"""绘制K线图
:param KData kdata: K线数据
:param bool new: 是否在新窗口中显示,只在没有指定axes时生效
:param axes: 指定的坐标轴
:param colorup: the color of the rectangle where close >= open
:param colordown: the color of the rectangle where close < open
"""
if not kdata:
print("kdata is None")
return
if not axes:
axes = create_figure() if new else gca()
alpha = 1.0
width = 0.6
OFFSET = width / 2.0
rfcolor = matplotlib.rcParams['axes.facecolor']
for i in range(len(kdata)):
record = kdata[i]
open, high, low, close = record.open, record.high, record.low, record.close
if close >= open:
color = colorup
lower = open
height = close - open
rect = Rectangle(
xy=(i - OFFSET, lower),
width=width,
height=height,
facecolor=rfcolor,
edgecolor=color
)
else:
color = colordown
lower = close
height = open - close
rect = Rectangle(
xy=(i - OFFSET, lower),
width=width,
height=height,
facecolor=color,
edgecolor=color
)
vline1 = Line2D(
xdata=(i, i), ydata=(low, lower), color=color, linewidth=0.5, antialiased=True
)
vline2 = Line2D(
xdata=(i, i),
ydata=(lower + height, high),
color=color,
linewidth=0.5,
antialiased=True
)
rect.set_alpha(alpha)
axes.add_line(vline1)
axes.add_line(vline2)
axes.add_patch(rect)
title = get_draw_title(kdata)
axes.set_title(title)
last_record = kdata[-1]
color = 'r' if last_record.close > kdata[-2].close else 'g'
text = u'%s 开:%.2f 高:%.2f 低:%.2f 收:%.2f 涨幅:%.2f%%' % (
last_record.datetime.number / 10000, last_record.open, last_record.high, last_record.low,
last_record.close, 100 * (last_record.close - kdata[-2].close) / kdata[-2].close
)
axes.text(
0.99,
0.97,
text,
horizontalalignment='right',
verticalalignment='top',
transform=axes.transAxes,
color=color
)
axes.autoscale_view()
axes.set_xlim(-1, len(kdata) + 1)
ax_set_locator_formatter(axes, kdata.get_datetime_list(), kdata.get_query().ktype)
#draw()
def mkplot(kdata, new=True, axes=None, colorup='r', colordown='g', ticksize=3):
"""绘制美式K线图
:param KData kdata: K线数据
:param bool new: 是否在新窗口中显示,只在没有指定axes时生效
:param axes: 指定的坐标轴
:param colorup: the color of the lines where close >= open
:param colordown: the color of the lines where close < open
:param ticksize: open/close tick marker in points
"""
if not kdata:
print("kdata is None")
return
if not axes:
axes = create_figure() if new else gca()
for t in range(len(kdata)):
record = kdata[t]
open, high, low, close = record.open, record.high, record.low, record.close
color = colorup if close >= open else colordown
vline = Line2D(xdata=(t, t), ydata=(low, high), color=color, antialiased=False)
oline = Line2D(
xdata=(t, t),
ydata=(open, open),
color=color,
antialiased=False,
marker=TICKLEFT,
markersize=ticksize
)
cline = Line2D(
xdata=(t, t),
ydata=(close, close),
color=color,
antialiased=False,
markersize=ticksize,
marker=TICKRIGHT
)
axes.add_line(vline)
axes.add_line(oline)
axes.add_line(cline)
title = get_draw_title(kdata)
axes.set_title(title)
last_record = kdata[-1]
color = 'r' if last_record.close > kdata[-2].close else 'g'
text = u'%s 开:%.2f 高:%.2f 低:%.2f 收:%.2f' % (
last_record.datetime.number / 10000, last_record.open, last_record.high, last_record.low,
last_record.close
)
axes.text(
0.99,
0.97,
text,
horizontalalignment='right',
verticalalignment='top',
transform=axes.transAxes,
color=color
)
axes.autoscale_view()
axes.set_xlim(-1, len(kdata) + 1)
ax_set_locator_formatter(axes, kdata.get_datetime_list(), kdata.get_query().ktype)
#draw()
def iplot(
indicator,
new=True,
axes=None,
kref=None,
legend_on=False,
text_on=False,
text_color='k',
zero_on=False,
label=None,
*args,
**kwargs
):
"""绘制indicator曲线
:param Indicator indicator: indicator实例
:param axes: 指定的坐标轴
:param new: 是否在新窗口中显示,只在没有指定axes时生效
:param kref: 参考的K线数据,以便绘制日期X坐标
:param legend_on: 是否打开图例
:param text_on: 是否在左上角显示指标名称及其参数
:param text_color: 指标名称解释文字的颜色,默认为黑色
:param zero_on: 是否需要在y=0轴上绘制一条直线
:param str label: label显示文字信息,text_on 及 legend_on 为 True 时生效
:param args: pylab plot参数
:param kwargs: pylab plot参数,如:marker(标记类型)、
markerfacecolor(标记颜色)、
markeredgecolor(标记的边缘颜色)
"""
if not indicator:
print("indicator is None")
return
if not axes:
axes = create_figure() if new else gca()
if not label:
label = "%s %.2f" % (indicator.long_name, indicator[-1])
py_indicatr = [None if x == constant.null_price else x for x in indicator]
axes.plot(py_indicatr, '-', label=label, *args, **kwargs)
if legend_on:
leg = axes.legend(loc='upper left')
leg.get_frame().set_alpha(0.5)
if text_on:
if not axes.texts:
axes.text(
0.01,
0.97,
label,
horizontalalignment='left',
verticalalignment='top',
transform=axes.transAxes,
color=text_color
)
else:
temp_str = axes.texts[0].get_text() + ' ' + label
axes.texts[0].set_text(temp_str)
if zero_on:
ylim = axes.get_ylim()
if ylim[0] < 0 < ylim[1]:
axes.hlines(0, 0, len(indicator))
axes.autoscale_view()
axes.set_xlim(-1, len(indicator) + 1)
if kref:
ax_set_locator_formatter(axes, kref.get_datetime_list(), kref.get_query().ktype)
#draw()
def ibar(
indicator,
new=True,
axes=None,
kref=None,
legend_on=False,
text_on=False,
text_color='k',
label=None,
width=0.4,
color='r',
edgecolor='r',
zero_on=False,
*args,
**kwargs
):
"""绘制indicator柱状图
:param Indicator indicator: Indicator实例
:param axes: 指定的坐标轴
:param new: 是否在新窗口中显示,只在没有指定axes时生效
:param kref: 参考的K线数据,以便绘制日期X坐标
:param legend_on: 是否打开图例
:param text_on: 是否在左上角显示指标名称及其参数
:param text_color: 指标名称解释文字的颜色,默认为黑色
:param str label: label显示文字信息,text_on 及 legend_on 为 True 时生效
:param zero_on: 是否需要在y=0轴上绘制一条直线
:param width: Bar的宽度
:param color: Bar的颜色
:param edgecolor: Bar边缘颜色
:param args: pylab plot参数
:param kwargs: pylab plot参数
"""
if not indicator:
print("indicator is None")
return
if not axes:
axes = create_figure() if new else gca()
if not label:
label = "%s %.2f" % (indicator.long_name, indicator[-1])
py_indicatr = [None if x == constant.null_price else x for x in indicator]
x = [i - 0.2 for i in range(len(indicator))]
y = py_indicatr
axes.bar(x, py_indicatr, width=width, color=color, edgecolor=edgecolor, *args, **kwargs)
if legend_on:
leg = axes.legend(loc='upper left')
leg.get_frame().set_alpha(0.5)
if text_on:
if not axes.texts:
axes.text(
0.01,
0.97,
label,
horizontalalignment='left',
verticalalignment='top',
transform=axes.transAxes,
color=text_color
)
else:
temp_str = axes.texts[0].get_text() + ' ' + label
axes.texts[0].set_text(temp_str)
if zero_on:
ylim = axes.get_ylim()
if ylim[0] < 0 < ylim[1]:
axes.hlines(0, 0, len(indicator))
axes.autoscale_view()
axes.set_xlim(-1, len(indicator) + 1)
if kref:
ax_set_locator_formatter(axes, kref.get_datetime_list(), kref.get_query().ktype)
#draw()
def ax_draw_macd(axes, kdata, n1=12, n2=26, n3=9):
"""绘制MACD
:param axes: 指定的坐标轴
:param KData kdata: KData
:param int n1: 指标 MACD 的参数1
:param int n2: 指标 MACD 的参数2
:param int n3: 指标 MACD 的参数3
"""
macd = MACD(CLOSE(kdata), n1, n2, n3)
bmacd, fmacd, smacd = macd.get_result(0), macd.get_result(1), macd.get_result(2)
text = 'MACD(%s,%s,%s) DIF:%.2f, DEA:%.2f, BAR:%.2f' % (
n1, n2, n3, fmacd[-1], smacd[-1], bmacd[-1]
)
axes.text(
0.01,
0.97,
text,
horizontalalignment='left',
verticalalignment='top',
transform=axes.transAxes
)
total = len(kdata)
x = [i - 0.2 for i in range(total)]
x1 = [x[i] for i, d in enumerate(bmacd) if d > 0]
y1 = [i for i in bmacd if i > 0]
x2 = [x[i] for i, d in enumerate(bmacd) if d <= 0]
y2 = [i for i in bmacd if i <= 0]
axes.bar(x1, y1, width=0.4, color='r', edgecolor='r')
axes.bar(x2, y2, width=0.4, color='g', edgecolor='g')
axt = axes.twinx()
axt.grid(False)
axt.set_yticks([])
fmacd.plot(axes=axt, linestyle='--', legend_on=False, text_on=False)
smacd.plot(axes=axt, legend_on=False, text_on=False)
for label in axt.get_xticklabels():
label.set_visible(False)
def ax_draw_macd2(axes, ref, kdata, n1=12, n2=26, n3=9):
"""绘制MACD。
当BAR值变化与参考序列ref变化不一致时,显示为灰色,
当BAR和参考序列ref同时上涨,显示红色
当BAR和参考序列ref同时下跌,显示绿色
:param axes: 指定的坐标轴
:param ref: 参考序列,EMA
:param KData kdata: KData
:param int n1: 指标 MACD 的参数1
:param int n2: 指标 MACD 的参数2
:param int n3: 指标 MACD 的参数3
"""
macd = MACD(CLOSE(kdata), n1, n2, n3)
bmacd, fmacd, smacd = macd.get_result(0), macd.get_result(1), macd.get_result(2)
text = 'MACD(%s,%s,%s) DIF:%.2f, DEA:%.2f, BAR:%.2f' % (
n1, n2, n3, fmacd[-1], smacd[-1], bmacd[-1]
)
axes.text(
0.01,
0.97,
text,
horizontalalignment='left',
verticalalignment='top',
transform=axes.transAxes
)
total = len(kdata)
x = [i - 0.2 for i in range(0, total)]
y = bmacd
x1, x2, x3 = [x[0]], [], []
y1, y2, y3 = [y[0]], [], []
for i in range(1, total):
if ref[i] - ref[i - 1] > 0 and y[i] - y[i - 1] > 0:
x2.append(x[i])
y2.append(y[i])
elif ref[i] - ref[i - 1] < 0 and y[i] - y[i - 1] < 0:
x3.append(x[i])
y3.append(y[i])
else:
x1.append(x[i])
y1.append(y[i])
axes.bar(x1, y1, width=0.4, color='#BFBFBF', edgecolor='#BFBFBF')
axes.bar(x2, y2, width=0.4, color='r', edgecolor='r')
axes.bar(x3, y3, width=0.4, color='g', edgecolor='g')
axt = axes.twinx()
axt.grid(False)
axt.set_yticks([])
fmacd.plot(axes=axt, linestyle='--', legend_on=False, text_on=False)
smacd.plot(axes=axt, legend_on=False, text_on=False)
for label in axt.get_xticklabels():
label.set_visible(False)
def sgplot(sg, new=True, axes=None, style=1, kdata=None):
"""绘制买入/卖出信号
:param SignalBase sg: 信号指示器
:param new: 仅在未指定axes的情况下生效,当为True时,创建新的窗口对象并在其中进行绘制
:param axes: 指定在那个轴对象中进行绘制
:param style: 1 | 2 信号箭头绘制样式
:param KData kdata: 指定的KData(即信号发生器的交易对象),
如该值为None,则认为该信号发生器已经指定了交易对象,
否则,使用该参数作为交易对象
"""
kdata = sg.to if kdata is None else kdata
refdates = kdata.get_datetime_list()
date_index = dict([(d, i) for i, d in enumerate(refdates)])
if axes is None:
if new:
axes = create_figure()
kplot(kdata, axes=axes)
else:
axes = gca()
ylim = axes.get_ylim()
height = ylim[1] - ylim[0]
if style == 1:
arrow_buy = dict(arrowstyle="->")
arrow_sell = arrow_buy
else:
arrow_buy = dict(facecolor='red', frac=0.5)
arrow_sell = dict(facecolor='blue', frac=0.5)
dates = sg.get_buy_signal()
for d in dates:
if d not in date_index:
continue
pos = date_index[d]
krecord = kdata[pos]
axes.annotate(
'B', (pos, krecord.low - height * 0.01), (pos, krecord.low - height * 0.1),
arrowprops=arrow_buy,
horizontalalignment='center',
verticalalignment='bottom',
color='red'
)
dates = sg.get_sell_signal()
for d in dates:
if d not in date_index:
continue
pos = date_index[d]
krecord = kdata[pos]
axes.annotate(
'S', (pos, krecord.high + height * 0.01), (pos, krecord.high + height * 0.1),
arrowprops=arrow_sell,
horizontalalignment='center',
verticalalignment='top',
color='blue'
)
def cnplot(cn, new=True, axes=None, kdata=None):
"""绘制系统有效条件
:param ConditionBase cn: 系统有效条件
:param new: 仅在未指定axes的情况下生效,当为True时,创建新的窗口对象并在其中进行绘制
:param axes: 指定在那个轴对象中进行绘制
:param KData kdata: 指定的KData,如该值为None,则认为该系统有效条件已经
指定了交易对象,否则,使用该参数作为交易对象
"""
if kdata is None:
kdata = cn.to
else:
cn.to = kdata
refdates = kdata.get_datetime_list()
date_index = dict([(d, i) for i, d in enumerate(refdates)])
if axes is None:
if new:
axes = create_figure()
kplot(kdata, axes=axes)
else:
axes = gca()
x = np.array([i for i in range(len(refdates))])
y1 = np.array([1 if cn.isValid(d) else -1 for d in refdates])
y2 = np.array([-1 if cn.isValid(d) else 1 for d in refdates])
axes.fill_between(x, y1, y2, where=y2 > y1, facecolor='blue', alpha=0.6)
axes.fill_between(x, y1, y2, where=y2 < y1, facecolor='red', alpha=0.6)
def sysplot(sys, new=True, axes=None, style=1):
"""绘制系统实际买入/卖出信号
:param SystemBase sys: 系统实例
:param new: 仅在未指定axes的情况下生效,当为True时,
创建新的窗口对象并在其中进行绘制
:param axes: 指定在那个轴对象中进行绘制
:param style: 1 | 2 信号箭头绘制样式
"""
kdata = sys.to
refdates = kdata.get_datetime_list()
date_index = dict([(d, i) for i, d in enumerate(refdates)])
if axes is None:
if new:
axes = create_figure()
kplot(kdata, axes=axes)
else:
axes = gca()
ylim = axes.get_ylim()
height = ylim[1] - ylim[0]
if style == 1:
arrow_buy = dict(arrowstyle="->")
arrow_sell = arrow_buy
else:
arrow_buy = dict(facecolor='red', frac=0.5)
arrow_sell = dict(facecolor='blue', frac=0.5)
tds = sys.tm.get_trade_list()
buy_dates = []
sell_dates = []
for t in tds:
if t.business == BUSINESS.BUY:
buy_dates.append(t.datetime)
elif t.business == BUSINESS.SELL:
sell_dates.append(t.datetime)
else:
pass
for d in buy_dates:
if d not in date_index:
continue
pos = date_index[d]
krecord = kdata[pos]
axes.annotate(
'B', (pos, krecord.low - height * 0.01), (pos, krecord.low - height * 0.1),
arrowprops=arrow_buy,
horizontalalignment='center',
verticalalignment='bottom',
color='red'
)
for d in sell_dates:
if d not in date_index:
continue
pos = date_index[d]
krecord = kdata[pos]
axes.annotate(
'S', (pos, krecord.high + height * 0.01), (pos, krecord.high + height * 0.1),
arrowprops=arrow_sell,
horizontalalignment='center',
verticalalignment='top',
color='blue'
)
|
flask_admin/model/template.py | caffeinatedMike/flask-admin | 4,440 | 12630048 | from flask_admin._compat import pass_context, string_types, reduce
from flask_admin.babel import gettext
class BaseListRowAction(object):
def __init__(self, title=None):
self.title = title
def render(self, context, row_id, row):
raise NotImplementedError()
@pass_context
def render_ctx(self, context, row_id, row):
return self.render(context, row_id, row)
def _resolve_symbol(self, context, symbol):
if '.' in symbol:
parts = symbol.split('.')
m = context.resolve(parts[0])
return reduce(getattr, parts[1:], m)
else:
return context.resolve(symbol)
class LinkRowAction(BaseListRowAction):
def __init__(self, icon_class, url, title=None):
super(LinkRowAction, self).__init__(title=title)
self.url = url
self.icon_class = icon_class
def render(self, context, row_id, row):
m = self._resolve_symbol(context, 'row_actions.link')
if isinstance(self.url, string_types):
url = self.url.format(row_id=row_id)
else:
url = self.url(self, row_id, row)
return m(self, url)
class EndpointLinkRowAction(BaseListRowAction):
def __init__(self, icon_class, endpoint, title=None, id_arg='id', url_args=None):
super(EndpointLinkRowAction, self).__init__(title=title)
self.icon_class = icon_class
self.endpoint = endpoint
self.id_arg = id_arg
self.url_args = url_args
def render(self, context, row_id, row):
m = self._resolve_symbol(context, 'row_actions.link')
get_url = self._resolve_symbol(context, 'get_url')
kwargs = dict(self.url_args) if self.url_args else {}
kwargs[self.id_arg] = row_id
url = get_url(self.endpoint, **kwargs)
return m(self, url)
class TemplateLinkRowAction(BaseListRowAction):
def __init__(self, template_name, title=None):
super(TemplateLinkRowAction, self).__init__(title=title)
self.template_name = template_name
def render(self, context, row_id, row):
m = self._resolve_symbol(context, self.template_name)
return m(self, row_id, row)
class ViewRowAction(TemplateLinkRowAction):
def __init__(self):
super(ViewRowAction, self).__init__(
'row_actions.view_row',
gettext('View Record'))
class ViewPopupRowAction(TemplateLinkRowAction):
def __init__(self):
super(ViewPopupRowAction, self).__init__(
'row_actions.view_row_popup',
gettext('View Record'))
class EditRowAction(TemplateLinkRowAction):
def __init__(self):
super(EditRowAction, self).__init__(
'row_actions.edit_row',
gettext('Edit Record'))
class EditPopupRowAction(TemplateLinkRowAction):
def __init__(self):
super(EditPopupRowAction, self).__init__(
'row_actions.edit_row_popup',
gettext('Edit Record'))
class DeleteRowAction(TemplateLinkRowAction):
def __init__(self):
super(DeleteRowAction, self).__init__(
'row_actions.delete_row',
gettext('Delete Record'))
# Macro helper
def macro(name):
'''
Jinja2 macro list column formatter.
:param name:
Macro name in the current template
'''
def inner(view, context, model, column):
m = context.resolve(name)
if not m:
return m
return m(model=model, column=column)
return inner
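# Illustrative usage (added for clarity, not part of the original module). On a ModelView
# subclass these hooks are typically wired as below; the view name, endpoint, column and
# macro names are assumptions:
#
#     class UserView(ModelView):
#         column_extra_row_actions = [
#             LinkRowAction('glyphicon glyphicon-off', 'https://example.com/?id={row_id}'),
#             EndpointLinkRowAction('glyphicon glyphicon-user', 'users.index_view'),
#         ]
#         column_formatters = {'price': macro('render_price')}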
|
Chapter4/LeNet/data.py | zxjzxj9/PyTorchIntroduction | 205 | 12630053 | """ This module loads the MNIST dataset used to train LeNet.
"""
from torchvision.datasets import MNIST
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
data_train = MNIST('./data',
download=True,
transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor()]))
data_test = MNIST('./data',
train=False,
download=True,
transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor()]))
data_train_loader = DataLoader(data_train, batch_size=256,
shuffle=True, num_workers=8)
data_test_loader = DataLoader(data_test, batch_size=1024,
num_workers=8)
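# Quick sanity check (added for illustration): pull a single batch from the loaders above.
if __name__ == "__main__":
    images, labels = next(iter(data_train_loader))
    print(images.shape)  # expected: torch.Size([256, 1, 32, 32])
    print(labels.shape)  # expected: torch.Size([256])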
|
airmozilla/starred/views.py | mozilla/airmozilla | 115 | 12630074 | import collections
from django import http
from django.db.models import Q
from django.shortcuts import render
from django.views.decorators import cache
from django.core.urlresolvers import reverse
from session_csrf import anonymous_csrf
from jsonview.decorators import json_view
from airmozilla.base.utils import paginate
from airmozilla.starred.models import StarredEvent
from airmozilla.main.models import (
Event,
CuratedGroup,
)
@cache.cache_control(private=True)
@anonymous_csrf
@json_view
def sync_starred_events(request):
context = {'csrf_token': request.csrf_token}
if request.user.is_anonymous():
context['ids'] = []
return context
elif request.method == 'POST':
ids = request.POST.getlist('ids')
StarredEvent.objects.filter(user=request.user).exclude(
id__in=ids).delete()
for id in ids:
try:
event = Event.objects.get(id=id)
StarredEvent.objects.get_or_create(
user=request.user,
event=event
)
except Event.DoesNotExist:
# ignore events that don't exist but fail on other errors
pass
starred = StarredEvent.objects.filter(user=request.user)
context['ids'] = list(starred.values_list('event_id', flat=True))
return context
def home(request, page=1):
template_name = 'starred/home.html'
ids = request.GET.get('ids')
if request.is_ajax():
template_name = 'starred/events.html'
if request.user.is_authenticated():
events = (
Event.objects.filter(starredevent__user=request.user)
.order_by('starredevent__created')
)
elif ids:
# If you're not authenticated, you should only be able to see
# public events.
try:
ids = [int(x) for x in ids.split(',')]
except ValueError:
return http.HttpResponseBadRequest('invalid id')
events = Event.objects.filter(
id__in=ids
).filter(
Q(privacy=Event.PRIVACY_PUBLIC) |
Q(privacy=Event.PRIVACY_CONTRIBUTORS)
)
events = sorted(events, key=lambda e: ids.index(e.id))
else:
events = None
starred_paged = next_page_url = prev_page_url = None
if events:
starred_paged = paginate(events, page, 10)
# to simplify the complexity of the template when it tries to make the
# pagination URLs, we just figure it all out here
if starred_paged.has_next():
next_page_url = reverse(
'starred:home',
args=(starred_paged.next_page_number(),)
)
if starred_paged.has_previous():
prev_page_url = reverse(
'starred:home',
args=(starred_paged.previous_page_number(),)
)
curated_groups_map = collections.defaultdict(list)
curated_groups = (
CuratedGroup.objects.filter(event__in=[
x.id for x in starred_paged
])
.values_list('event_id', 'name')
.order_by('name')
)
for event_id, name in curated_groups:
curated_groups_map[event_id].append(name)
def get_curated_groups(event):
if events:
return curated_groups_map.get(event.id)
context = {
'events': starred_paged,
'get_curated_groups': get_curated_groups,
'next_page_url': next_page_url,
'prev_page_url': prev_page_url,
'star_on': True,
}
return render(request, template_name, context)
|
scripts/print_go_ast_as_python.py | voiski/pytago | 206 | 12630087 | """
Dirty script to help generate go_ast code from an actual Go AST.
Paste Go code into https://lu4p.github.io/astextract/ and then paste the resulting AST here.
"""
AST = r"""
&ast.CallExpr {
Fun: &ast.FuncLit {
Type: &ast.FuncType {
Params: &ast.FieldList {
List: []*ast.Field {
&ast.Field {
Names: []*ast.Ident {
&ast.Ident {
Name: "repeated",
},
},
Type: &ast.ArrayType {
Elt: &ast.Ident {
Name: "int",
},
},
},
&ast.Field {
Names: []*ast.Ident {
&ast.Ident {
Name: "n",
},
},
Type: &ast.Ident {
Name: "int",
},
},
},
},
Results: &ast.FieldList {
List: []*ast.Field {
&ast.Field {
Names: []*ast.Ident {
&ast.Ident {
Name: "result",
},
},
Type: &ast.ArrayType {
Elt: &ast.Ident {
Name: "int",
},
},
},
},
},
},
Body: &ast.BlockStmt {
List: []ast.Stmt {
&ast.ForStmt {
Init: &ast.AssignStmt {
Lhs: []ast.Expr {
&ast.Ident {
Name: "i",
},
},
Tok: token.DEFINE,
Rhs: []ast.Expr {
&ast.BasicLit {
Kind: token.INT,
Value: "0",
},
},
},
Cond: &ast.BinaryExpr {
X: &ast.Ident {
Name: "i",
},
Op: token.LSS,
Y: &ast.Ident {
Name: "n",
},
},
Post: &ast.IncDecStmt {
X: &ast.Ident {
Name: "i",
},
Tok: token.INC,
},
Body: &ast.BlockStmt {
List: []ast.Stmt {
&ast.AssignStmt {
Lhs: []ast.Expr {
&ast.Ident {
Name: "result",
},
},
Tok: token.ASSIGN,
Rhs: []ast.Expr {
&ast.CallExpr {
Fun: &ast.Ident {
Name: "append",
},
Args: []ast.Expr {
&ast.Ident {
Name: "result",
},
&ast.Ident {
Name: "repeated",
},
},
Ellipsis: 109,
},
},
},
},
},
},
&ast.ReturnStmt {
Results: []ast.Expr {
&ast.Ident {
Name: "result",
},
},
},
},
},
},
Args: []ast.Expr {
&ast.Ident {
Name: "elts",
},
&ast.Ident {
Name: "number",
},
},
}
"""
def go_ast_to_py(tree: str) -> str:
result = ""
list_closing_levels = []
for line in tree.splitlines(keepends=True):
level = len(line) - len(line.lstrip())
before_quote, *after_quote = line.split('"')
for a, b in {
"&": "",
' {': '(',
'}': ')',
': ': '=',
}.items():
before_quote = before_quote.replace(a, b)
if not after_quote:
before_quote, *after_list = before_quote.split("[]")
if after_list:
before_quote += '[' + '\n'
list_closing_levels.append(level)
elif level in list_closing_levels:
before_quote = before_quote.replace(')', ']')
list_closing_levels.remove(level)
result += '"'.join([before_quote] + after_quote)
return result
if __name__ == '__main__':
print(go_ast_to_py(AST))
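# For reference (added for illustration), a small fragment such as:
#     &ast.Ident {
#         Name: "result",
#     },
# is converted by go_ast_to_py() into:
#     ast.Ident(
#         Name="result",
#     ),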
|
moveit_commander/src/moveit_commander/__init__.py | Bhavam/moveit | 1,116 | 12630098 | from .exception import *
from .roscpp_initializer import *
from .planning_scene_interface import *
from .move_group import *
from .robot import *
from .interpreter import *
|
paperboy/scheduler/luigi/luigi_tasks/common.py | chris-aeviator/paperboy | 233 | 12630106 | import logging
from luigi import Task
from luigi.parameter import Parameter, DateParameter, ParameterVisibility
class BaseTask(Task):
task_id = Parameter()
def __init__(self, *args, **kwargs):
super(BaseTask, self).__init__(*args, **kwargs)
self._reqs = []
self.log = logging # FIXME
self._completed = False
def requires(self):
return self._reqs
def complete(self):
if self._reqs:
if isinstance(self._reqs, list):
return all(r.complete() for r in self._reqs) and self._completed
return self._reqs.complete() and self._completed
return self._completed
class JobTask(BaseTask):
job = Parameter(visibility=ParameterVisibility.HIDDEN)
def __init__(self, *args, **kwargs):
super(JobTask, self).__init__(*args, **kwargs)
def run(self):
self.log.critical('job')
self._completed = True
class JobCleanupTask(BaseTask):
job = Parameter(visibility=ParameterVisibility.HIDDEN)
interval = Parameter()
start_date = DateParameter()
owner = Parameter()
email = Parameter()
time = DateParameter()
def __init__(self, *args, **kwargs):
super(JobCleanupTask, self).__init__(*args, **kwargs)
def run(self):
self.log.critical('job-cleanup')
self._completed = True
class ReportTask(BaseTask):
report = Parameter(visibility=ParameterVisibility.HIDDEN)
def __init__(self, *args, **kwargs):
super(ReportTask, self).__init__(*args, **kwargs)
def run(self):
self.log.critical('report')
self._completed = True
|
aeroot/__init__.py | CKAndroidProject/AERoot | 116 | 12630114 | """
AERoot module
"""
__version__ = "0.3.2"
|
detector/ctpn/train.py | qiu9yu/Lets_OCR | 671 | 12630118 | import torch.optim as optim
import torch
import cv2
import lib.tag_anchor
import lib.generate_gt_anchor
import lib.dataset_handler
import lib.utils
import numpy as np
import os
import Net.net as Net
import Net.loss as Loss
import ConfigParser
import time
import evaluate
import logging
import datetime
import copy
import random
import matplotlib.pyplot as plt
DRAW_PREFIX = './anchor_draw'
MSRA = '/home/ljs/data_ready/MSRA_TD500'
ALI = '/home/ljs/data_ready/ali_icpr'
DATASET_LIST = [MSRA, ALI]
MODEL_SAVE_PATH = '/model'
def loop_files(path):
files = []
l = os.listdir(path)
for f in l:
files.append(os.path.join(path, f))
return files
def create_train_val():
train_im_list = []
test_im_list = []
train_gt_list = []
test_gt_list = []
for dataset in DATASET_LIST:
trains_im_path =os.path.join(dataset, 'train_im')
tests_im_path = os.path.join(dataset, 'test_im')
trains_gt_path =os.path.join(dataset, 'train_gt')
test_gt_path = os.path.join(dataset, 'test_gt')
train_im = loop_files(trains_im_path)
train_gt = loop_files(trains_gt_path)
test_im = loop_files(tests_im_path)
test_gt = loop_files(test_gt_path)
train_im_list += train_im
test_im_list += test_im
train_gt_list += train_gt
test_gt_list += test_gt
return train_im_list, train_gt_list, test_im_list, test_gt_list
def draw_loss_plot(train_loss_list=[], test_loss_list=[]):
x1 = range(0, len(train_loss_list))
x2 = range(0, len(test_loss_list))
y1 = train_loss_list
y2 = test_loss_list
plt.subplot(2, 1, 1)
plt.plot(x1, y1, 'o-')
plt.title('train loss vs. iterators')
plt.ylabel('train loss')
plt.subplot(2, 1, 2)
plt.plot(x2, y2, '.-')
plt.xlabel('test loss vs. iterators')
plt.ylabel('test loss')
plt.savefig("test_train_loss.jpg")
if __name__ == '__main__':
cf = ConfigParser.ConfigParser()
cf.read('./config')
log_dir = './logs_10'
if not os.path.exists(log_dir):
os.mkdir(log_dir)
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.DEBUG)
log_file_name = datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S') + '.log'
log_handler = logging.FileHandler(os.path.join(log_dir, log_file_name), 'w')
log_format = logging.Formatter('%(asctime)s: %(message)s')
log_handler.setFormatter(log_format)
logger.addHandler(log_handler)
gpu_id = cf.get('global', 'gpu_id')
epoch = cf.getint('global', 'epoch')
val_batch_size = cf.getint('global', 'val_batch')
logger.info('Total epoch: {0}'.format(epoch))
using_cuda = cf.getboolean('global', 'using_cuda')
display_img_name = cf.getboolean('global', 'display_file_name')
display_iter = cf.getint('global', 'display_iter')
val_iter = cf.getint('global', 'val_iter')
save_iter = cf.getint('global', 'save_iter')
lr_front = cf.getfloat('parameter', 'lr_front')
lr_behind = cf.getfloat('parameter', 'lr_behind')
change_epoch = cf.getint('parameter', 'change_epoch') - 1
logger.info('Learning rate: {0}, {1}, change epoch: {2}'.format(lr_front, lr_behind, change_epoch + 1))
print('Using gpu id(available if use cuda): {0}'.format(gpu_id))
print('Train epoch: {0}'.format(epoch))
print('Use CUDA: {0}'.format(using_cuda))
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
no_grad = [
'cnn.VGG_16.convolution1_1.weight',
'cnn.VGG_16.convolution1_1.bias',
'cnn.VGG_16.convolution1_2.weight',
'cnn.VGG_16.convolution1_2.bias'
]
if not os.path.exists(MODEL_SAVE_PATH):
os.mkdir(MODEL_SAVE_PATH)
net = Net.CTPN()
for name, value in net.named_parameters():
if name in no_grad:
value.requires_grad = False
else:
value.requires_grad = True
# for name, value in net.named_parameters():
# print('name: {0}, grad: {1}'.format(name, value.requires_grad))
net.load_state_dict(torch.load('./lib/vgg16.model'))
# net.load_state_dict(model_zoo.load_url(model_urls['vgg16']))
lib.utils.init_weight(net)
if using_cuda:
net.cuda()
net.train()
print(net)
criterion = Loss.CTPN_Loss(using_cuda=using_cuda)
train_im_list, train_gt_list, val_im_list, val_gt_list = create_train_val()
total_iter = len(train_im_list)
print("total training image num is %s" % len(train_im_list))
print("total val image num is %s" % len(val_im_list))
train_loss_list = []
test_loss_list = []
for i in range(epoch):
if i >= change_epoch:
lr = lr_behind
else:
lr = lr_front
optimizer = optim.SGD(net.parameters(), lr=lr, momentum=0.9, weight_decay=0.0005)
#optimizer = optim.Adam(net.parameters(), lr=lr)
iteration = 1
total_loss = 0
total_cls_loss = 0
total_v_reg_loss = 0
total_o_reg_loss = 0
start_time = time.time()
random.shuffle(train_im_list)
# print(random_im_list)
for im in train_im_list:
root, file_name = os.path.split(im)
root, _ = os.path.split(root)
name, _ = os.path.splitext(file_name)
gt_name = 'gt_' + name + '.txt'
gt_path = os.path.join(root, "train_gt", gt_name)
if not os.path.exists(gt_path):
print('Ground truth file of image {0} does not exist.'.format(im))
continue
gt_txt = lib.dataset_handler.read_gt_file(gt_path)
#print("processing image %s" % os.path.join(img_root1, im))
img = cv2.imread(im)
if img is None:
iteration += 1
continue
img, gt_txt = lib.dataset_handler.scale_img(img, gt_txt)
tensor_img = img[np.newaxis, :, :, :]
tensor_img = tensor_img.transpose((0, 3, 1, 2))
if using_cuda:
tensor_img = torch.FloatTensor(tensor_img).cuda()
else:
tensor_img = torch.FloatTensor(tensor_img)
vertical_pred, score, side_refinement = net(tensor_img)
del tensor_img
# transform bbox gt to anchor gt for training
positive = []
negative = []
vertical_reg = []
side_refinement_reg = []
visual_img = copy.deepcopy(img)
try:
# loop all bbox in one image
for box in gt_txt:
# generate anchors from one bbox
gt_anchor, visual_img = lib.generate_gt_anchor.generate_gt_anchor(img, box, draw_img_gt=visual_img)
positive1, negative1, vertical_reg1, side_refinement_reg1 = lib.tag_anchor.tag_anchor(gt_anchor, score, box)
positive += positive1
negative += negative1
vertical_reg += vertical_reg1
side_refinement_reg += side_refinement_reg1
except:
print("warning: img %s raise error!" % im)
iteration += 1
continue
if len(vertical_reg) == 0 or len(positive) == 0 or len(side_refinement_reg) == 0:
iteration += 1
continue
cv2.imwrite(os.path.join(DRAW_PREFIX, file_name), visual_img)
optimizer.zero_grad()
loss, cls_loss, v_reg_loss, o_reg_loss = criterion(score, vertical_pred, side_refinement, positive,
negative, vertical_reg, side_refinement_reg)
loss.backward()
optimizer.step()
iteration += 1
# save gpu memory by transferring loss to float
total_loss += float(loss)
total_cls_loss += float(cls_loss)
total_v_reg_loss += float(v_reg_loss)
total_o_reg_loss += float(o_reg_loss)
if iteration % display_iter == 0:
end_time = time.time()
total_time = end_time - start_time
print('Epoch: {2}/{3}, Iteration: {0}/{1}, loss: {4}, cls_loss: {5}, v_reg_loss: {6}, o_reg_loss: {7}, {8}'.
format(iteration, total_iter, i, epoch, total_loss / display_iter, total_cls_loss / display_iter,
total_v_reg_loss / display_iter, total_o_reg_loss / display_iter, im))
logger.info('Epoch: {2}/{3}, Iteration: {0}/{1}'.format(iteration, total_iter, i, epoch))
logger.info('loss: {0}'.format(total_loss / display_iter))
logger.info('classification loss: {0}'.format(total_cls_loss / display_iter))
logger.info('vertical regression loss: {0}'.format(total_v_reg_loss / display_iter))
logger.info('side-refinement regression loss: {0}'.format(total_o_reg_loss / display_iter))
train_loss_list.append(total_loss)
total_loss = 0
total_cls_loss = 0
total_v_reg_loss = 0
total_o_reg_loss = 0
start_time = time.time()
if iteration % val_iter == 0:
net.eval()
logger.info('Start evaluate at {0} epoch {1} iteration.'.format(i, iteration))
val_loss = evaluate.val(net, criterion, val_batch_size, using_cuda, logger, val_im_list)
logger.info('End evaluate.')
net.train()
start_time = time.time()
test_loss_list.append(val_loss)
if iteration % save_iter == 0:
print('Model saved at ./model/ctpn-{0}-{1}.model'.format(i, iteration))
torch.save(net.state_dict(), os.path.join(MODEL_SAVE_PATH, 'ctpn-msra_ali-{0}-{1}.model'.format(i, iteration)))
print('Model saved at ./model/ctpn-{0}-end.model'.format(i))
torch.save(net.state_dict(), os.path.join(MODEL_SAVE_PATH, 'ctpn-msra_ali-{0}-end.model'.format(i)))
draw_loss_plot(train_loss_list, test_loss_list)
|
test_custom/models.py | AllFactors/django-organizations | 855 | 12630121 | from django.db import models
from organizations.models import Organization
class Team(Organization):
sport = models.CharField(max_length=100, blank=True, null=True)
|
pycoinnet/helpers/standards.py | Jsn2win/pycoinnet | 114 | 12630125 | import asyncio
import logging
import os
import time
from pycoinnet.PeerAddress import PeerAddress
logging = logging.getLogger("standards")
class BitcoinProtocolError(Exception):
pass
def manage_connection_count(host_port_queue, protocol_factory, connection_count=4):
"""
host_port_queue: a queue of (host, port) tuples
protocol_factory: the callback passed to EventLoop.create_connection
connection_count: number of connections to keep established
"""
event_q = asyncio.Queue()
# asyncio.Task doesn't create any non-weak references to the task,
# so we have to put them in a container with a strong reference
# or they may be garbage collected.
event_q.tasks = set()
@asyncio.coroutine
def run():
while True:
host, port = yield from host_port_queue.get()
logging.debug("got %s:%d from connection pool", host, port)
logging.info("connecting to %s:%d" % (host, port))
try:
transport, protocol = yield from asyncio.get_event_loop().create_connection(
protocol_factory, host=host, port=port)
logging.info("connected (tcp) to %s:%d", host, port)
event_q.put_nowait(("connect", (host, port), protocol))
yield from asyncio.wait_for(protocol.connection_lost_future, timeout=None)
event_q.put_nowait(("disconnect", (host, port), protocol))
except Exception:
logging.exception("failed to connect to %s:%d", host, port)
for i in range(connection_count):
# we add the tasks to a set on event_q so they're not garbage
# collected until event_q is.
event_q.tasks.add(asyncio.Task(run()))
return event_q
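# Illustration only (not part of the original module): seed a queue with known (host, port)
# pairs and hand it to manage_connection_count. The address list and connection count are
# placeholder assumptions.
def example_manage_connections(protocol_factory, addresses, connection_count=2):
    host_port_q = asyncio.Queue()
    for host, port in addresses:
        host_port_q.put_nowait((host, port))
    return manage_connection_count(host_port_q, protocol_factory, connection_count=connection_count)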
@asyncio.coroutine
def create_server(protocol_factory, port):
"""
Listen on a port and create new peers on that connection.
"""
abstract_server = yield from asyncio.get_event_loop().create_server(
protocol_factory=protocol_factory, port=port)
return abstract_server
def version_data_for_peer(
peer, version=70001, local_ip="127.0.0.1", local_port=6111, last_block_index=0,
nonce=None, subversion=b"/Notoshi/", timestamp=None, want_relay=True):
remote_ip, remote_port = peer.peername[:2]
remote_addr = PeerAddress(1, remote_ip, remote_port)
local_addr = PeerAddress(1, local_ip, local_port)
nonce = nonce or int.from_bytes(os.urandom(8), byteorder="big")
timestamp = timestamp or int(time.time())
d = dict(
version=version, subversion=subversion, services=1, timestamp=timestamp,
remote_address=remote_addr, local_address=local_addr,
nonce=nonce,
last_block_index=last_block_index, want_relay=want_relay
)
return d
@asyncio.coroutine
def initial_handshake(peer, version_parameters):
# do handshake
next_message = peer.new_get_next_message_f()
peer.send_msg("version", **version_parameters)
message_name, version_data = yield from next_message()
if message_name != 'version':
raise BitcoinProtocolError("missing version")
peer.send_msg("verack")
message_name, data = yield from next_message()
if message_name != 'verack':
raise BitcoinProtocolError("missing verack")
logging.info("handshake complete with %s, version_data => %s", peer, version_data)
return version_data
def install_ping_manager(peer, heartbeat_rate=60, missing_pong_disconnect_timeout=60):
@asyncio.coroutine
def ping_task(next_message):
while True:
try:
yield from asyncio.wait_for(next_message(), timeout=heartbeat_rate)
continue
except asyncio.TimeoutError:
pass
# oh oh! no messages
# send a ping
nonce = int.from_bytes(os.urandom(8), byteorder="big")
peer.send_msg("ping", nonce=nonce)
end_time = time.time() + missing_pong_disconnect_timeout
while True:
try:
timeout = end_time - time.time()
name, data = yield from asyncio.wait_for(next_message(), timeout=timeout)
if name == "pong" and data["nonce"] == nonce:
break
except asyncio.TimeoutError:
peer.connection_lost(None)
logging.error("remote peer %s didn't answer ping, disconnecting", peer)
return
next_message = peer.new_get_next_message_f()
peer.add_task(ping_task(next_message))
def install_pong_manager(peer):
@asyncio.coroutine
def pong_task(next_message):
while True:
name, data = yield from next_message()
assert name == 'ping'
peer.send_msg("pong", nonce=data["nonce"])
next_message = peer.new_get_next_message_f(lambda name, data: name == 'ping')
peer.add_task(pong_task(next_message))
def install_pingpong_manager(peer):
install_ping_manager(peer)
install_pong_manager(peer)
@asyncio.coroutine
def get_date_address_tuples(peer):
next_message = peer.new_get_next_message_f(lambda name, data: name == 'addr')
peer.send_msg("getaddr")
name, data = yield from next_message()
return data["date_address_tuples"]
@asyncio.coroutine
def get_headers_hashes(peer, until_block_hash):
hashes = [until_block_hash]
peer.send_msg(message_name="getheaders", version=1, hashes=hashes, hash_stop=until_block_hash)
next_message = peer.new_get_next_message_f(lambda name, data: name == 'headers')
name, data = yield from next_message()
headers = [bh for bh, t in data["headers"]]
return headers
@asyncio.coroutine
def do_get_headers(peer, block_locator_hashes, hash_stop=b'\0'*32):
peer.send_msg(message_name="getheaders", version=1, hashes=block_locator_hashes, hash_stop=hash_stop)
next_message = peer.new_get_next_message_f(lambda name, data: name == 'headers')
name, data = yield from next_message()
headers = [bh for bh, t in data["headers"]]
return headers
|
integration/tests/error_assert_file.py | youhavethewrong/hurl | 1,013 | 12630127 | from tests import app
@app.route("/error-assert-file")
def error_assert_file():
return 'Hello' |
tests/test_normalise.py | ecmwf/climetlab | 182 | 12630148 | #!/usr/bin/env python3
# (C) Copyright 2020 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
# In applying this licence, ECMWF does not waive the privileges and immunities
# granted to it by virtue of its status as an intergovernmental organisation
# nor does it submit to any jurisdiction.
#
import datetime
import sys
import numpy as np
import pytest
from climetlab import ALL, load_source
from climetlab.decorators import normalize
from climetlab.normalize import DateListNormaliser, EnumListNormaliser, EnumNormaliser
from climetlab.testing import climetlab_file
from climetlab.utils.bbox import BoundingBox
@normalize("parameter", ("variable-list(mars)"))
def values_mars(parameter):
return parameter
@normalize("parameter", ("variable-list(cf)"))
def values_cf(parameter):
return parameter
def test_param_convention_mars():
assert values_mars(parameter="tp") == "tp"
assert values_mars(parameter="2t") == "2t"
assert values_mars(parameter="t2m") == "2t"
assert values_mars(parameter=["t2m", "tp"]) == ["2t", "tp"]
assert values_mars(parameter="whatever") == "whatever"
def test_param_convention_cf():
assert values_cf(parameter="tp") == "tp"
assert values_cf(parameter="2t") == "t2m"
assert values_cf(parameter="t2m") == "t2m"
@normalize("date", "date")
def dates_1(date):
return date
@normalize("date", "date-list")
def dates_list_1(date):
return date
def test_dates():
npdate = np.datetime64("2016-01-01")
assert dates_1(date=npdate) == datetime.datetime(2016, 1, 1)
assert dates_list_1(date=npdate) == [datetime.datetime(2016, 1, 1)]
source = load_source("file", climetlab_file("docs/examples/test.grib"))
assert dates_1(source[0]) == datetime.datetime(2020, 5, 13, 12, 0)
assert dates_list_1(source[0]) == [datetime.datetime(2020, 5, 13, 12, 0)]
source = load_source("file", climetlab_file("docs/examples/test.nc"))
# For now
with pytest.raises(NotImplementedError):
assert dates_1(source[0]) == datetime.datetime(2020, 5, 13, 12, 0)
assert dates_list_1(source[0]) == [datetime.datetime(2020, 5, 13, 12, 0)]
def test_dates_no_list():
norm = DateListNormaliser("%Y.%m.%d")
assert norm("20200513") == ["2020.05.13"]
assert norm([datetime.datetime(2020, 5, 13, 0, 0)]) == ["2020.05.13"]
assert norm([datetime.datetime(2020, 5, 13, 23, 59)]) == ["2020.05.13"]
# def test_dates_with_list():
# norm = DateListNormaliser("%Y.%m.%d", valid=["2020.05.13"] )
# assert norm("20200513") == ["2020.05.13"]
# assert norm([datetime.datetime(2020, 5, 13, 12, 0)]) == ["2020.05.13"]
#
# with pytest.raises(ValueError):
# assert norm("19991231")
def test_dates_3():
norm = DateListNormaliser()
assert norm("20200513") == [datetime.datetime(2020, 5, 13, 0, 0)]
assert norm([datetime.datetime(2020, 5, 13, 0, 0)]) == [
datetime.datetime(2020, 5, 13, 0, 0)
]
@normalize("area", "bounding-box")
def bbox_list(ignore, area):
return area
@normalize("area", "bounding-box(tuple)")
def bbox_tuple(area, ignore=None):
return area
@normalize("area", "bounding-box(list)")
def bbox_bbox(area):
return area
@normalize("area", "bounding-box(dict)")
def bbox_dict(area):
return area
@normalize("area", "bounding-box")
def bbox_defaults(area=None):
return area
# def test_enum_definition():
@normalize("name", ("a", "b", "c"))
def enum_1(name="a"):
return name
@normalize("name", ("a", "b", "c"))
def enum_no_default(name):
return name
@normalize("name", ("a", "b", "c"))
def enum_default_is_none(name=None):
return name
@normalize("name", (1, 0.5, 3))
def enum_number(name=1):
return name
# for k, v in vars().items():
# globals()[k] = v
# def test_enum_list_definition():
@normalize("name", ["a", "b", "c"])
def enum_list_1(name="a"):
return name
@normalize("name", ["a", "b", "c"])
def enum_list_no_default(name):
return name
@normalize("name", ["a", "b", "c"])
def enum_list_default_is_none(name=None):
return name
@normalize("name", ["a", "b", "c"])
def enum_list_default_is_all(name=ALL):
return name
@normalize("name", [1, 0.5, 3])
def enum_list_number(name=1):
return name
@normalize("a", [1, 2])
@normalize("b", [3, 4])
def enum_2_normalizers(a, b):
return a
def test_enum_2_normalizers():
enum_2_normalizers(a=1, b=4)
# enum_2_normalizers(1,4)
@normalize(
"name",
["a", "b", "c"],
alias={
"ab": ["a", "b"],
"z": "a",
"i": ["a", "b"],
"j": "ab",
"bad": ["a", "ab"],
},
)
def enum_list_alias_1(name=1):
return name
def test_enum_list_alias_1():
assert enum_list_alias_1("a") == ["a"]
assert enum_list_alias_1("b") == ["b"]
assert enum_list_alias_1("ab") == ["a", "b"]
assert enum_list_alias_1("z") == ["a"]
assert enum_list_alias_1(["z", "b"]) == ["a", "b"]
assert enum_list_alias_1("i") == ["a", "b"]
assert enum_list_alias_1("j") == ["a", "b"]
with pytest.raises(ValueError):
enum_list_alias_1("bad")
@normalize(
"name",
[1, 2, 3],
alias=lambda x: {"one": 1, "o": "one"}.get(x, x),
)
def enum_list_alias_2(name=1):
return name
def test_enum_list_alias_2():
assert enum_list_alias_2(1) == [1]
assert enum_list_alias_2("one") == [1]
assert enum_list_alias_2(["one"]) == [1]
assert enum_list_alias_2(["o"]) == [1]
@normalize("name", ["a", "b", "c"], alias={"x": "y", "y": "z", "z": "a"})
def enum_alias(name=1):
return name
def test_enum_alias():
assert enum_alias("a") == ["a"]
assert enum_alias("b") == ["b"]
assert enum_alias("x") == ["a"]
assert enum_alias("y") == ["a"]
assert enum_alias("z") == ["a"]
# for k, v in vars().items():
# globals()[k] = v
def test_enum_decorator():
assert enum_1("a") == "a"
assert enum_1("b") == "b"
assert enum_1() == "a"
with pytest.raises(ValueError):
enum_1("z")
with pytest.raises(ValueError):
enum_1(["a", "b"])
def test_enum_decorator_default():
assert enum_no_default("a") == "a"
assert enum_default_is_none("a") == "a"
with pytest.raises(ValueError):
enum_default_is_none()
with pytest.raises(TypeError):
enum_no_default()
def test_enum():
enum_3 = EnumNormaliser(["a", "b", "c"])
assert enum_3("a") == "a"
assert enum_3("b") == "b"
with pytest.raises(ValueError):
enum_3("z")
with pytest.raises(ValueError):
enum_3(ALL)
def test_enum_list_decorator_default():
assert enum_list_no_default("a") == ["a"]
assert enum_list_default_is_none("a") == ["a"]
assert enum_list_default_is_none() == ["a", "b", "c"]
assert enum_list_default_is_all() == ["a", "b", "c"]
assert enum_list_number(1.0) == [1]
with pytest.raises(ValueError):
enum_list_number("1")
# with pytest.raises(ValueError):
# enum_list_default_is_none()
with pytest.raises(TypeError):
enum_list_no_default()
def test_enum_list_case_sensitive():
enum_5 = EnumListNormaliser(["A", "b", "c"])
assert enum_5(ALL) == ["A", "b", "c"]
assert enum_5("a") == ["A"]
assert enum_5("A") == ["A"]
assert enum_5(["a", "B"]) == ["A", "b"]
def test_bbox():
area = [30.0, 2.0, 3.0, 4.0]
bbox = BoundingBox(north=30, west=2, south=3, east=4)
assert bbox_list(None, area) == bbox
assert bbox_list(area=area, ignore=None) == bbox
assert bbox_tuple(area) == tuple(area)
assert bbox_tuple(area=area) == tuple(area)
assert bbox_bbox(area) == area
assert bbox_dict(area) == dict(north=30, west=2, south=3, east=4)
assert bbox_defaults(area) == bbox
source = load_source("file", climetlab_file("docs/examples/test.grib"))
assert bbox_tuple(source[0]) == (73.0, -27.0, 33.0, 45.0)
source = load_source("file", climetlab_file("docs/examples/test.nc"))
assert bbox_tuple(source[0]) == (73.0, -27.0, 33.0, 45.0)
def test_normalize_kwargs():
class Klass:
@normalize("param", ["a", "b", "c"])
def ok(self, param):
pass
@normalize("param", ["a", "b", "c"])
def f(self, **kwargs):
# def f(self, param, **kwargs):
assert "param" in kwargs
Klass().ok(param="a")
Klass().f(param="a")
@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python < 3.8")
def test_normalize_advanced_1():
exec(
"""
# def f(a,/, b, c=4,*, x=3):
# return a,b,c,x
# args = ['A']
# kwargs=dict(b=2, c=4)
@normalize("b", ["B", "BB"])
def f(a, /, b, c=4, *, x=3):
return a, b, c, x
out = f("A", b="B", c=7, x=8)
assert out == ("A", ["B"], 7, 8)
"""
)
@pytest.mark.skipif(sys.version_info < (3, 8), reason="Python < 3.8")
def test_normalize_advanced_2():
exec(
"""
@normalize("b", ["B", "BB"])
@normalize("a", ["A", "AA"])
def g(a, /, b, c=4, *, x=3):
return a, b, c, x
out = g("A", b="B", c=7, x=8)
assert out == (["A"], ["B"], 7, 8)
"""
)
def test_normalize_advanced_3():
from climetlab.normalize import _find_normaliser as normaliser
assert normaliser((1, 2), type=str, multiple=True)(1) == ["1"]
assert normaliser((1, 2), type=str, multiple=True)((1, 2)) == ["1", "2"]
if __name__ == "__main__":
# test_normalize_advanced_3()
from climetlab.testing import main
main(__file__)
|
tests_nntrainer/test_types.py | mzolfaghari/coot-videotext | 213 | 12630149 | """
Test types.
"""
import json
import tempfile
from pathlib import Path
import pydantic
import pytest
import torch as th
from nntrainer import typext


def test_typednamedtuple() -> None:
    """
    Test typed named tuples class based on pydantic.BaseModel
    """
    class ExampleTuple(typext.TypedNamedTuple):
        key: str
        data: th.Tensor
        # data tensor should have shape (X, 4) where X is arbitrary.
        _shapes_dict = {
            "data": (None, 4)}

    # correct input
    t = ExampleTuple("key", th.zeros(7, 4))

    # correct input with mixed args, kwargs
    ExampleTuple("key", data=th.zeros(3, 4))
    ExampleTuple(key="key", data=th.zeros(3, 4))

    # not enough input
    with pytest.raises(pydantic.ValidationError):
        ExampleTuple("key")

    # duplicate argument
    with pytest.raises(AssertionError):
        ExampleTuple("key", key="key", data=th.zeros(3, 4))

    # too many arguments
    with pytest.raises(AssertionError):
        ExampleTuple("key", th.zeros(3, 4), None)

    # wrong type
    with pytest.raises(pydantic.ValidationError):
        ExampleTuple(False, 0)

    # wrong shape
    with pytest.raises(AssertionError):
        ExampleTuple("key", th.zeros(3, 6))
    with pytest.raises(AssertionError):
        ExampleTuple("key", th.zeros(6))
    with pytest.raises(AssertionError):
        ExampleTuple("key", th.zeros(4, 1, 1))

    # test dict and tuple access
    assert isinstance(t.dict(), dict)
    assert t.dict()["key"] == "key"
    assert isinstance(t.tuple(), tuple)
    assert t.tuple()[0] == "key"


def test_saveablebasemodel() -> None:
    """
    Test saveable base model based on pydantic.BaseModel
    """
    class TestState(typext.SaveableBaseModel):
        test_field: int = 1

    input_dict = {"test_field": 7}
    t1 = TestState(**input_dict)
    print(t1)

    tf = Path(tempfile.gettempdir()) / "temp_nntrainer.tmp"
    t1.save(tf)
    file_content = json.load(tf.open(encoding="utf8"))
    assert file_content == input_dict, f"{file_content} vs {input_dict}"

    t2 = TestState().load(tf)
    assert t1 == t2
    t3 = TestState.create_from_file(tf)
    assert t1 == t3

    wrong_dict = {"test_field": "str"}
    json.dump(wrong_dict, tf.open("wt", encoding="utf8"))
    with pytest.raises(pydantic.ValidationError):
        TestState.create_from_file(tf)
    with pytest.raises(pydantic.ValidationError):
        TestState().load(tf)


if __name__ == "__main__":
    test_typednamedtuple()
    test_saveablebasemodel()
|
test/hummingbot/connector/derivative/bybit_perpetual/test_bybit_perpetual_utils.py | BGTCapital/hummingbot | 3,027 | 12630152 | import pandas as pd
from unittest import TestCase
from unittest.mock import patch
from hummingbot.connector.derivative.bybit_perpetual import bybit_perpetual_constants as CONSTANTS, bybit_perpetual_utils as utils
class BybitPerpetualUtilsTests(TestCase):
@patch('hummingbot.connector.derivative.bybit_perpetual.bybit_perpetual_utils.get_tracking_nonce')
def test_client_order_id_creation(self, nonce_provider_mock):
nonce_provider_mock.return_value = int(1e15)
self.assertEqual("HBOT-B-BTC-USDT-1000000000000000", utils.get_new_client_order_id(True, "BTC-USDT"))
nonce_provider_mock.return_value = int(1e15) + 1
self.assertEqual("HBOT-S-ETH-USDT-1000000000000001", utils.get_new_client_order_id(False, "ETH-USDT"))
def test_trading_pair_convertion(self):
trading_pair = "BTC-USDT"
self.assertEqual("BTCUSDT", utils.convert_to_exchange_trading_pair(trading_pair))
def test_rest_api_path_for_endpoint(self):
endpoint = {"linear": "/testEndpoint/linear",
"non_linear": "/testEndpoint/non_linear"}
api_path = utils.rest_api_path_for_endpoint(endpoint=endpoint)
self.assertEqual("/testEndpoint/non_linear", api_path)
api_path = utils.rest_api_path_for_endpoint(endpoint=endpoint, trading_pair="BTC-USD")
self.assertEqual("/testEndpoint/non_linear", api_path)
api_path = utils.rest_api_path_for_endpoint(endpoint=endpoint, trading_pair="BTC-USDT")
self.assertEqual("/testEndpoint/linear", api_path)
def test_rest_api_url(self):
endpoint = "/testEndpoint"
url = utils.rest_api_url_for_endpoint(endpoint=endpoint, domain=None, )
self.assertEqual(CONSTANTS.REST_URLS.get("bybit_perpetual_main") + "/testEndpoint", url)
url = utils.rest_api_url_for_endpoint(endpoint=endpoint, domain="bybit_perpetual_main")
self.assertEqual(CONSTANTS.REST_URLS.get("bybit_perpetual_main") + "/testEndpoint", url)
url = utils.rest_api_url_for_endpoint(endpoint=endpoint, domain="bybit_perpetual_testnet")
self.assertEqual(CONSTANTS.REST_URLS.get("bybit_perpetual_testnet") + "/testEndpoint", url)
def test_wss_linear_public_url(self):
url = utils.wss_linear_public_url(None)
self.assertEqual(CONSTANTS.WSS_LINEAR_PUBLIC_URLS.get("bybit_perpetual_main"), url)
url = utils.wss_linear_public_url("bybit_perpetual_main")
self.assertEqual(CONSTANTS.WSS_LINEAR_PUBLIC_URLS.get("bybit_perpetual_main"), url)
url = utils.wss_linear_public_url("bybit_perpetual_testnet")
self.assertEqual(CONSTANTS.WSS_LINEAR_PUBLIC_URLS.get("bybit_perpetual_testnet"), url)
def test_wss_linear_private_url(self):
url = utils.wss_linear_private_url(None)
self.assertEqual(CONSTANTS.WSS_LINEAR_PRIVATE_URLS.get("bybit_perpetual_main"), url)
url = utils.wss_linear_private_url("bybit_perpetual_main")
self.assertEqual(CONSTANTS.WSS_LINEAR_PRIVATE_URLS.get("bybit_perpetual_main"), url)
url = utils.wss_linear_private_url("bybit_perpetual_testnet")
self.assertEqual(CONSTANTS.WSS_LINEAR_PRIVATE_URLS.get("bybit_perpetual_testnet"), url)
def test_wss_non_linear_public_url(self):
url = utils.wss_non_linear_public_url(None)
self.assertEqual(CONSTANTS.WSS_NON_LINEAR_PUBLIC_URLS.get("bybit_perpetual_main"), url)
url = utils.wss_non_linear_public_url("bybit_perpetual_main")
self.assertEqual(CONSTANTS.WSS_NON_LINEAR_PUBLIC_URLS.get("bybit_perpetual_main"), url)
url = utils.wss_non_linear_public_url("bybit_perpetual_testnet")
self.assertEqual(CONSTANTS.WSS_NON_LINEAR_PUBLIC_URLS.get("bybit_perpetual_testnet"), url)
def test_wss_non_linear_private_url(self):
url = utils.wss_non_linear_private_url(None)
self.assertEqual(CONSTANTS.WSS_NON_LINEAR_PRIVATE_URLS.get("bybit_perpetual_main"), url)
url = utils.wss_non_linear_private_url("bybit_perpetual_main")
self.assertEqual(CONSTANTS.WSS_NON_LINEAR_PRIVATE_URLS.get("bybit_perpetual_main"), url)
url = utils.wss_non_linear_private_url("bybit_perpetual_testnet")
self.assertEqual(CONSTANTS.WSS_NON_LINEAR_PRIVATE_URLS.get("bybit_perpetual_testnet"), url)
def test_get_next_funding_timestamp(self):
# Simulate 01:00 UTC
timestamp = pd.Timestamp("2021-08-21-01:00:00", tz="UTC").timestamp()
expected_ts = pd.Timestamp("2021-08-21-08:00:00", tz="UTC").timestamp()
self.assertEqual(expected_ts, utils.get_next_funding_timestamp(timestamp))
# Simulate 09:00 UTC
timestamp = pd.Timestamp("2021-08-21-09:00:00", tz="UTC").timestamp()
expected_ts = pd.Timestamp("2021-08-21-16:00:00", tz="UTC").timestamp()
self.assertEqual(expected_ts, utils.get_next_funding_timestamp(timestamp))
# Simulate 17:00 UTC
timestamp = pd.Timestamp("2021-08-21-17:00:00", tz="UTC").timestamp()
expected_ts = pd.Timestamp("2021-08-22-00:00:00", tz="UTC").timestamp()
self.assertEqual(expected_ts, utils.get_next_funding_timestamp(timestamp))
|
dfvfs/vfs/gzip_file_entry.py | dfjxs/dfvfs | 176 | 12630157 | # -*- coding: utf-8 -*-
"""The gzip file entry implementation."""
from dfdatetime import posix_time as dfdatetime_posix_time
from dfvfs.lib import definitions
from dfvfs.lib import errors
from dfvfs.resolver import resolver
from dfvfs.vfs import root_only_file_entry
class GzipFileEntry(root_only_file_entry.RootOnlyFileEntry):
"""File system file entry that uses gzip."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_GZIP
def __init__(
self, resolver_context, file_system, path_spec, is_root=False,
is_virtual=False):
"""Initializes a file entry.
Args:
resolver_context (Context): resolver context.
file_system (FileSystem): file system.
path_spec (PathSpec): path specification.
is_root (Optional[bool]): True if the file entry is the root file entry
of the corresponding file system.
is_virtual (Optional[bool]): True if the file entry is a virtual file
Raises:
BackEndError: when the gzip file is missing.
"""
gzip_file = resolver.Resolver.OpenFileObject(
path_spec, resolver_context=resolver_context)
if not gzip_file:
raise errors.BackEndError('Missing gzip file.')
super(GzipFileEntry, self).__init__(
resolver_context, file_system, path_spec, is_root=is_root,
is_virtual=is_virtual)
self._gzip_file = gzip_file
self.entry_type = definitions.FILE_ENTRY_TYPE_FILE
@property
def modification_time(self):
"""dfdatetime.DateTimeValues: modification time or None if not available."""
timestamps = self._gzip_file.modification_times
if not timestamps:
return None
return dfdatetime_posix_time.PosixTime(timestamp=timestamps[0])
@property
def size(self):
"""int: size of the file entry in bytes or None if not available."""
return self._gzip_file.uncompressed_data_size
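

# Illustrative usage sketch (not part of dfvfs): create a small gzip file and
# open it through the path-spec/resolver machinery. The temporary file path and
# payload below are arbitrary example values; this runs only when the module is
# executed directly.
if __name__ == '__main__':
  import gzip
  import tempfile

  from dfvfs.path import factory as path_spec_factory

  with tempfile.NamedTemporaryFile(suffix='.gz', delete=False) as temporary_file:
    temporary_path = temporary_file.name
  with gzip.open(temporary_path, 'wb') as compressed_file:
    compressed_file.write(b'hello world')

  os_path_spec = path_spec_factory.Factory.NewPathSpec(
      definitions.TYPE_INDICATOR_OS, location=temporary_path)
  gzip_path_spec = path_spec_factory.Factory.NewPathSpec(
      definitions.TYPE_INDICATOR_GZIP, parent=os_path_spec)

  file_entry = resolver.Resolver.OpenFileEntry(gzip_path_spec)
  print(file_entry.size, file_entry.modification_time)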
|
third_party/boringssl/BUILD.generated_tests.bzl | brandonpollack23/bazel-buildfiles-upstream | 142 | 12630170 | <reponame>brandonpollack23/bazel-buildfiles-upstream
# This file is created by generate_build_files.py. Do not edit manually.
test_support_sources = [
"src/crypto/test/file_test.cc",
"src/crypto/test/test_util.cc",
]
def create_tests(copts):
test_support_sources_complete = test_support_sources + \
native.glob(["src/crypto/test/*.h"])
native.cc_test(
name = "aes_test",
size = "small",
srcs = ["src/crypto/aes/aes_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "base64_test",
size = "small",
srcs = ["src/crypto/base64/base64_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "bio_test",
size = "small",
srcs = ["src/crypto/bio/bio_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "bn_test",
size = "small",
srcs = ["src/crypto/bn/bn_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "bytestring_test",
size = "small",
srcs = ["src/crypto/bytestring/bytestring_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_128_gcm",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-128-gcm",
"$(location src/crypto/cipher/test/aes_128_gcm_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_128_gcm_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_128_key_wrap",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-128-key-wrap",
"$(location src/crypto/cipher/test/aes_128_key_wrap_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_128_key_wrap_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_256_gcm",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-256-gcm",
"$(location src/crypto/cipher/test/aes_256_gcm_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_256_gcm_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_256_key_wrap",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-256-key-wrap",
"$(location src/crypto/cipher/test/aes_256_key_wrap_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_256_key_wrap_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_chacha20_poly1305",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"chacha20-poly1305",
"$(location src/crypto/cipher/test/chacha20_poly1305_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/chacha20_poly1305_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_chacha20_poly1305_old",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"chacha20-poly1305-old",
"$(location src/crypto/cipher/test/chacha20_poly1305_old_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/chacha20_poly1305_old_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_rc4_md5_tls",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"rc4-md5-tls",
"$(location src/crypto/cipher/test/rc4_md5_tls_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/rc4_md5_tls_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_rc4_sha1_tls",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"rc4-sha1-tls",
"$(location src/crypto/cipher/test/rc4_sha1_tls_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/rc4_sha1_tls_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_128_cbc_sha1_tls",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-128-cbc-sha1-tls",
"$(location src/crypto/cipher/test/aes_128_cbc_sha1_tls_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_128_cbc_sha1_tls_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_128_cbc_sha1_tls_implicit_iv",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-128-cbc-sha1-tls-implicit-iv",
"$(location src/crypto/cipher/test/aes_128_cbc_sha1_tls_implicit_iv_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_128_cbc_sha1_tls_implicit_iv_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_128_cbc_sha256_tls",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-128-cbc-sha256-tls",
"$(location src/crypto/cipher/test/aes_128_cbc_sha256_tls_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_128_cbc_sha256_tls_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_256_cbc_sha1_tls",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-256-cbc-sha1-tls",
"$(location src/crypto/cipher/test/aes_256_cbc_sha1_tls_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_256_cbc_sha1_tls_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_256_cbc_sha1_tls_implicit_iv",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-256-cbc-sha1-tls-implicit-iv",
"$(location src/crypto/cipher/test/aes_256_cbc_sha1_tls_implicit_iv_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_256_cbc_sha1_tls_implicit_iv_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_256_cbc_sha256_tls",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-256-cbc-sha256-tls",
"$(location src/crypto/cipher/test/aes_256_cbc_sha256_tls_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_256_cbc_sha256_tls_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_256_cbc_sha384_tls",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-256-cbc-sha384-tls",
"$(location src/crypto/cipher/test/aes_256_cbc_sha384_tls_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_256_cbc_sha384_tls_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_des_ede3_cbc_sha1_tls",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"des-ede3-cbc-sha1-tls",
"$(location src/crypto/cipher/test/des_ede3_cbc_sha1_tls_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/des_ede3_cbc_sha1_tls_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_des_ede3_cbc_sha1_tls_implicit_iv",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"des-ede3-cbc-sha1-tls-implicit-iv",
"$(location src/crypto/cipher/test/des_ede3_cbc_sha1_tls_implicit_iv_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/des_ede3_cbc_sha1_tls_implicit_iv_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_rc4_md5_ssl3",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"rc4-md5-ssl3",
"$(location src/crypto/cipher/test/rc4_md5_ssl3_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/rc4_md5_ssl3_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_rc4_sha1_ssl3",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"rc4-sha1-ssl3",
"$(location src/crypto/cipher/test/rc4_sha1_ssl3_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/rc4_sha1_ssl3_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_128_cbc_sha1_ssl3",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-128-cbc-sha1-ssl3",
"$(location src/crypto/cipher/test/aes_128_cbc_sha1_ssl3_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_128_cbc_sha1_ssl3_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_256_cbc_sha1_ssl3",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-256-cbc-sha1-ssl3",
"$(location src/crypto/cipher/test/aes_256_cbc_sha1_ssl3_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_256_cbc_sha1_ssl3_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_des_ede3_cbc_sha1_ssl3",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"des-ede3-cbc-sha1-ssl3",
"$(location src/crypto/cipher/test/des_ede3_cbc_sha1_ssl3_tests.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/des_ede3_cbc_sha1_ssl3_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_128_ctr_hmac_sha256",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-128-ctr-hmac-sha256",
"$(location src/crypto/cipher/test/aes_128_ctr_hmac_sha256.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_128_ctr_hmac_sha256.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "aead_test_aes_256_ctr_hmac_sha256",
size = "small",
srcs = ["src/crypto/cipher/aead_test.cc"] + test_support_sources_complete,
args = [
"aes-256-ctr-hmac-sha256",
"$(location src/crypto/cipher/test/aes_256_ctr_hmac_sha256.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/aes_256_ctr_hmac_sha256.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "cipher_test",
size = "small",
srcs = ["src/crypto/cipher/cipher_test.cc"] + test_support_sources_complete,
args = [
"$(location src/crypto/cipher/test/cipher_test.txt)",
],
copts = copts,
data = [
"src/crypto/cipher/test/cipher_test.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "cmac_test",
size = "small",
srcs = ["src/crypto/cmac/cmac_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "constant_time_test",
size = "small",
srcs = ["src/crypto/constant_time_test.c"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "ed25519_test",
size = "small",
srcs = ["src/crypto/curve25519/ed25519_test.cc"] + test_support_sources_complete,
args = [
"$(location src/crypto/curve25519/ed25519_tests.txt)",
],
copts = copts,
data = [
"src/crypto/curve25519/ed25519_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "x25519_test",
size = "small",
srcs = ["src/crypto/curve25519/x25519_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "dh_test",
size = "small",
srcs = ["src/crypto/dh/dh_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "digest_test",
size = "small",
srcs = ["src/crypto/digest/digest_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "dsa_test",
size = "small",
srcs = ["src/crypto/dsa/dsa_test.c"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "ec_test",
size = "small",
srcs = ["src/crypto/ec/ec_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "example_mul",
size = "small",
srcs = ["src/crypto/ec/example_mul.c"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "ecdsa_test",
size = "small",
srcs = ["src/crypto/ecdsa/ecdsa_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "err_test",
size = "small",
srcs = ["src/crypto/err/err_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "evp_extra_test",
size = "small",
srcs = ["src/crypto/evp/evp_extra_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "evp_test",
size = "small",
srcs = ["src/crypto/evp/evp_test.cc"] + test_support_sources_complete,
args = [
"$(location src/crypto/evp/evp_tests.txt)",
],
copts = copts,
data = [
"src/crypto/evp/evp_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "pbkdf_test",
size = "small",
srcs = ["src/crypto/evp/pbkdf_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "hkdf_test",
size = "small",
srcs = ["src/crypto/hkdf/hkdf_test.c"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "hmac_test",
size = "small",
srcs = ["src/crypto/hmac/hmac_test.cc"] + test_support_sources_complete,
args = [
"$(location src/crypto/hmac/hmac_tests.txt)",
],
copts = copts,
data = [
"src/crypto/hmac/hmac_tests.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "lhash_test",
size = "small",
srcs = ["src/crypto/lhash/lhash_test.c"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "gcm_test",
size = "small",
srcs = ["src/crypto/modes/gcm_test.c"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "pkcs8_test",
size = "small",
srcs = ["src/crypto/pkcs8/pkcs8_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "pkcs12_test",
size = "small",
srcs = ["src/crypto/pkcs8/pkcs12_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "poly1305_test",
size = "small",
srcs = ["src/crypto/poly1305/poly1305_test.cc"] + test_support_sources_complete,
args = [
"$(location src/crypto/poly1305/poly1305_test.txt)",
],
copts = copts,
data = [
"src/crypto/poly1305/poly1305_test.txt",
],
deps = [":crypto"],
)
native.cc_test(
name = "refcount_test",
size = "small",
srcs = ["src/crypto/refcount_test.c"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "rsa_test",
size = "small",
srcs = ["src/crypto/rsa/rsa_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "thread_test",
size = "small",
srcs = ["src/crypto/thread_test.c"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "pkcs7_test",
size = "small",
srcs = ["src/crypto/x509/pkcs7_test.c"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "x509_test",
size = "small",
srcs = ["src/crypto/x509/x509_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "tab_test",
size = "small",
srcs = ["src/crypto/x509v3/tab_test.c"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "v3name_test",
size = "small",
srcs = ["src/crypto/x509v3/v3name_test.c"] + test_support_sources_complete,
copts = copts,
deps = [":crypto"],
)
native.cc_test(
name = "pqueue_test",
size = "small",
srcs = ["src/ssl/pqueue/pqueue_test.c"] + test_support_sources_complete,
copts = copts,
deps = [
":crypto",
":ssl",
],
)
native.cc_test(
name = "ssl_test",
size = "small",
srcs = ["src/ssl/ssl_test.cc"] + test_support_sources_complete,
copts = copts,
deps = [
":crypto",
":ssl",
],
)
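
# Illustrative usage from a consuming BUILD file (a sketch, not generated
# output; the load label, the copts value and the pre-existing ":crypto"/":ssl"
# targets referenced by the deps above are assumptions):
#
#   load(":BUILD.generated_tests.bzl", "create_tests")
#   create_tests(copts = ["-Wno-unused-parameter"])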
|
test/nnUNetV1/network_training/nnUNetTrainer_pGDice.py | jianhuasong/medical-image-segmentation2 | 2,774 | 12630183 | from nnunet.training.loss_functions.dice_loss import PenaltyGDiceLoss
from nnunet.training.network_training.nnUNetTrainer import nnUNetTrainer
from nnunet.utilities.nd_softmax import softmax_helper


class nnUNetTrainer_pGDice(nnUNetTrainer):
    def __init__(self, plans_file, fold, output_folder=None, dataset_directory=None, batch_dice=True, stage=None,
                 unpack_data=True, deterministic=True, fp16=False):
        super().__init__(plans_file, fold, output_folder, dataset_directory, batch_dice, stage,
                         unpack_data, deterministic, fp16)
        # self.apply_nonlin = softmax_helper
        self.loss = PenaltyGDiceLoss({'batch_dice': self.batch_dice, 'smooth': 1e-5, 'do_bg': False})
|
tests/apis/test_default_namespace.py | tavaresrodrigo/kopf | 855 | 12630200 | from kopf._cogs.clients.api import get_default_namespace


async def test_default_namespace_when_unset(mocker, enforced_context):
    mocker.patch.object(enforced_context, 'default_namespace', None)
    ns = await get_default_namespace()
    assert ns is None


async def test_default_namespace_when_set(mocker, enforced_context):
    mocker.patch.object(enforced_context, 'default_namespace', 'xyz')
    ns = await get_default_namespace()
    assert ns == 'xyz'
|
examples/from-wiki/gl_interop.py | hesom/pycuda | 1,264 | 12630207 | #!python
# GL interoperability example, by <NAME>.
# Draws a rotating teapot, using cuda to invert the RGB value
# each frame
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL.ARB.vertex_buffer_object import *
from OpenGL.GL.ARB.pixel_buffer_object import *
import numpy, sys, time
import ctypes  # needed for the ctypes.c_void_p(0) offsets passed to glReadPixels/glTexSubImage2D below
import pycuda.driver as cuda_driver
import pycuda.gl as cuda_gl
from pycuda.compiler import SourceModule
#this is all munged together from the CUDA SDK postprocessGL example.
initial_size = 512,512
current_size = initial_size
animate = True
enable_cuda = True
window = None # Number of the glut window.
time_of_last_draw = 0.0
time_of_last_titleupdate = 0.0
frames_per_second = 0.0
frame_counter = 0
output_texture = None # pointer to offscreen render target
(source_pbo, dest_pbo, cuda_module, invert,
pycuda_source_pbo, pycuda_dest_pbo) = [None]*6
heading,pitch,bank = [0.0]*3
def create_PBOs(w,h):
global source_pbo, dest_pbo, pycuda_source_pbo, pycuda_dest_pbo
num_texels = w*h
data = numpy.zeros((num_texels,4),numpy.uint8)
source_pbo = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, source_pbo)
glBufferData(GL_ARRAY_BUFFER, data, GL_DYNAMIC_DRAW)
glBindBuffer(GL_ARRAY_BUFFER, 0)
pycuda_source_pbo = cuda_gl.BufferObject(int(source_pbo))
dest_pbo = glGenBuffers(1)
glBindBuffer(GL_ARRAY_BUFFER, dest_pbo)
glBufferData(GL_ARRAY_BUFFER, data, GL_DYNAMIC_DRAW)
glBindBuffer(GL_ARRAY_BUFFER, 0)
pycuda_dest_pbo = cuda_gl.BufferObject(int(dest_pbo))
def destroy_PBOs():
global source_pbo, dest_pbo, pycuda_source_pbo, pycuda_dest_pbo
for pbo in [source_pbo, dest_pbo]:
glBindBuffer(GL_ARRAY_BUFFER, int(pbo))
glDeleteBuffers(1, int(pbo))
glBindBuffer(GL_ARRAY_BUFFER, 0)
source_pbo,dest_pbo,pycuda_source_pbo,pycuda_dest_pbo = [None]*4
def create_texture(w,h):
global output_texture
output_texture = glGenTextures(1)
glBindTexture(GL_TEXTURE_2D, output_texture)
# set basic parameters
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
# buffer data
glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA,
w, h, 0, GL_RGBA, GL_UNSIGNED_BYTE, None)
def destroy_texture():
global output_texture
glDeleteTextures(output_texture)
output_texture = None
def init_gl():
Width, Height = current_size
glClearColor(0.1, 0.1, 0.5, 1.0)
glDisable(GL_DEPTH_TEST)
glViewport(0, 0, Width, Height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
glEnable(GL_LIGHT0)
red = ( 1.0, 0.1, 0.1, 1.0 )
white = ( 1.0, 1.0, 1.0, 1.0 )
glMaterialfv(GL_FRONT_AND_BACK, GL_DIFFUSE, red )
glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, white)
glMaterialf( GL_FRONT_AND_BACK, GL_SHININESS, 60.0)
def resize(Width, Height):
global current_size
current_size = Width, Height
glViewport(0, 0, Width, Height) # Reset The Current Viewport And Perspective Transformation
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(60.0, Width/float(Height), 0.1, 10.0)
def do_tick():
    global time_of_last_titleupdate, frame_counter, frames_per_second
    # time.clock() was removed in Python 3.8; perf_counter() serves the same purpose here.
    if ((time.perf_counter() * 1000.0) - time_of_last_titleupdate >= 1000.):
        frames_per_second = frame_counter  # Save The FPS
        frame_counter = 0  # Reset The FPS Counter
        szTitle = "%d FPS" % (frames_per_second)
        glutSetWindowTitle(szTitle)
        time_of_last_titleupdate = time.perf_counter() * 1000.0
    frame_counter += 1
# The function called whenever a key is pressed. Note the use of Python tuples to pass in: (key, x, y)
def keyPressed(*args):
global animate, enable_cuda
# If escape is pressed, kill everything.
if args[0] == '\033':
print('Closing..')
destroy_PBOs()
destroy_texture()
exit()
elif args[0] == 'a':
print('toggling animation')
animate = not animate
elif args[0] == 'e':
print('toggling cuda')
enable_cuda = not enable_cuda
def idle():
global heading, pitch, bank
if animate:
heading += 0.2
pitch += 0.6
bank += 1.0
glutPostRedisplay()
def display():
try:
render_scene()
if enable_cuda:
process_image()
display_image()
glutSwapBuffers()
except:
from traceback import print_exc
print_exc()
from os import _exit
_exit(0)
def process(width, height):
""" Use PyCuda """
grid_dimensions = (width//16,height//16)
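    # Note: integer division means a width or height not divisible by 16 would
    # leave trailing pixels unprocessed; the default 512x512 window divides evenly.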
source_mapping = pycuda_source_pbo.map()
dest_mapping = pycuda_dest_pbo.map()
invert.prepared_call(grid_dimensions, (16, 16, 1),
source_mapping.device_ptr(),
dest_mapping.device_ptr())
cuda_driver.Context.synchronize()
source_mapping.unmap()
dest_mapping.unmap()
def process_image():
""" copy image and process using CUDA """
global pycuda_source_pbo,source_pbo,current_size, dest_pbo
image_width, image_height = current_size
assert source_pbo is not None
# tell cuda we are going to get into these buffers
pycuda_source_pbo.unregister()
# activate destination buffer
glBindBufferARB(GL_PIXEL_PACK_BUFFER_ARB, int(source_pbo))
# read data into pbo. note: use BGRA format for optimal performance
glReadPixels(
0, #start x
0, #start y
image_width, #end x
image_height, #end y
GL_BGRA, #format
GL_UNSIGNED_BYTE, #output type
ctypes.c_void_p(0))
pycuda_source_pbo = cuda_gl.BufferObject(int(source_pbo))
# run the Cuda kernel
process(image_width, image_height)
# blit convolved texture onto the screen
# download texture from PBO
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, int(dest_pbo))
glBindTexture(GL_TEXTURE_2D, output_texture)
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0,
image_width, image_height,
GL_BGRA, GL_UNSIGNED_BYTE, ctypes.c_void_p(0))
def display_image():
""" render a screen sized quad """
glDisable(GL_DEPTH_TEST)
glDisable(GL_LIGHTING)
glEnable(GL_TEXTURE_2D)
glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE)
glMatrixMode(GL_PROJECTION)
glPushMatrix()
glLoadIdentity()
glOrtho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)
glMatrixMode( GL_MODELVIEW)
glLoadIdentity()
glViewport(0, 0, current_size[0], current_size[1])
glBegin(GL_QUADS)
glTexCoord2f(0.0, 0.0)
glVertex3f(-1.0, -1.0, 0.5)
glTexCoord2f(1.0, 0.0)
glVertex3f(1.0, -1.0, 0.5)
glTexCoord2f(1.0, 1.0)
glVertex3f(1.0, 1.0, 0.5)
glTexCoord2f(0.0, 1.0)
glVertex3f(-1.0, 1.0, 0.5)
glEnd()
glMatrixMode(GL_PROJECTION)
glPopMatrix()
glDisable(GL_TEXTURE_2D)
glBindBuffer(GL_PIXEL_PACK_BUFFER_ARB, 0)
glBindBuffer(GL_PIXEL_UNPACK_BUFFER_ARB, 0)
def render_scene():
glClear (GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)# Clear Screen And Depth Buffer
glMatrixMode(GL_MODELVIEW)
glLoadIdentity () # Reset The Modelview Matrix
glTranslatef(0.0, 0.0, -3.0)
glRotatef(heading, 1.0, 0.0, 0.0)
glRotatef(pitch , 0.0, 1.0, 0.0)
glRotatef(bank , 0.0, 0.0, 1.0)
glViewport(0, 0, current_size[0],current_size[1])
glEnable(GL_LIGHTING)
glEnable(GL_DEPTH_TEST)
glDepthFunc(GL_LESS)
glutSolidTeapot(1.0)
do_tick()#just for fps display..
return True
def main():
global window, cuda_module, cuda_gl, cuda_driver, invert
glutInit(sys.argv)
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
glutInitWindowSize(*initial_size)
glutInitWindowPosition(0, 0)
window = glutCreateWindow("PyCuda GL Interop Example")
glutDisplayFunc(display)
glutIdleFunc(idle)
glutReshapeFunc(resize)
glutKeyboardFunc(keyPressed)
glutSpecialFunc(keyPressed)
init_gl()
# create texture for blitting to screen
create_texture(*initial_size)
#setup pycuda gl interop
import pycuda.gl.autoinit
import pycuda.gl
cuda_gl = pycuda.gl
cuda_driver = pycuda.driver
cuda_module = SourceModule("""
__global__ void invert(unsigned char *source, unsigned char *dest)
{
int block_num = blockIdx.x + blockIdx.y * gridDim.x;
int thread_num = threadIdx.y * blockDim.x + threadIdx.x;
int threads_in_block = blockDim.x * blockDim.y;
//Since the image is RGBA we multiply the index 4.
//We'll only use the first 3 (RGB) channels though
int idx = 4 * (threads_in_block * block_num + thread_num);
dest[idx ] = 255 - source[idx ];
dest[idx+1] = 255 - source[idx+1];
dest[idx+2] = 255 - source[idx+2];
}
""")
invert = cuda_module.get_function("invert")
# The argument "PP" indicates that the invert function will take two PBOs as arguments
invert.prepare("PP")
# create source and destination pixel buffer objects for processing
create_PBOs(*initial_size)
glutMainLoop()
# Print message to console, and kick off the main to get it rolling.
if __name__ == "__main__":
print("Hit ESC key to quit, 'a' to toggle animation, and 'e' to toggle cuda")
main()
|
alipay/aop/api/domain/BusinessLicenceInfo.py | snowxmas/alipay-sdk-python-all | 213 | 12630212 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class BusinessLicenceInfo(object):
def __init__(self):
self._business_license_auth_pic = None
self._business_license_city = None
self._business_license_indate = None
self._business_license_is_three_in_one = None
self._business_license_no = None
self._business_license_pic = None
self._business_license_province = None
self._business_scope = None
self._company_address = None
self._company_name = None
self._org_code_certificate_no = None
self._org_code_certificate_pic = None
@property
def business_license_auth_pic(self):
return self._business_license_auth_pic
@business_license_auth_pic.setter
def business_license_auth_pic(self, value):
self._business_license_auth_pic = value
@property
def business_license_city(self):
return self._business_license_city
@business_license_city.setter
def business_license_city(self, value):
self._business_license_city = value
@property
def business_license_indate(self):
return self._business_license_indate
@business_license_indate.setter
def business_license_indate(self, value):
self._business_license_indate = value
@property
def business_license_is_three_in_one(self):
return self._business_license_is_three_in_one
@business_license_is_three_in_one.setter
def business_license_is_three_in_one(self, value):
self._business_license_is_three_in_one = value
@property
def business_license_no(self):
return self._business_license_no
@business_license_no.setter
def business_license_no(self, value):
self._business_license_no = value
@property
def business_license_pic(self):
return self._business_license_pic
@business_license_pic.setter
def business_license_pic(self, value):
self._business_license_pic = value
@property
def business_license_province(self):
return self._business_license_province
@business_license_province.setter
def business_license_province(self, value):
self._business_license_province = value
@property
def business_scope(self):
return self._business_scope
@business_scope.setter
def business_scope(self, value):
self._business_scope = value
@property
def company_address(self):
return self._company_address
@company_address.setter
def company_address(self, value):
self._company_address = value
@property
def company_name(self):
return self._company_name
@company_name.setter
def company_name(self, value):
self._company_name = value
@property
def org_code_certificate_no(self):
return self._org_code_certificate_no
@org_code_certificate_no.setter
def org_code_certificate_no(self, value):
self._org_code_certificate_no = value
@property
def org_code_certificate_pic(self):
return self._org_code_certificate_pic
@org_code_certificate_pic.setter
def org_code_certificate_pic(self, value):
self._org_code_certificate_pic = value
def to_alipay_dict(self):
params = dict()
if self.business_license_auth_pic:
if hasattr(self.business_license_auth_pic, 'to_alipay_dict'):
params['business_license_auth_pic'] = self.business_license_auth_pic.to_alipay_dict()
else:
params['business_license_auth_pic'] = self.business_license_auth_pic
if self.business_license_city:
if hasattr(self.business_license_city, 'to_alipay_dict'):
params['business_license_city'] = self.business_license_city.to_alipay_dict()
else:
params['business_license_city'] = self.business_license_city
if self.business_license_indate:
if hasattr(self.business_license_indate, 'to_alipay_dict'):
params['business_license_indate'] = self.business_license_indate.to_alipay_dict()
else:
params['business_license_indate'] = self.business_license_indate
if self.business_license_is_three_in_one:
if hasattr(self.business_license_is_three_in_one, 'to_alipay_dict'):
params['business_license_is_three_in_one'] = self.business_license_is_three_in_one.to_alipay_dict()
else:
params['business_license_is_three_in_one'] = self.business_license_is_three_in_one
if self.business_license_no:
if hasattr(self.business_license_no, 'to_alipay_dict'):
params['business_license_no'] = self.business_license_no.to_alipay_dict()
else:
params['business_license_no'] = self.business_license_no
if self.business_license_pic:
if hasattr(self.business_license_pic, 'to_alipay_dict'):
params['business_license_pic'] = self.business_license_pic.to_alipay_dict()
else:
params['business_license_pic'] = self.business_license_pic
if self.business_license_province:
if hasattr(self.business_license_province, 'to_alipay_dict'):
params['business_license_province'] = self.business_license_province.to_alipay_dict()
else:
params['business_license_province'] = self.business_license_province
if self.business_scope:
if hasattr(self.business_scope, 'to_alipay_dict'):
params['business_scope'] = self.business_scope.to_alipay_dict()
else:
params['business_scope'] = self.business_scope
if self.company_address:
if hasattr(self.company_address, 'to_alipay_dict'):
params['company_address'] = self.company_address.to_alipay_dict()
else:
params['company_address'] = self.company_address
if self.company_name:
if hasattr(self.company_name, 'to_alipay_dict'):
params['company_name'] = self.company_name.to_alipay_dict()
else:
params['company_name'] = self.company_name
if self.org_code_certificate_no:
if hasattr(self.org_code_certificate_no, 'to_alipay_dict'):
params['org_code_certificate_no'] = self.org_code_certificate_no.to_alipay_dict()
else:
params['org_code_certificate_no'] = self.org_code_certificate_no
if self.org_code_certificate_pic:
if hasattr(self.org_code_certificate_pic, 'to_alipay_dict'):
params['org_code_certificate_pic'] = self.org_code_certificate_pic.to_alipay_dict()
else:
params['org_code_certificate_pic'] = self.org_code_certificate_pic
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = BusinessLicenceInfo()
if 'business_license_auth_pic' in d:
o.business_license_auth_pic = d['business_license_auth_pic']
if 'business_license_city' in d:
o.business_license_city = d['business_license_city']
if 'business_license_indate' in d:
o.business_license_indate = d['business_license_indate']
if 'business_license_is_three_in_one' in d:
o.business_license_is_three_in_one = d['business_license_is_three_in_one']
if 'business_license_no' in d:
o.business_license_no = d['business_license_no']
if 'business_license_pic' in d:
o.business_license_pic = d['business_license_pic']
if 'business_license_province' in d:
o.business_license_province = d['business_license_province']
if 'business_scope' in d:
o.business_scope = d['business_scope']
if 'company_address' in d:
o.company_address = d['company_address']
if 'company_name' in d:
o.company_name = d['company_name']
if 'org_code_certificate_no' in d:
o.org_code_certificate_no = d['org_code_certificate_no']
if 'org_code_certificate_pic' in d:
o.org_code_certificate_pic = d['org_code_certificate_pic']
return o
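

# Illustrative round-trip sketch (not part of the SDK); the field values below
# are made-up example data. Runs only when the module is executed directly.
if __name__ == "__main__":
    info = BusinessLicenceInfo()
    info.company_name = "Example Trading Co."
    info.business_scope = "Retail"
    params = info.to_alipay_dict()
    restored = BusinessLicenceInfo.from_alipay_dict(params)
    assert restored.company_name == "Example Trading Co."
    assert restored.business_scope == "Retail"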
|
autobahn/wamp/gen/wamp/proto/DealerFeatures.py | rapyuta-robotics/autobahn-python | 1,670 | 12630213 | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: proto
import flatbuffers
class DealerFeatures(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsDealerFeatures(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = DealerFeatures()
x.Init(buf, n + offset)
return x
# DealerFeatures
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# DealerFeatures
def CallerIdentification(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# DealerFeatures
def CallTrustlevels(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# DealerFeatures
def CallTimeout(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# DealerFeatures
def CallCanceling(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# DealerFeatures
def ProgressiveCallResults(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(12))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# DealerFeatures
def RegistrationRevocation(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(14))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# DealerFeatures
def PatternBasedRegistration(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(16))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# DealerFeatures
def SharedRegistration(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(18))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# DealerFeatures
def SessionMetaApi(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(20))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# DealerFeatures
def RegistrationMetaApi(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(22))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# DealerFeatures
def TestamentMetaApi(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(24))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# DealerFeatures
def PayloadTransparency(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(26))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
# DealerFeatures
def PayloadEncryptionCryptobox(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(28))
if o != 0:
return bool(self._tab.Get(flatbuffers.number_types.BoolFlags, o + self._tab.Pos))
return False
def DealerFeaturesStart(builder): builder.StartObject(13)
def DealerFeaturesAddCallerIdentification(builder, callerIdentification): builder.PrependBoolSlot(0, callerIdentification, 0)
def DealerFeaturesAddCallTrustlevels(builder, callTrustlevels): builder.PrependBoolSlot(1, callTrustlevels, 0)
def DealerFeaturesAddCallTimeout(builder, callTimeout): builder.PrependBoolSlot(2, callTimeout, 0)
def DealerFeaturesAddCallCanceling(builder, callCanceling): builder.PrependBoolSlot(3, callCanceling, 0)
def DealerFeaturesAddProgressiveCallResults(builder, progressiveCallResults): builder.PrependBoolSlot(4, progressiveCallResults, 0)
def DealerFeaturesAddRegistrationRevocation(builder, registrationRevocation): builder.PrependBoolSlot(5, registrationRevocation, 0)
def DealerFeaturesAddPatternBasedRegistration(builder, patternBasedRegistration): builder.PrependBoolSlot(6, patternBasedRegistration, 0)
def DealerFeaturesAddSharedRegistration(builder, sharedRegistration): builder.PrependBoolSlot(7, sharedRegistration, 0)
def DealerFeaturesAddSessionMetaApi(builder, sessionMetaApi): builder.PrependBoolSlot(8, sessionMetaApi, 0)
def DealerFeaturesAddRegistrationMetaApi(builder, registrationMetaApi): builder.PrependBoolSlot(9, registrationMetaApi, 0)
def DealerFeaturesAddTestamentMetaApi(builder, testamentMetaApi): builder.PrependBoolSlot(10, testamentMetaApi, 0)
def DealerFeaturesAddPayloadTransparency(builder, payloadTransparency): builder.PrependBoolSlot(11, payloadTransparency, 0)
def DealerFeaturesAddPayloadEncryptionCryptobox(builder, payloadEncryptionCryptobox): builder.PrependBoolSlot(12, payloadEncryptionCryptobox, 0)
def DealerFeaturesEnd(builder): return builder.EndObject()
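

# Illustrative usage sketch (not part of the generated code): build a
# DealerFeatures table with the helpers above and read it back. Assumes the
# `flatbuffers` runtime package is installed; the feature flag chosen is arbitrary.
if __name__ == "__main__":
    builder = flatbuffers.Builder(0)
    DealerFeaturesStart(builder)
    DealerFeaturesAddCallCanceling(builder, True)
    offset = DealerFeaturesEnd(builder)
    builder.Finish(offset)

    features = DealerFeatures.GetRootAsDealerFeatures(builder.Output(), 0)
    assert features.CallCanceling() is True
    assert features.CallTimeout() is False  # unset slots fall back to their defaults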
|
towhee/models/layers/convmlp.py | ThyeeZz/towhee | 365 | 12630237 | # Copyright 2021 <NAME> and Zilliz. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from torch import nn


class ConvMlp(nn.Module):
    """ MLP using 1x1 convs that keeps spatial dims
    """
    def __init__(
            self, in_features, hidden_features=None, out_features=None, act_layer=nn.ReLU, norm_layer=None, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Conv2d(in_features, hidden_features, kernel_size=1, bias=True)
        self.norm = norm_layer(hidden_features) if norm_layer else nn.Identity()
        self.act = act_layer()
        self.fc2 = nn.Conv2d(hidden_features, out_features, kernel_size=1, bias=True)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.norm(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        return x
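

# Illustrative usage sketch (not part of the original module); the channel sizes
# and input shape are arbitrary example values. Runs only when executed directly.
if __name__ == "__main__":
    import torch

    mlp = ConvMlp(in_features=64, hidden_features=128, out_features=32)
    x = torch.randn(2, 64, 14, 14)          # (batch, channels, height, width)
    assert mlp(x).shape == (2, 32, 14, 14)  # spatial dims are preserved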
|
src/python/pants/build_graph/build_file_aliases_test.py | yoav-orca/pants | 1,806 | 12630241 | # Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import pytest
from pants.build_graph.build_file_aliases import BuildFileAliases


class TestBuildFileAliasesTest:
    def test_create(self):
        assert BuildFileAliases(objects={}, context_aware_object_factories={}) == BuildFileAliases()

        objects = {"jane": 42}
        assert BuildFileAliases(
            objects=objects, context_aware_object_factories={}
        ) == BuildFileAliases(objects=objects)

        factories = {"jim": lambda ctx: "bob"}
        assert BuildFileAliases(
            objects={}, context_aware_object_factories=factories
        ) == BuildFileAliases(context_aware_object_factories=factories)

        assert BuildFileAliases(
            objects=objects, context_aware_object_factories={}
        ) == BuildFileAliases(objects=objects)

        assert BuildFileAliases(
            objects=objects, context_aware_object_factories=factories
        ) == BuildFileAliases(objects=objects, context_aware_object_factories=factories)

    def test_bad_context_aware_object_factories(self):
        with pytest.raises(TypeError):
            BuildFileAliases(context_aware_object_factories={"george": 1})

    def test_merge(self):
        e_factory = lambda ctx: "e"
        f_factory = lambda ctx: "f"

        first = BuildFileAliases(objects={"d": 2}, context_aware_object_factories={"e": e_factory})
        second = BuildFileAliases(
            objects={"c": 1, "d": 42},
            context_aware_object_factories={"f": f_factory},
        )

        expected = BuildFileAliases(
            # second overrides first
            objects={"d": 42, "c": 1},
            # combine
            context_aware_object_factories={"e": e_factory, "f": f_factory},
        )
        assert expected == first.merge(second)
|
tools/perf/metrics/cpu_unittest.py | google-ar/chromium | 2,151 | 12630244 | <reponame>google-ar/chromium
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from metrics import cpu
# Testing private method.
# pylint: disable=protected-access


class CpuMetricTest(unittest.TestCase):

  def testSubtractCpuStats(self):
    # The result computed is a ratio of cpu time used to time elapsed.
    start = {'Browser': {'CpuProcessTime': 0, 'TotalTime': 0}}
    end = {'Browser': {'CpuProcessTime': 5, 'TotalTime': 20}}
    self.assertEqual({'Browser': 0.25}, cpu._SubtractCpuStats(end, start))

    # An error is thrown if the args are called in the wrong order.
    self.assertRaises(AssertionError, cpu._SubtractCpuStats, start, end)

    # An error is thrown if there's a process type in end that's not in start.
    end['Renderer'] = {'CpuProcessTime': 2, 'TotalTime': 20}
    self.assertRaises(AssertionError, cpu._SubtractCpuStats, end, start)

    # A process type will be ignored if there's an empty dict for start or end.
    start['Renderer'] = {}
    self.assertEqual({'Browser': 0.25}, cpu._SubtractCpuStats(end, start))

    # Results for multiple process types can be computed.
    start['Renderer'] = {'CpuProcessTime': 0, 'TotalTime': 0}
    self.assertEqual({'Browser': 0.25, 'Renderer': 0.1},
                     cpu._SubtractCpuStats(end, start))

    # Test 32-bit overflow.
    start = {'Browser': {'CpuProcessTime': 0, 'TotalTime': 2 ** 32 / 100. - 20}}
    end = {'Browser': {'CpuProcessTime': 5, 'TotalTime': 20}}
    self.assertEqual({'Browser': 0.125}, cpu._SubtractCpuStats(end, start))
    self.assertRaises(AssertionError, cpu._SubtractCpuStats, start, end)
|
book/src/ch02/src/sequences.py | zangyuchen2008/Clean-Code-in-Python-Second-Edition | 133 | 12630249 | """Clean Code in Python - Chapter 2: Pythonic Code
> Sequences
"""
from collections.abc import Sequence


class Items(Sequence):
    def __init__(self, *values):
        self._values = list(values)

    def __len__(self):
        return len(self._values)

    def __getitem__(self, item):
        return self._values.__getitem__(item)
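

# Illustrative sketch (not from the book text): implementing __len__ and
# __getitem__ is enough for the Sequence ABC to provide iteration, membership
# tests, slicing and reversed() for free. Runs only when executed directly.
if __name__ == "__main__":
    items = Items(1, 2, 3)
    assert len(items) == 3
    assert items[1] == 2
    assert items[1:] == [2, 3]                  # slices delegate to the inner list
    assert 2 in items                           # __contains__ comes from Sequence
    assert list(reversed(items)) == [3, 2, 1]   # __reversed__ too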
|
contrib/cryptsetup/tests/fileDiffer.py | lambdaxymox/DragonFlyBSD | 432 | 12630253 | <reponame>lambdaxymox/DragonFlyBSD
#!/usr/bin/env python
#
# Usage: fileDiffer <afile> <bfile> <list of disk changes>
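#
# Example invocation (illustrative file names and byte ranges):
#   fileDiffer luks-before.img luks-after.img A0-591 R1032-1063 S4096-4127
# where A<range> marks changes as allowed, R<range> requires (random-looking)
# changes and S<range> requires at least one semantic change in that range.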
#
# LUKS
# quick regression test suite
# Tests LUKS images for changes at certain disk offsets
#
# Does fast python code have to look ugly or is it just me?
import sys
class changes:
pass
def parseArgs(args):
aFileName = args[1]
bFileName = args[2]
changelist = []
args[0:3] = []
for i in args:
mychanges = changes();
if i.startswith('A'):
mychanges.mode = 'ALLOWED'
if i.startswith('R'):
mychanges.mode = 'REQUIRED'
mychanges.strictness = 'RANDOM'
if i.startswith('S'):
mychanges.mode = 'REQUIRED'
mychanges.strictness = 'SEMANTIC'
dashIndex = i.find('-')
if dashIndex == -1:
mychanges.starts = int(i[1:])
mychanges.ends = mychanges.starts
else:
mychanges.starts = int(i[1:dashIndex])
mychanges.ends = int(i[dashIndex+1:])
mychanges.miss = 0
changelist.append(mychanges)
mychanges = changes();
mychanges.starts = 0
# mychanges.ends will be fixed later
mychanges.mode = 'FORBIDDEN'
changelist.append(mychanges)
return [aFileName, bFileName, changelist]
def mode(i):
for c in changelist:
if i >= c.starts and i<=c.ends:
return c
def cleanchanges(i):
newchangelist=[]
for c in changelist:
if i <= c.starts or i <= c.ends:
newchangelist.append(c)
return newchangelist
[aFileName, bFileName, changelist] = parseArgs(sys.argv)
aFile = open(aFileName,'r')
bFile = open(bFileName,'r')
aString = aFile.read()
bString = bFile.read()
if len(aString) != len(bString):
sys.exit("Mismatch different file sizes")
fileLen = len(aString)
fileLen10th = fileLen/10
# Create a catch all entry
changelist[-1].ends = fileLen
print "Changes list: (FORBIDDEN default)"
print "start\tend\tmode\t\tstrictness"
for i in changelist:
if i.mode == 'REQUIRED':
print "%d\t%d\t%s\t%s" % (i.starts, i.ends, i.mode, i.strictness)
else:
print "%d\t%d\t%s" % (i.starts, i.ends, i.mode)
filepos = 0
fileLen10thC = 0
print "[..........]"
sys.stdout.write("[")
sys.stdout.flush()
modeNotTrivial = 1
while filepos < fileLen:
if modeNotTrivial == 1:
c = mode(filepos)
# print (filepos, c.mode)
if c.mode == 'REQUIRED':
if aString[filepos] == bString[filepos]:
c.miss = c.miss + 1
else:
if aString[filepos] != bString[filepos] and c.mode != 'ALLOWED':
sys.exit("Mismatch at %d: change forbidden" % filepos)
# Do some maintenance: print the progress bar and clean the changelist
#
# Maintaining two counters appears to be faster than modulo operation
if fileLen10thC == fileLen10th:
fileLen10thC = 0
sys.stdout.write(".")
sys.stdout.flush()
changelist = cleanchanges(filepos)
if len(changelist) == 1:
modeNotTrivial = 0
filepos = filepos + 1
fileLen10thC = fileLen10thC + 1
for c in changelist:
if c.mode == 'REQUIRED':
if c.strictness == 'SEMANTIC' and c.miss == (c.ends-c.starts+1):
sys.exit("Mismatch: not even a single change in region %d-%d." % (c.starts, c.ends))
# This is not correct. We should do a statistical test
# of the sampled data against the hypothetical distribution
# of collision. Chi-Square Test.
if c.strictness == 'RANDOM' and c.miss == (c.ends-c.starts+1):
sys.exit("Mismatch: not even a single change in region %d-%d." % (c.starts, c.ends))
print ".] - everything ok"
|
securetea/lib/malware_analysis/malwareAnalysisJSONDisplay.py | Off3nsiv3huNt/SecureTea-Project | 257 | 12630263 | # !/bin/python
# -*- coding: utf-8 -*-
u"""SecureTea Social Engineering
Project:
╔═╗┌─┐┌─┐┬ ┬┬─┐┌─┐╔╦╗┌─┐┌─┐
╚═╗├┤ │ │ │├┬┘├┤ ║ ├┤ ├─┤
╚═╝└─┘└─┘└─┘┴└─└─┘ ╩ └─┘┴ ┴
Author: <NAME> <<EMAIL>> August 16 2021
Version: 1.0
Module: SecureTea
"""
import json
import pandas
from securetea.lib.malware_analysis import globals
class JSONDisplay:
'''
Contains functions that analyse JSON responses from VirusTotal API and displays formatted results
verbosity_1()
Displays how many Antivirus engines detected malware in selected file
verbosity_2()
Displays Antiviruses along with their detection statuses in a formatted manner
verbosity_3()
Displays Antiviruses along with their detection statuses, method and result in a formatted manner
verbosity_4()
Display all information about scan in a table format. Uses pandas to create dataframe from JSON and then display it out on screen
'''
def __init__(self, av_engines_list):
pass
self.results_df = pandas.DataFrame(av_engines_list)
self.results_df = self.results_df.transpose()
globals.initialize_colours()
def threat_level_threading(self, filename, report_id):
"""
takes JSON responses from VirusTotal API
Edits 5 global dicts from globals.py
Puts values like - filename : value
No need to return anything because Global vars
"""
detected = 0
undetected = 0
unsupported = 0
total = 0
av_names = self.results_df.index
for av_name in av_names:
status = self.results_df.loc[av_name]['category']
if status == 'undetected':
undetected = undetected + 1
total = total + 1
elif status == 'malicious':
detected = detected + 1
total = total + 1
elif status == 'type-unsupported':
unsupported = unsupported + 1
total = total + 1
else:
total = total + 1
globals.detected[filename] = detected
globals.undetected[filename] = undetected
globals.unsupported[filename] = unsupported
globals.total[filename] = total
globals.report_id[filename] = report_id
def threat_level(self):
undetected = 0
detected = 0
unsupported = 0
total = 0
av_names = self.results_df.index
for av_name in av_names:
status = self.results_df.loc[av_name]['category']
print(status)
if status == 'undetected':
undetected = undetected + 1
total = total + 1
elif status == 'detected':
detected = detected + 1
total = total + 1
elif status == 'type-unsupported':
unsupported = unsupported + 1
total = total + 1
else:
total = total + 1
return detected, undetected, unsupported, total
def verbosity_1(self):
undetected = 0
detected = 0
unsupported = 0
total = 0
av_names = self.results_df.index
for av_name in av_names:
status = self.results_df.loc[av_name]['category']
if status == 'undetected':
undetected = undetected + 1
total = total + 1
            elif status == 'malicious':
detected = detected + 1
total = total + 1
elif status == 'type-unsupported':
unsupported = unsupported + 1
total = total + 1
else:
total = total + 1
print(globals.RED + 'Detected : ' + str(detected) + '/' + str(total) + globals.END)
print('Undetected : ' + str(undetected) + '/' + str(total))
print('Type unsupported : ' + str(unsupported) + '/' + str(total))
def verbosity_2(self):
print((self.results_df.loc[:, ['category']]).to_string())
def verbosity_3(self):
print((self.results_df.loc[:, ['category', 'result', 'method']]).to_string())
def verbosity_4(self):
print(self.results_df.to_string())
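# --- Hedged usage sketch (added for illustration; not part of the upstream module) ---
# Assumes the caller passes the per-engine results taken from a VirusTotal v3 file
# report ("last_analysis_results"); the engine names and values below are invented
# placeholders, not real report data.
if __name__ == "__main__":
    sample_results = {
        "EngineA": {"category": "malicious", "result": "Trojan.Generic", "method": "blacklist"},
        "EngineB": {"category": "undetected", "result": None, "method": "blacklist"},
    }
    display = JSONDisplay(sample_results)
    display.verbosity_1()  # prints detection counts
    display.verbosity_3()  # prints per-engine category/result/method table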
|
exercises/zh/test_03_11.py | Jette16/spacy-course | 2,085 | 12630278 | def test():
assert Span.has_extension(
"wikipedia_url"
), "你有在span上注册这个扩展吗?"
ext = Span.get_extension("wikipedia_url")
assert ext[2] is not None, "你有正确设置getter吗?"
assert (
"getter=get_wikipedia_url" in __solution__
), "你有设置getter为get_wikipedia_url了吗?"
assert (
"(ent.text, ent._.wikipedia_url)" in __solution__
), "你有读取到定制化属性了吗?"
assert (
doc.ents[-1]._.wikipedia_url
== "https://zh.wikipedia.org/w/index.php?search=周杰伦"
), "貌似这个属性的值是错误的。"
__msg__.good(
"漂亮!我们现在有了一个定制化的模型组件,使用模型预测的命名实体来生成"
"维基百科的URL,然后把它们设定成为一个定制化属性。可以在浏览器中打开这"
"个网址看看吧!"
)
|
util/config/fixpath.py | jhh67/chapel | 1,602 | 12630321 | #!/usr/bin/env python3
"""Removes path components that begin with $CHPL_HOME from the given path.
./fixpath.py path-value [--shell shell]
Example:
./fixpath.py "$PATH"
./fixpath.py \\" $PATH \\" --shell=fish
This is used by the setchplenv.* scripts to reduce PATH/MANPATH pollution. It
may be called in several situations:
1. No Chapel environment settings (new shell)
2. Same $CHPL_HOME as last time (re-running setchplenv in same dir)
3. Different $CHPL_HOME (cd ../other-chapel-dir).
4. $CHPL_HOME is set, but path doesn't include an old one.
($CHPL_HOME was hand-set, now setchplenv is run)
For case 1, just return the existing environment variable.
For case 2, return the environment variable without the components
that begin with $CHPL_HOME.
For case 3, setchplenv invokes this script before setting the new
$CHPL_HOME. We still have the old $CHPL_HOME set, so we can remove
the old $PATH and $MANPATH entries. The upshot is we do the same thing
as in case 2.
Case 4 should also be the same as case 2, but we won't remove any
components since there should be no components starting with $CHPL_HOME.
Mentioned only to avoid reintroducing #10196 when this function is modified.
"""
import optparse
import os
import re
import sys
def escape_path(p, delim):
"""Wrap fish paths in quotes to prevent splitting on spaces in paths"""
if delim == ' ':
return '"{}"'.format(p)
return p
def remove_chpl_from_path(path_val, delim):
"""
:path_val: path environment variable value ('$PATH' or '$MANPATH')
:delim: path delimiter (':' or ' ')
:returns: new path with $CHPL_HOME components removed
"""
chpl_home = os.getenv('CHPL_HOME')
if not chpl_home or chpl_home not in path_val:
return path_val
# Find delims that are not escaped
pattern = r'(?<!\\)\{0}'.format(delim)
# Split path by non-escaped delims, and sieve chpl_home
# Fish input includes hanging quotation marks, so we drop those here
newpath = [escape_path(p, delim) for p in re.split(pattern, path_val) if p != '"']
newpath = [p for p in newpath if chpl_home not in p]
return delim.join(newpath)
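# Hedged example (assumes CHPL_HOME=/opt/chapel and a bash-style ':' delimiter):
#   remove_chpl_from_path("/usr/bin:/opt/chapel/bin:/bin", ":") -> "/usr/bin:/bin"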
def update_path_env():
os.environ["PATH"] = remove_chpl_from_path(os.environ["PATH"], ":")
def main():
parser = optparse.OptionParser(usage=__doc__)
parser.add_option('--shell', dest='shell', default='bash',
help='shell being used')
(options, args) = parser.parse_args()
if options.shell == 'fish':
delim = ' '
else:
delim = ':'
if len(args) == 0:
sys.stderr.write('Error: path-value must be supplied\n\n')
parser.print_help()
sys.exit(1)
path = delim.join(args)
newpath = remove_chpl_from_path(path, delim)
sys.stdout.write('{0}'.format(newpath))
if __name__ == '__main__':
main()
|
nel/__main__.py | psyML/nel | 196 | 12630344 | #!/usr/bin/env python
import argparse
import re
import sys
import textwrap
from .corpora import prepare, analysis, visualise
from .harness import harness
from .learn import ranking, resolving, recognition
from .process.process import CorpusProcessor
from .process.tag import Tagger
from .process.candidates import CandidateGenerator
from .process.coref import MentionClusterer
from .features.feature import Feature
from nel import logging
log = logging.getLogger()
APPS = [
prepare.PrepareCorpus,
analysis.CorpusStats,
visualise.CompareCorpusAnnotations,
recognition.TrainSequenceClassifier,
ranking.TrainLinearRanker,
resolving.TrainLinearResolver,
resolving.FitNilThreshold,
harness.BatchLink,
harness.ServiceHarness
]
CORPUS_PROCESSORS = [
('tag-documents', Tagger),
('generate-candidates', CandidateGenerator),
('cluster-mentions', MentionClusterer),
('extract-feature', Feature),
]
def add_subparser(sp, cls, name = None, doc_text = None):
name = name or cls.__name__
doc_text = doc_text or cls.__doc__
csp = sp.add_parser(
name,
help=doc_text.split('\n')[0],
description=textwrap.dedent(doc_text.rstrip()),
formatter_class=argparse.RawDescriptionHelpFormatter)
return csp
def main(args=sys.argv[1:]):
p = argparse.ArgumentParser(description='nel entity linking framework')
sp = p.add_subparsers()
for cls in APPS:
csp = add_subparser(sp, cls, name=re.sub('([A-Z])', r'-\1', cls.__name__).lstrip('-').lower())
cls.add_arguments(csp)
for name, cls in CORPUS_PROCESSORS:
csp = add_subparser(sp, CorpusProcessor, name=name, doc_text=cls.__doc__)
CorpusProcessor.add_arguments(csp)
subsp = csp.add_subparsers()
for subcls in cls.iter_options():
subcsp = add_subparser(subsp, subcls)
subcls.add_arguments(subcsp)
subcsp.set_defaults(mappercls=subcls)
namespace = vars(p.parse_args(args))
cls = namespace.pop('cls')
try:
obj = cls(**namespace)
except ValueError as e:
p.error(str(e))
obj()
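# Hedged CLI sketch (subcommand names are derived from the APPS class names above,
# e.g. PrepareCorpus -> prepare-corpus, BatchLink -> batch-link):
#   python -m nel prepare-corpus --help
#   python -m nel batch-link --help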
if __name__ == '__main__':
main()
|
instant/tests/test_utils.py | synw/django-instant | 103 | 12630361 | from .base import InstantBaseTest
from instant.init import ensure_channel_is_private
class InstantTestUtils(InstantBaseTest):
def test_ensure_channel_is_private(self):
name = ensure_channel_is_private("$chan")
self.assertEqual(name, "$chan")
name = ensure_channel_is_private("chan")
self.assertEqual(name, "$chan")
name = ensure_channel_is_private("ns:$chan")
self.assertEqual(name, "ns:$chan")
name = ensure_channel_is_private("ns:chan")
self.assertEqual(name, "ns:$chan")
|
mobilenet_v2.py | Lehar24/mobile-deeplab-v3-plus | 166 | 12630373 | """MobileNet v2 model
# Reference
- [Inverted Residuals and Linear Bottlenecks Mobile Networks for
Classification, Detection and Segmentation]
(https://arxiv.org/abs/1801.04381)
"""
import os
import tensorflow as tf
import layers
from utils import op
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
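# Worked example (illustrative): _make_divisible(32 * 1.4, 8) == 48 -- the depth is
# rounded to the nearest multiple of 8 and never drops more than 10% below the request.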
def expand_input_by_factor(n, divisible_by=8):
return lambda num_inputs, **_: _make_divisible(num_inputs * n, divisible_by)
def depth_multiply(output_params,
multiplier,
divisible_by=8,
min_depth=8):
if 'num_outputs' not in output_params:
return
d = output_params['num_outputs']
output_params['num_outputs'] = _make_divisible(d * multiplier,
divisible_by,
min_depth)
class Conv2DBN(tf.keras.layers.Layer):
def __init__(self,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
dilation_rate=1,
stddev=0.09,
weight_decay=0.00004,
use_bias=False,
use_bn=True,
bn_momentum=0.997,
activation_fn=tf.nn.relu6):
super(Conv2DBN, self).__init__()
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
self.conv2d = tf.keras.layers.Conv2D(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
dilation_rate=dilation_rate,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay))
        self.bn = None  # default so call() can safely test whether BN is configured
        if not use_bias and use_bn:
self.bn = tf.keras.layers.BatchNormalization(
momentum=bn_momentum,
center=True,
scale=True)
self.activation = activation_fn
def call(self, inputs, training=True):
x = self.conv2d(inputs)
if self.bn:
x = self.bn(x, training=training)
if self.activation:
x = self.activation(x)
return x
class MobilenetV2(object):
def __init__(self,
output_stride=None,
depth_multiplier=1.0,
min_depth=8,
divisible_by=8,
quant_friendly=False):
if output_stride is not None:
if output_stride == 0 or \
(output_stride > 1 and output_stride % 2):
raise ValueError(
'Output stride must be None, 1 or a multiple of 2.')
self.output_stride = output_stride
self.depth_multiplier = depth_multiplier
self.min_depth = min_depth
self.divisible_by = divisible_by
# remove bn and activation behind depthwise-convolution
# replace relu6 with relu
self.quant_friendly = quant_friendly
self.losses_list = []
def losses(self):
return self.losses_list
def _conv2d(self,
input_tensor,
num_outputs,
kernel_size,
stride=1,
padding='SAME',
dilation_rate=1,
stddev=0.09,
weight_decay=0.00004,
use_bias=False,
use_bn=True,
bn_momentum=0.997,
activation_fn=tf.nn.relu6,
quant_friendly=False,
is_training=True,
scope=None):
net = input_tensor
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
with tf.variable_scope(scope, default_name="Conv"):
conv2d = tf.keras.layers.Conv2D(
filters=num_outputs,
kernel_size=kernel_size,
strides=stride,
padding=padding,
dilation_rate=dilation_rate,
use_bias=use_bias,
kernel_initializer=kernel_initializer,
kernel_regularizer=tf.keras.regularizers.l2(weight_decay),
name='conv2d')
net = conv2d(net)
self.losses_list.extend(conv2d.losses)
tf.summary.histogram('Weights', conv2d.weights[0])
if not use_bias and use_bn:
# keras layers' update op is not in global update_op collections
net = tf.layers.batch_normalization(
net,
momentum=bn_momentum,
training=is_training,
name='BatchNorm')
if activation_fn:
if quant_friendly:
activation_fn = tf.nn.relu
net = activation_fn(net)
tf.summary.histogram('Activation', net)
return tf.identity(net, name="output")
def _expanded_conv(self,
input_tensor,
num_outputs,
kernel_size=3,
stride=1,
padding='SAME',
dilation_rate=1,
expansion_size=expand_input_by_factor(6),
depthwise_location='expand',
depthwise_multiplier=1,
weight_decay=0.00004,
quant_friendly=False,
residual=True,
is_training=True,
scope=None):
input_depth = input_tensor.get_shape().as_list()[3]
net = input_tensor
with tf.variable_scope(scope, default_name="expanded_conv") as s, \
tf.name_scope(s.original_name_scope):
if depthwise_location not in [None, 'input', 'output', 'expand']:
raise TypeError('%r is unknown value for depthwise_location' %
depthwise_location)
if callable(expansion_size):
expansion_chan = expansion_size(num_inputs=input_depth)
else:
expansion_chan = expansion_size
# expansion
if depthwise_location == 'expand':
net = self._conv2d(net,
num_outputs=expansion_chan,
kernel_size=[1, 1],
weight_decay=weight_decay,
is_training=is_training,
quant_friendly=quant_friendly,
scope="expand")
net = tf.identity(net, name="expand_output")
# depthwise convolution
net = layers.depthwise_conv(
net,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation_rate=dilation_rate,
depth_multiplier=depthwise_multiplier,
quant_friendly=quant_friendly,
is_training=is_training,
scope="depthwise")
net = tf.identity(net, name="depthwise_output")
# projection
net = self._conv2d(net,
num_outputs=num_outputs,
kernel_size=[1, 1],
weight_decay=weight_decay,
activation_fn=None,
is_training=is_training,
scope="project")
net = tf.identity(net, name="project_output")
output_depth = net.get_shape().as_list()[3]
if residual and stride == 1 and input_depth == output_depth:
net += input_tensor
return tf.identity(net, name="output")
def model_def(self):
model_def = dict(
spec=[
op(self._conv2d, num_outputs=32, stride=2,
kernel_size=[3, 3]),
op(self._expanded_conv, num_outputs=16,
expansion_size=expand_input_by_factor(1, 1),
depthwise_location='input'),
op(self._expanded_conv, num_outputs=24, stride=2),
op(self._expanded_conv, num_outputs=24, stride=1),
op(self._expanded_conv, num_outputs=32, stride=2),
op(self._expanded_conv, num_outputs=32, stride=1),
op(self._expanded_conv, num_outputs=32, stride=1),
op(self._expanded_conv, num_outputs=64, stride=2),
op(self._expanded_conv, num_outputs=64, stride=1),
op(self._expanded_conv, num_outputs=64, stride=1),
op(self._expanded_conv, num_outputs=64, stride=1),
op(self._expanded_conv, num_outputs=96, stride=1),
op(self._expanded_conv, num_outputs=96, stride=1),
op(self._expanded_conv, num_outputs=96, stride=1),
op(self._expanded_conv, num_outputs=160, stride=2),
op(self._expanded_conv, num_outputs=160, stride=1),
op(self._expanded_conv, num_outputs=160, stride=1),
op(self._expanded_conv, num_outputs=320, stride=1),
op(self._conv2d, num_outputs=1280, stride=1,
kernel_size=[1, 1]),
]
)
return model_def
def forward_base(self,
input_tensor,
final_endpoint=None,
is_training=True,
scope="MobilenetV2"):
model_def = self.model_def()
endpoints = {}
scopes = {}
with tf.variable_scope(scope) as s, \
tf.name_scope(s.original_name_scope):
# The current_stride variable keeps track of the output stride of
# the activations, i.e., the running product of convolution strides
# up to the current network layer.
# This allows us to invoke atrous convolution whenever applying the
# next convolution would result in the activations
# having output stride larger than the target output_stride.
current_stride = 1
# The atrous convolution rate parameter.
rate = 1
net = input_tensor
for i, opdef in enumerate(model_def['spec']):
params = dict(opdef.params)
depth_multiply(params,
self.depth_multiplier,
self.divisible_by,
self.min_depth)
params['is_training'] = is_training
stride = params.get('stride', 1)
if self.output_stride is not None and \
current_stride == self.output_stride:
# If we have reached the target output_stride,
# then we need to employ atrous convolution with stride=1
# and multiply the atrous rate by the
# current unit's stride for use in subsequent layers.
layer_stride = 1
layer_rate = rate
rate *= stride
else:
layer_stride = stride
layer_rate = 1
current_stride *= stride
# Update params.
params['quant_friendly'] = self.quant_friendly
params['stride'] = layer_stride
# Only insert rate to params if rate > 1.
if layer_rate > 1:
params['dilation_rate'] = layer_rate
endpoint = 'layer_%d' % (i + 1)
try:
net = opdef.op(net, **params)
except Exception:
print('Failed to create op %i: %r params: %r' %
(i, opdef, params))
raise
endpoints[endpoint] = net
scope_name = os.path.dirname(net.name)
scopes[scope_name] = endpoint
if final_endpoint is not None and endpoint == final_endpoint:
break
# Add all tensors that end with 'output' to endpoints
for t in net.graph.get_operations():
scope_name = os.path.dirname(t.name)
bn = os.path.basename(t.name)
if scope_name in scopes and t.name.endswith('output'):
endpoints[scopes[scope_name] + '/' + bn] = t.outputs[0]
return net, endpoints
def forward(self,
input_tensor,
num_classes=1001,
final_endpoint=None,
prediction_fn=tf.nn.softmax,
is_training=True,
base_only=False):
input_shape = input_tensor.get_shape().as_list()
if len(input_shape) != 4:
raise ValueError(
'Expected rank 4 input, was: %d' % len(input_shape))
with tf.variable_scope('MobilenetV2', reuse=tf.AUTO_REUSE) as scope:
inputs = tf.identity(input_tensor, 'input')
net, end_points = self.forward_base(
inputs,
final_endpoint=final_endpoint,
is_training=is_training,
scope=scope)
if base_only:
return net, end_points
net = tf.identity(net, name='embedding')
with tf.variable_scope('Logits'):
net = layers.global_pool(net)
end_points['global_pool'] = net
if not num_classes:
return net, end_points
if is_training:
net = tf.keras.layers.Dropout(rate=0.2)(net)
# 1 x 1 x num_classes
# Note: legacy scope name.
logits = self._conv2d(
net,
num_classes,
[1, 1],
use_bias=True,
use_bn=False,
activation_fn=None,
is_training=is_training,
scope='Conv2d_1c_1x1')
logits = tf.squeeze(logits, [1, 2])
logits = tf.identity(logits, name='output')
end_points['Logits'] = logits
if prediction_fn:
end_points['Predictions'] = prediction_fn(logits,
name='Predictions')
return logits, end_points
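# --- Hedged usage sketch (added for illustration; not part of the upstream file) ---
# Builds the classification graph with TF1-style placeholders; the input size and
# class count below are assumptions for illustration only.
if __name__ == '__main__':
    images = tf.placeholder(tf.float32, [None, 224, 224, 3], name='images')
    model = MobilenetV2(output_stride=16, depth_multiplier=1.0)
    logits, end_points = model.forward(images, num_classes=1001, is_training=False)
    print(logits.shape)  # expected: (?, 1001)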
|
pokemongo_bot/navigation/path_finder/path_finder.py | golem4300/quattran | 183 | 12630390 | class PathFinder(object):
"""
Abstract class for a path finder
"""
def __init__(self, config):
# type: (Namespace, Stepper) -> None
self.config = config
    def path(self, from_lat, from_lng, to_lat, to_lng): # pragma: no cover
# type: (float, float, float, float) -> List[(float, float)]
raise NotImplementedError
|
hw/ip/otbn/dv/rig/rig/gens/known_wdr.py | asb/opentitan | 1,375 | 12630396 | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
from typing import Optional
from shared.operand import EnumOperandType, ImmOperandType, RegOperandType
from shared.insn_yaml import InsnsFile
from ..config import Config
from ..model import Model
from ..program import ProgInsn, Program
from ..snippet import ProgSnippet
from ..snippet_gen import GenCont, GenRet, SnippetGen
class KnownWDR(SnippetGen):
'''A snippet generator that generates known values (all zeros or all ones
for now) for WDRs.
'''
def __init__(self, cfg: Config, insns_file: InsnsFile) -> None:
super().__init__()
self.bn_xor = self._get_named_insn(insns_file, 'bn.xor')
self.bn_not = self._get_named_insn(insns_file, 'bn.not')
# BN.XOR has six operands: wrd, wrs1, wrs2, shift_type, shift_value
# and flag_group
ops = self.bn_xor.operands
if not (len(ops) == 6 and
isinstance(ops[0].op_type, RegOperandType) and
ops[0].op_type.reg_type == 'wdr' and
ops[0].op_type.is_dest() and
isinstance(ops[1].op_type, RegOperandType) and
ops[1].op_type.reg_type == 'wdr' and
not ops[1].op_type.is_dest() and
isinstance(ops[2].op_type, RegOperandType) and
ops[2].op_type.reg_type == 'wdr' and
not ops[2].op_type.is_dest() and
isinstance(ops[4].op_type, ImmOperandType)):
raise RuntimeError('BN.XOR instruction from instructions file is '
'not the shape expected by the KnownWDR '
'generator.')
self.wrd_op_type = ops[0].op_type
self.wrs_op_type = ops[1].op_type
self.imm_op_type = ops[4].op_type
assert self.imm_op_type.shift == 3
ops = self.bn_not.operands
if not (isinstance(ops[0].op_type, RegOperandType) and
ops[0].op_type.reg_type == 'wdr' and
ops[0].op_type.is_dest() and
isinstance(ops[1].op_type, RegOperandType) and
ops[1].op_type.reg_type == 'wdr' and
not ops[1].op_type.is_dest() and
isinstance(ops[2].op_type, EnumOperandType) and
isinstance(ops[3].op_type, ImmOperandType)):
raise RuntimeError('BN.NOT instruction from instructions file is '
'not the shape expected by the KnownWDR '
'generator.')
def gen(self,
cont: GenCont,
model: Model,
program: Program) -> Optional[GenRet]:
# Return None if this one of the last two instructions in the current
# gap because we need to either jump or do an ECALL to avoid getting
# stuck after executing both bn.xor and bn.not
if program.get_insn_space_at(model.pc) <= 3:
return None
# The bn.xor-bn.not pair takes 2 instructions
if program.space < 2:
return None
if model.fuel < 2:
return None
# Picks a random operand value for wrd.
wrd_val_xor = model.pick_operand_value(self.wrd_op_type)
if wrd_val_xor is None:
return None
# Picks a random operand value. It shouldn't matter because
# in the end, we will feed the same value as wrs2 and XORing
# would result with wrd becoming 0.
wrs_val_xor = model.pick_operand_value(self.wrs_op_type)
if wrs_val_xor is None:
return None
# Assertion is always true because ImmOperand has width embedded in it
shift_bits = self.imm_op_type.op_val_to_enc_val(0, model.pc)
assert shift_bits is not None
# Value of shift_type does not matter since shift_bits are hardcoded to
# 0
shift_type = model.pick_operand_value(self.bn_xor.operands[3].op_type)
assert shift_type is not None
# This value does not matter for this application
flg_group = model.pick_operand_value(self.bn_xor.operands[5].op_type)
assert flg_group is not None
# Result of this insn can be written to any register.
wrd_val_not = model.pick_operand_value(self.wrd_op_type)
if wrd_val_not is None:
return None
op_vals_xor = [wrd_val_xor, wrs_val_xor, wrs_val_xor, shift_type,
shift_bits, flg_group]
op_vals_not = [wrd_val_not, wrd_val_xor,
shift_type, shift_bits, flg_group]
prog_bn_xor = ProgInsn(self.bn_xor, op_vals_xor, None)
prog_bn_not = ProgInsn(self.bn_not, op_vals_not, None)
snippet = ProgSnippet(model.pc, [prog_bn_xor, prog_bn_not])
snippet.insert_into_program(program)
model.update_for_insn(prog_bn_xor)
model.update_for_insn(prog_bn_not)
model.pc += 8
return (snippet, model)
|
pylayers/mobility/ban/test/test_DeuxSeg.py | usmanwardag/pylayers | 143 | 12630399 | import numpy as np
from pylayers.mobility.ban.DeuxSeg import *
N=5
M=10
A = np.random.rand(3,N)
B = np.random.rand(3,N)
C = np.random.rand(3,M)
D = np.random.rand(3,M)
alpha,beta,dmin = dmin3d(A,B,C,D)
f,g = dist(A,B,C,D,alpha,beta)
## verif:
aa=np.empty((N,M))
bb=np.empty((N,M))
dd=np.empty((N,M))
ff=np.empty((N,M))
gg=np.empty((N,M))
for i in range(N):
for j in range(M):
aa[i,j],bb[i,j],dd[i,j]=dmin3d_nonvectorized(A[:,i],B[:,i],C[:,j],D[:,j])
ff[i,j],gg[i,j] = dist_nonvectorized(A[:,i],B[:,i],C[:,j],D[:,j],aa[i,j],bb[i,j])
assert np.allclose(aa, alpha)
assert np.allclose(bb, beta)
assert np.allclose(dd, dmin)
assert np.allclose(ff, f)
assert np.allclose(gg, g)
######################
###############
# DEBUG 1
###############
# np.random.seed(0)
# A = np.random.rand(3)
# B = np.random.rand(3)
# C = np.random.rand(3)
# D = np.random.rand(3)
# X=np.dot(A,B)
# Y=np.dot(C,D)
# A2=np.vstack((A,C)).T
# B2=np.vstack((B,D)).T
# XY=np.einsum('ij,ij->j',A2,B2)
# print 'X == XY[0]',np.allclose(X,XY[0])
# print 'Y == XY[1]',np.allclose(Y,XY[1])
# assert np.allclose(X,XY[0])
# assert np.allclose(Y,XY[1])
###############
# DEBUG 2
###############
# a,b,d=dmin3d(A,B,C,D)
# f,g = dist(A,B,C,D,a,b)
# nseg1=5
# nseg2=10
# A = np.random.rand(3,nseg1)
# B = np.random.rand(3,nseg1)
# C = np.random.rand(3,nseg2)
# D = np.random.rand(3,nseg2)
# aa=[]
# bb=[]
# dd=[]
# ff=[]
# gg=[]
# for i in range(nseg):
# a,b,d=dmin3d_old(A[:,i],B[:,i],C[:,i],D[:,i])
# f,g = dist_old(A[:,i],B[:,i],C[:,i],D[:,i],a,b)
# aa.append(a)
# bb.append(b)
# dd.append(d)
# ff.append(f)
# gg.append(g)
# aa=np.array(aa)
# bb=np.array(bb)
# dd=np.array(dd)
# ff=np.array(ff)
# gg=np.array(gg)
# import ipdb
# ipdb.set_trace()
# a,b,d=dmin3d(A,B,C,D)
# f,g = dist(A,B,C,D,a,b) |
babi/data/rnn_preprocess.py | zlgenuine/GGNN | 280 | 12630436 | """
Preprocess data for RNN training.
<NAME>, 10/2015
"""
import numpy as np
import argparse
def convert_graph_data(infile, outfile, n_val=0, n_train=0):
data_list = []
with open(infile, 'r') as f:
edges = []
questions = []
for line in f:
tokens = line.split()
if len(tokens) == 0:
data_list.append([edges, questions])
edges = []
questions = []
else:
if tokens[0] == '?':
questions.append(tokens[1:])
else:
edges.append(tokens)
if len(edges) > 0:
data_list.append([edges, questions])
if n_val == 0:
if n_train == 0:
write_data_list_to_file(data_list, outfile)
else:
np.random.shuffle(data_list)
write_data_list_to_file(data_list[:n_train], outfile)
else:
np.random.shuffle(data_list)
if n_train == 0:
write_data_list_to_file(data_list[:-n_val], outfile)
else:
write_data_list_to_file(data_list[:n_train], outfile)
write_data_list_to_file(data_list[-n_val:], outfile + '.val')
def write_data_list_to_file(data_list, filename):
with open(filename, 'w') as f:
for edges, questions in data_list:
s_edges = ''
for e in edges:
s_edges += 'n' + e[0] + ' e' + e[1] + ' n' + e[2] + ' eol '
for q in questions:
s_q = 'q' + q[0]
# allow at most 2 nodes
for i in xrange(1, min(len(q) - 1, 3)):
s_q += ' n' + q[i]
s_q += ' ans'
# allow more than one answers, which will be interpreted as a sequence
for i in xrange(min(len(q) - 1, 3), len(q)):
s_q += ' ' + q[i]
f.write(s_edges + s_q + '\n')
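# Hedged illustration of the formats handled above (inferred from the parsing code):
#   graph line  "1 2 3"     -> edge (source node 1, edge type 2, target node 3)
#   graph line  "? 4 1 5"   -> question of type 4 about node 1 with answer 5
#   output line "n1 e2 n3 eol q4 n1 ans 5"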
def convert_rnn_data(infile, outfile, dictfile=None):
"""
Convert each token in the example into an index to make processing easier.
"""
d = {}
if dictfile is not None:
with open(dictfile, 'r') as f:
for line in f:
k, v = line.split()
d[k] = int(v)
next_idx = 1
with open(outfile, 'w') as fout:
with open(infile, 'r') as fin:
for line in fin:
tokens = line.split()
in_targets = False
for i in xrange(len(tokens)):
t = tokens[i]
if in_targets:
fout.write(' ' + t)
continue
if t in d:
idx = d[t]
else:
d[t] = next_idx
idx = next_idx
next_idx += 1
fout.write('%d ' % idx)
if t == 'ans':
in_targets = True
fout.write('')
fout.write('\n')
# fout.write(tokens[-1] + '\n')
with open(outfile + '.dict', 'w') as f:
for k, v in sorted(d.items(), key=lambda t: t[0]):
f.write('%s %d\n' % (k, v))
if __name__ == '__main__':
cmd_parser = argparse.ArgumentParser(description='Convert graph data into standard form for RNNs.')
cmd_parser.add_argument('infile', help='path to the input file that contains all the graphs')
cmd_parser.add_argument('outfile', help='path to the output file to be created')
cmd_parser.add_argument('--dict', help='path to an optional dictionary file', default=None)
cmd_parser.add_argument('--mode', help='preprocessing mode', choices=['graph', 'rnn'], default='graph')
cmd_parser.add_argument('--nval', help='number of examples to use for validation', type=int, default=0)
cmd_parser.add_argument('--ntrain', help='number of examples to use for training', type=int, default=0)
args = cmd_parser.parse_args()
if args.mode == 'graph':
convert_graph_data(args.infile, args.outfile, args.nval, args.ntrain)
elif args.mode == 'rnn':
convert_rnn_data(args.infile, args.outfile, args.dict)
|
qiskit_nature/problems/sampling/protein_folding/bead_contacts/contact_map.py | jschuhmac/qiskit-nature | 132 | 12630448 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""A class that stores contacts between beads of a peptide as qubit operators."""
from typing import Dict
from .contact_map_builder import (
_create_contact_qubits,
)
from ..peptide.peptide import Peptide
class ContactMap:
"""A class that stores contacts between beads of a peptide as qubit operators. For technical
details regarding the meaning of these operators as well as a convention for their indexing,
please see the documentation in the ContactMapBuilder class."""
def __init__(self, peptide: Peptide):
"""
Args:
peptide: A Peptide object that includes all information about a protein.
"""
self._peptide = peptide
(
self._lower_main_upper_main,
self._lower_side_upper_main,
self._lower_main_upper_side,
self._lower_side_upper_side,
self.num_contacts,
) = _create_contact_qubits(peptide)
@property
def peptide(self) -> Peptide:
"""Returns a peptide."""
return self._peptide
@property
def lower_main_upper_main(self) -> Dict[int, dict]:
"""Returns a dictionary which is a component of a contact map that stores contact operators
between a bead on a main chain (first index in a dictionary) and a bead in a main chain (
second index in a dictionary)."""
return self._lower_main_upper_main
@property
def lower_side_upper_main(self) -> Dict[int, dict]:
"""Returns a dictionary which is a component of a contact map that stores contact operators
between a first bead in a side chain (first index in a dictionary) and a bead in a main
chain (second index in a dictionary)."""
return self._lower_side_upper_main
@property
def lower_main_upper_side(self) -> Dict[int, dict]:
"""Returns a dictionary which is a component of a contact map that stores contact operators
between a bead in a main chain (first index in a dictionary) and a first bead in a side
chain (second index in a dictionary)."""
return self._lower_main_upper_side
@property
def lower_side_upper_side(self) -> Dict[int, dict]:
"""Returns a dictionary which is a component of a contact map that stores contact operators
between a first bead in a side chain (first index in a dictionary) and a first bead in a
side chain (second index in a dictionary)."""
return self._lower_side_upper_side
|
libs/sqlobject/tests/test_converters.py | scambra/HTPC-Manager | 422 | 12630498 | from datetime import timedelta
import sys
from sqlobject.converters import registerConverter, sqlrepr, \
quote_str, unquote_str
from sqlobject.sqlbuilder import SQLExpression, SQLObjectField, \
Select, Insert, Update, Delete, Replace, \
SQLTrueClauseClass, SQLConstant, SQLPrefix, SQLCall, SQLOp, \
_LikeQuoted
class TestClass:
def __repr__(self):
return '<TestClass>'
def TestClassConverter(value, db):
return repr(value)
registerConverter(TestClass, TestClassConverter)
class NewTestClass:
__metaclass__ = type
def __repr__(self):
return '<NewTestClass>'
def NewTestClassConverter(value, db):
return repr(value)
registerConverter(NewTestClass, NewTestClassConverter)
def _sqlrepr(self, db):
return '<%s>' % self.__class__.__name__
SQLExpression.__sqlrepr__ = _sqlrepr
############################################################
## Tests
############################################################
def test_simple_string():
assert sqlrepr('A String', 'firebird') == "'A String'"
def test_string_newline():
assert sqlrepr('A String\nAnother', 'postgres') == "E'A String\\nAnother'"
assert sqlrepr('A String\nAnother', 'sqlite') == "'A String\nAnother'"
def test_string_tab():
assert sqlrepr('A String\tAnother', 'postgres') == "E'A String\\tAnother'"
def test_string_r():
assert sqlrepr('A String\rAnother', 'postgres') == "E'A String\\rAnother'"
def test_string_b():
assert sqlrepr('A String\bAnother', 'postgres') == "E'A String\\bAnother'"
def test_string_000():
assert sqlrepr('A String\000Another', 'postgres') == "E'A String\\0Another'"
def test_string_():
assert sqlrepr('A String\tAnother', 'postgres') == "E'A String\\tAnother'"
assert sqlrepr('A String\'Another', 'firebird') == "'A String''Another'"
def test_simple_unicode():
assert sqlrepr(u'A String', 'postgres') == "'A String'"
def test_integer():
assert sqlrepr(10) == "10"
def test_float():
assert sqlrepr(10.01) == "10.01"
def test_none():
assert sqlrepr(None) == "NULL"
def test_list():
assert sqlrepr(['one','two','three'], 'postgres') == "('one', 'two', 'three')"
def test_tuple():
assert sqlrepr(('one','two','three'), 'postgres') == "('one', 'two', 'three')"
def test_bool():
assert sqlrepr(True, 'postgres') == "'t'"
assert sqlrepr(False, 'postgres') == "'f'"
assert sqlrepr(True, 'mysql') == "1"
assert sqlrepr(False, 'mysql') == "0"
def test_datetime():
from datetime import datetime, date, time
assert sqlrepr(datetime(2005, 7, 14, 13, 31, 2)) == "'2005-07-14 13:31:02.000000'"
assert sqlrepr(date(2005, 7, 14)) == "'2005-07-14'"
assert sqlrepr(time(13, 31, 2)) == "'13:31:02.000000'"
# now dates before 1900
assert sqlrepr(datetime(1428, 7, 14, 13, 31, 2)) == "'1428-07-14 13:31:02.000000'"
assert sqlrepr(date(1428, 7, 14)) == "'1428-07-14'"
def test_instance():
instance = TestClass()
assert sqlrepr(instance) == repr(instance)
def test_newstyle():
instance = NewTestClass()
assert sqlrepr(instance) == repr(instance)
def test_sqlexpr():
instance = SQLExpression()
assert sqlrepr(instance) == repr(instance)
def test_sqlobjectfield():
instance = SQLObjectField('test', 'test', 'test', None, None)
assert sqlrepr(instance) == repr(instance)
def test_select():
instance = Select('test')
assert sqlrepr(instance, 'mysql') == "SELECT test"
def test_insert():
# Single column, no keyword arguments.
instance = Insert('test', [('test',)])
assert sqlrepr(instance, 'mysql') == "INSERT INTO test VALUES ('test')"
# Multiple columns, no keyword arguments.
instance2 = Insert('test', [('1st', '2nd', '3th', '4th')])
assert sqlrepr(instance2, 'postgres') == "INSERT INTO test VALUES ('1st', '2nd', '3th', '4th')"
# Multiple rows, multiple columns, "valueList" keyword argument.
instance3 = Insert('test', valueList=[('a1', 'b1'), ('a2', 'b2'), ('a3', 'b3')])
assert sqlrepr(instance3, 'sqlite') == "INSERT INTO test VALUES ('a1', 'b1'), ('a2', 'b2'), ('a3', 'b3')"
# Multiple columns, "values" keyword argument.
instance4 = Insert('test', values=('v1', 'v2', 'v3'))
assert sqlrepr(instance4, 'mysql') == "INSERT INTO test VALUES ('v1', 'v2', 'v3')"
# Single column, "valueList" keyword argument.
instance5 = Insert('test', valueList=[('v1',)])
assert sqlrepr(instance5, 'mysql') == "INSERT INTO test VALUES ('v1')"
# Multiple rows, Multiple columns, template.
instance6 = Insert('test', valueList=[('a1', 'b1'), ('a2', 'b2')], template=['col1', 'col2'])
assert sqlrepr(instance6, 'mysql') == "INSERT INTO test (col1, col2) VALUES ('a1', 'b1'), ('a2', 'b2')"
# Multiple columns, implicit template (dictionary value).
instance7 = Insert('test', valueList=[{'col1': 'a1', 'col2': 'b1'}])
assert sqlrepr(instance7, 'mysql') == "INSERT INTO test (col1, col2) VALUES ('a1', 'b1')"
# Multiple rows, Multiple columns, implicit template.
instance8 = Insert('test', valueList=[{'col1': 'a1', 'col2': 'b1'},
{'col1': 'a2', 'col2': 'b2'}])
assert sqlrepr(instance8, 'mysql') == "INSERT INTO test (col1, col2) VALUES ('a1', 'b1'), ('a2', 'b2')"
def test_update():
instance = Update('test', {'test':'test'})
assert sqlrepr(instance, 'mysql') == "UPDATE test SET test='test'"
def test_delete():
instance = Delete('test', None)
assert sqlrepr(instance, 'mysql') == "DELETE FROM test"
def test_replace():
instance = Replace('test', {'test':'test'})
assert sqlrepr(instance, 'mysql') == "REPLACE test SET test='test'"
def test_trueclause():
instance = SQLTrueClauseClass()
assert sqlrepr(instance) == repr(instance)
def test_op():
instance = SQLOp('and', 'this', 'that')
assert sqlrepr(instance, 'mysql') == "(('this') AND ('that'))"
def test_call():
instance = SQLCall('test', ('test',))
assert sqlrepr(instance, 'mysql') == "'test'('test')"
def test_constant():
instance = SQLConstant('test')
assert sqlrepr(instance) == repr(instance)
def test_prefix():
instance = SQLPrefix('test', 'test')
assert sqlrepr(instance, 'mysql') == "test 'test'"
def test_dict():
assert sqlrepr({"key": "value"}, "sqlite") == "('key')"
def test_sets():
try:
set
except NameError:
pass
else:
assert sqlrepr(set([1])) == "(1)"
def test_timedelta():
assert sqlrepr(timedelta(seconds=30*60)) == \
"INTERVAL '0 days 1800 seconds'"
def test_quote_unquote_str():
assert quote_str('test%', 'postgres') == "'test%'"
assert quote_str('test%', 'sqlite') == "'test%'"
assert quote_str('test\%', 'postgres') == "E'test\\%'"
assert quote_str('test\\%', 'sqlite') == "'test\%'"
assert unquote_str("'test%'") == 'test%'
assert unquote_str("'test\\%'") == 'test\\%'
assert unquote_str("E'test\\%'") == 'test\\%'
def test_like_quoted():
assert sqlrepr(_LikeQuoted('test'), 'postgres') == "'test'"
assert sqlrepr(_LikeQuoted('test'), 'sqlite') == "'test'"
assert sqlrepr(_LikeQuoted('test%'), 'postgres') == r"E'test\\%'"
assert sqlrepr(_LikeQuoted('test%'), 'sqlite') == r"'test\%'"
|
fairness/algorithms/kamishima/kamfadm-2012ecmlpkdd/fadm/eval/_bin_class.py | yashwarlord/fairness-comparison | 146 | 12630538 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
import from 50b745c1d18d5c4b01d9d00e406b5fdaab3515ea @ KamLearn
Compute various statistics between estimated and correct classes in binary
cases
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
#==============================================================================
# Module metadata variables
#==============================================================================
#==============================================================================
# Imports
#==============================================================================
import logging
import numpy as np
#==============================================================================
# Public symbols
#==============================================================================
__all__ = ['BinClassStats']
#==============================================================================
# Constants
#==============================================================================
#==============================================================================
# Module variables
#==============================================================================
#==============================================================================
# Classes
#==============================================================================
class BinClassStats(object):
""" Compute various statistics of 2class sample data
Parameters
----------
tp : float
The number of True-Positives = n[1, 1]
fn : float
The number of False-Negatives = n[1, 0]
fp : float
The number of False-Positives = n[0, 1]
tn : float
The number of True-Negatives = n[0, 0]
Attributes
----------
n : array-like, shape=(2, 2), dtype=float
Contingency table of the correct and estimated samples. Rows and
columns correspond to the correct and the estimated samples.
c : array-like, shape(2, 0), dtype=float
Marginal counts of the correct(=true) samples
e : array-like, shape(2, 0), dtype=float
Marginal counts of the estimated samples
t : float
The number of total samples
"""
def __init__(self, tp, fn, fp, tn):
self.n = np.empty((2, 2))
self.n[1, 1] = float(tp)
self.n[1, 0] = float(fn)
self.n[0, 1] = float(fp)
self.n[0, 0] = float(tn)
self.c = np.sum(self.n, axis=1)
self.e = np.sum(self.n, axis=0)
self.t = np.sum(self.n)
if self.t <= 0.0 or np.any(self.n < 0.0) \
or np.any(np.isinf(self.n)) or np.any(np.isnan(self.n)):
raise ValueError("Illegal values are specified")
def negate(self):
""" negate the meanings of positives and negatives
"""
self.n[1, 1], self.n[0, 0] = self.n[0, 0], self.n[1, 1]
self.n[1, 0], self.n[0, 1] = self.n[0, 1], self.n[1, 0]
self.c = np.sum(self.n, axis=1)
self.e = np.sum(self.n, axis=0)
self.t = np.sum(self.n)
def ct(self):
""" Counts of contingency table elements
Returns
-------
tp : float
n[1, 1], the number of true positive samples
fn : float
n[1, 0], the number of false negative samples
fp : float
n[0, 1], the number of false positive samples
tn : float
n[0, 0], the number of true negative samples
"""
return self.n[1, 1], self.n[1, 0], self.n[0, 1], self.n[0, 0]
def str_ct(self, header=True):
""" Strings for ct()
Parameters
----------
header : boolean, default=True
include header info
Returns
-------
pr : list, type=str
list of message strings
"""
tp, fn, fp, tn = self.ct()
pr = []
if header:
pr.append("### Contingency Table ###")
pr.append("[ TP(1,1), FN(1,0) ] = [ %6.15g, %6.15g ]" % (tp, fn))
pr.append("[ FP(0,1), TN(0,0) ] = [ %6.15g, %6.15g ]" % (fp, tn))
return pr
def mct(self):
""" Marginal counts of contingency table elements
Returns
-------
cp : float
sum of correct positives
cn : float
sum of correct negatives
ep : float
sum of estimated positives
en : float
sum of estimated negatives
tc : float
total count
"""
return self.c[1], self.c[0], self.e[1], self.e[0], self.t
def str_mct(self, header=True):
""" Strings for mct()
Parameters
----------
header : boolean, default=True
include header info
Returns
-------
pr : list, type=str
list of message strings
"""
cp, cn, ep, en, t = self.mct()
pr = []
if header:
pr.append("### Marginal/Total Counts ###")
pr.append("True [ P, N ] = [ %6.15g, %6.15g ]" % (cp, cn))
pr.append("Est [ P, N ] = [ %6.15g, %6.15g ]" % (ep, en))
pr.append("Total = %.15g" % (t))
return pr
def acc(self):
""" Accuracy
Returns
-------
acc : float
accuracy
sd : float
s.d. of accuracy
"""
acc = (self.n[1, 1] + self.n[0, 0]) / self.t
sd = np.sqrt(acc * (1.0 - acc) / self.t)
return acc, sd
def str_acc(self, header=True):
""" Strings for acc()
Parameters
----------
header : boolean, default=True
include header info
Returns
-------
pr : list, type=str
list of message strings
"""
acc, sd = self.acc()
pr = []
if header:
pr.append("### Accuracy ###")
pr.append("Acc / S.D. = [ %.15g, %.15g ]" % (acc, sd))
return pr
def jaccard(self):
""" Jaccard / Dice coefficients
Returns
-------
jaccard : float
Jaccard coefficient
njaccard : float
Negated Jaccard coefficient
dice : float
Dice coefficient
ndice : float
Negated Dice coefficient
"""
jaccard = self.n[1, 1] / (self.t - self.n[0, 0])
njaccard = self.n[0, 0] / (self.t - self.n[1, 1])
dice = 2.0 * self.n[1, 1] / (self.c[1] + self.e[1])
ndice = 2.0 * self.n[0, 0] / (self.c[0] + self.e[0])
return jaccard, njaccard, dice, ndice
def str_jaccard(self, header=True):
""" Strings for jaccard()
Parameters
----------
header : boolean, default=True
include header info
Returns
-------
pr : list, type=str
list of message strings
"""
jaccard, njaccard, dice, ndice = self.jaccard()
pr = []
if header:
pr.append("### Jaccard / Dice Coefficients ###")
pr.append("Jaccard [ P, N ] = [ %.15g, %.15g ]" % (jaccard, njaccard))
pr.append("Dice [ P, N ] = [ %.15g, %.15g ]" % (dice, ndice))
return pr
def kldiv(self):
""" KL divergence
Returns
-------
kldivc : float
D( Correct || Estimated ) with natural log.
KL divergence from correct to estimated.
kldive : float
D( Estimated || Correct ) with natural log.
KL divergence from estimated to correct.
kldivc2 : float
D( Correct || Estimated ) with log2.
KL divergence from correct to estimated.
kldive2 : float
D( Estimated || Correct ) with log2.
KL divergence from estimated to correct.
"""
i = lambda n, m: 0.0 if n == 0.0 else \
np.inf if m == 0.0 else n * np.log(n / m)
kldivc = (i(self.c[0], self.e[0]) + i(self.c[1], self.e[1])) \
/ self.t
kldive = (i(self.e[0], self.c[0]) + i(self.e[1], self.c[1])) \
/ self.t
i2 = lambda n, m: 0.0 if n == 0.0 else \
np.inf if m == 0.0 else n * np.log2(n / m)
kldivc2 = (i2(self.c[0], self.e[0]) + i2(self.c[1], self.e[1])) \
/ self.t
kldive2 = (i2(self.e[0], self.c[0]) + i2(self.e[1], self.c[1])) \
/ self.t
return kldivc, kldive, kldivc2, kldive2
def str_kldiv(self, header=True):
""" Strings for kldiv()
Parameters
----------
header : boolean, default=True
include header info
Returns
-------
pr : list, type=str
list of message strings
"""
kldivc, kldive, kldivc2, kldive2 = self.kldiv()
pr = []
if header:
pr.append("### KL Divergence ###")
pr.append("[ D(C||E), D(E||C) ] with ln = [ %.15g, %.15g ]"
% (kldivc, kldive))
pr.append("[ D(C||E), D(E||C) ] with log2 = [ %.15g, %.15g ]"
% (kldivc2, kldive2))
return pr
def mi(self):
""" Mutual Information with natural log
Returns
-------
mi : float
            I(C; E) = H(C) + H(E) - H(C, E), the mutual information
nmic : float
I(C; E) / H(C). MI normalized by H(C)
nmie : float
I(C; E) / H(E). MI normalized by H(E)
amean : float
Arithmetic mean of two normalized mutual informations.
gmean : float
Geometric mean of two normalized mutual informations.
"""
# joint entropy of the pmf function n / sum(n)
en = lambda n: np.sum([0.0 if i == 0.0
else (-i / self.t) * np.log(i / self.t)
for i in np.ravel(n)])
hc = en(self.c)
he = en(self.e)
hj = en(self.n)
mi = np.max((0.0, hc + he - hj))
nmic = 1.0 if hc == 0.0 else mi / hc
nmie = 1.0 if he == 0.0 else mi / he
return mi, nmic, nmie, (nmic + nmie) / 2.0, np.sqrt(nmic * nmie)
def str_mi(self, header=True):
""" Strings for mi()
Parameters
----------
header : boolean, default=True
include header info
Returns
-------
pr : list, type=str
list of message strings
"""
mi, nmic, nmie, amean, gmean = self.mi()
pr = []
if header:
pr.append("### Mutual Information (natual log) ###")
pr.append("I(C;E) = %.15g" % (mi))
pr.append("[ I(C;E)/H(C), I(C;E)/H(E) ] = [ %.15g, %.15g ]" % \
(nmic, nmie))
pr.append("Arithmetic Mean = %.15g" % (amean))
pr.append("Geometric Mean = %.15g" % (gmean))
return pr
def mi2(self):
""" Mutual Information with log2
Returns
-------
mi : float
            I(C; E) = H(C) + H(E) - H(C, E), the mutual information
nmic : float
I(C; E) / H(C). MI normalized by H(C)
nmie : float
I(C; E) / H(E). MI normalized by H(E)
amean : float
Arithmetic mean of two normalized mutual informations.
gmean : float
Geometric mean of two normalized mutual informations.
"""
# joint entropy of the pmf function n / sum(n)
en = lambda n: np.sum([0.0 if i == 0.0
else (-i / self.t) * np.log2(i / self.t)
for i in np.ravel(n)])
hc = en(self.c)
he = en(self.e)
hj = en(self.n)
mi = np.max((0.0, hc + he - hj))
nmic = 1.0 if hc == 0.0 else mi / hc
nmie = 1.0 if he == 0.0 else mi / he
return mi, nmic, nmie, (nmic + nmie) / 2.0, np.sqrt(nmic * nmie)
def str_mi2(self, header=True):
""" Strings for mi2()
Parameters
----------
header : boolean, default=True
include header info
Returns
-------
pr : list, type=str
list of message strings
"""
mi, nmic, nmie, amean, gmean = self.mi2()
pr = []
if header:
pr.append("### Mutual Information (log2) ###")
pr.append("I(C;E) = %.15g" % (mi))
pr.append("[ I(C;E)/H(C), I(C;E)/H(E) ] = [ %.15g, %.15g ]" % \
(nmic, nmie))
pr.append("Arithmetic Mean = %.15g" % (amean))
pr.append("Geometric Mean = %.15g" % (gmean))
return pr
def prf(self, alpha=0.5):
""" Precision, recall, and F-measure
Parameters
----------
alpha : float, default=0.5
weight of precision in calculation of F-measures
Returns
p : float
Precision for a positive class
r : float
Recall for a positive class
f : float
F-measure for a positive class
"""
p = self.n[1, 1] / (self.n[1, 1] + self.n[0, 1])
r = self.n[1, 1] / (self.n[1, 1] + self.n[1, 0])
f = 1.0 / (alpha * (1.0 / p) + (1.0 - alpha) * (1.0 / r))
return p, r, f
def str_prf(self, alpha=0.5, header=True):
""" Strings for prf()
Parameters
----------
header : boolean, default=True
include header info
Returns
-------
pr : list, type=str
list of message strings
"""
p, r, f = self.prf()
pr = []
if header:
pr.append("### Precision, Recall, and F-measure ###")
pr.append("Precision = %.15g" % (p))
pr.append("Recall = %.15g" % (r))
pr.append("F-measure = %.15g" % (f))
return pr
def all(self):
""" all above statistics
Returns
-------
stats : float
list of all statistics
"""
stats = []
stats += self.ct()
stats += self.mct()
stats += self.acc()
stats += self.jaccard()
stats += self.kldiv()
stats += self.mi()
stats += self.mi2()
stats += self.prf()
return tuple(stats)
def str_all(self, header=True):
""" Strings for all()
Parameters
----------
header : boolean, default=True
include header info
Returns
-------
pr : list, type=str
list of message strings
"""
ret_str = ""
ret_str += "\n".join(self.str_ct(header)) + "\n\n"
ret_str += "\n".join(self.str_mct(header)) + "\n\n"
ret_str += "\n".join(self.str_acc(header)) + "\n\n"
ret_str += "\n".join(self.str_jaccard(header)) + "\n\n"
ret_str += "\n".join(self.str_kldiv(header)) + "\n\n"
ret_str += "\n".join(self.str_mi(header)) + "\n\n"
ret_str += "\n".join(self.str_mi2(header)) + "\n\n"
ret_str += "\n".join(self.str_prf(header))
return ret_str
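# Hedged usage example (the counts below are illustrative only):
#   >>> stats = BinClassStats(tp=40, fn=10, fp=5, tn=45)
#   >>> stats.acc()   # -> (0.85, ~0.036)
#   >>> stats.prf()   # -> (precision 40/45, recall 40/50, F1 ~ 0.842)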
#==============================================================================
# Functions
#==============================================================================
#==============================================================================
# Module initialization
#==============================================================================
# init logging system ---------------------------------------------------------
logger = logging.getLogger('fadm')
if not logger.handlers:
    logger.addHandler(logging.NullHandler())
#==============================================================================
# Test routine
#==============================================================================
def _test():
""" test function for this module
"""
# perform doctest
import sys
import doctest
doctest.testmod()
sys.exit(0)
# Check if this is call as command script -------------------------------------
if __name__ == '__main__':
_test()
|
Z - Tool Box/x2john/known_hosts2john.py | dfirpaul/Active-Directory-Exploitation-Cheat-Sheet-1 | 1,290 | 12630540 | #!/usr/bin/env python
# known_hosts2john processes input "known_hosts" files into a format suitable
# for use with JtR.
#
# This software is Copyright (c) 2014, <NAME> <dhiru [at] openwall.com>
#
# This code may be freely used and modified for any purpose.
import sys
def process_file(filename):
for line in open(filename, "rb"):
fields = line.strip().split(" ")
if not line.startswith("|1|"): # is this always the case?
sys.stderr.write("%s\n" % fields[0]) # Assume non-hashed entries; print as seed
continue
sys.stdout.write("$known_hosts$%s\n" % fields[0])
if __name__ == "__main__":
if len(sys.argv) < 2:
sys.stdout.write("Usage: known_hosts2john [known_hosts files]\n")
sys.exit(-1)
for i in range(1, len(sys.argv)):
process_file(sys.argv[i])
|
scripts/interleave-reads.py | sadeepdarshana/khmer | 558 | 12630564 | #! /usr/bin/env python
# This file is part of khmer, https://github.com/dib-lab/khmer/, and is
# Copyright (C) 2011-2015, Michigan State University.
# Copyright (C) 2015, The Regents of the University of California.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# * Neither the name of the Michigan State University nor the names
# of its contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Contact: <EMAIL>
# pylint: disable=invalid-name,missing-docstring
"""
Interleave left and right reads.
Take two files containing left & right reads from a paired-end sequencing run,
and interleave them.
% scripts/interleave-reads.py <R1> <R2> [ -o <outputfile> ]
By default, output is sent to stdout; or use -o. Use '-h' for parameter help.
"""
import screed
import sys
import textwrap
from khmer import __version__
from khmer.kfile import check_input_files, check_space
from khmer.khmer_args import sanitize_help, KhmerArgumentParser
from khmer.khmer_args import FileType as khFileType
from khmer.kfile import (add_output_compression_type, get_file_writer,
describe_file_handle)
from khmer.utils import (write_record_pair, check_is_left, check_is_right,
check_is_pair)
try:
from itertools import zip_longest
except ImportError:
from itertools import izip_longest as zip_longest
def get_parser():
epilog = """\
The output is an interleaved set of reads, with each read in <R1> paired
with a read in <R2>. By default, the output goes to stdout unless
:option:`-o`/:option:`--output` is specified.
As a "bonus", this file ensures that if read names are not already
formatted properly, they are reformatted consistently, such that
they look like the pre-1.8 Casava format (`@name/1`, `@name/2`).
This reformatting can be switched off with the
:option:`--no-reformat` flag.
Example::
interleave-reads.py tests/test-data/paired.fq.1 \\
tests/test-data/paired.fq.2 -o paired.fq"""
parser = KhmerArgumentParser(
description='Produce interleaved files from R1/R2 paired files',
epilog=textwrap.dedent(epilog))
parser.add_argument('left')
parser.add_argument('right')
parser.add_argument('-o', '--output', metavar="filename",
type=khFileType('wb'),
default=sys.stdout)
parser.add_argument('--no-reformat', default=False, action='store_true',
help='Do not reformat read names or enforce\
consistency')
parser.add_argument('-f', '--force', default=False, action='store_true',
help='Overwrite output file if it exists')
add_output_compression_type(parser)
return parser
def main():
args = sanitize_help(get_parser()).parse_args()
check_input_files(args.left, args.force)
check_input_files(args.right, args.force)
check_space([args.left, args.right], args.force)
s1_file = args.left
s2_file = args.right
print("Interleaving:\n\t%s\n\t%s" % (s1_file, s2_file), file=sys.stderr)
outfp = get_file_writer(args.output, args.gzip, args.bzip)
counter = 0
screed_iter_1 = screed.open(s1_file)
screed_iter_2 = screed.open(s2_file)
for read1, read2 in zip_longest(screed_iter_1, screed_iter_2):
if read1 is None or read2 is None:
print(("ERROR: Input files contain different number"
" of records."), file=sys.stderr)
sys.exit(1)
if counter % 100000 == 0:
print('...', counter, 'pairs', file=sys.stderr)
counter += 1
name1 = read1.name
name2 = read2.name
if not args.no_reformat:
if not check_is_left(name1):
name1 += '/1'
if not check_is_right(name2):
name2 += '/2'
read1.name = name1
read2.name = name2
if not check_is_pair(read1, read2):
print("ERROR: This doesn't look like paired data! "
"%s %s" % (read1.name, read2.name), file=sys.stderr)
sys.exit(1)
write_record_pair(read1, read2, outfp)
print('final: interleaved %d pairs' % counter, file=sys.stderr)
print('output written to', describe_file_handle(outfp), file=sys.stderr)
if __name__ == '__main__':
main()
|
util.py | darshanajaint/scene-representation-networks | 349 | 12630567 | import os, struct, math
import numpy as np
import torch
from glob import glob
import cv2
import torch.nn.functional as F
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
def get_latest_file(root_dir):
"""Returns path to latest file in a directory."""
    list_of_files = glob(os.path.join(root_dir, '*'))  # glob is imported as a function, not the module
latest_file = max(list_of_files, key=os.path.getctime)
return latest_file
def parse_comma_separated_integers(string):
return list(map(int, string.split(',')))
def convert_image(img):
if not isinstance(img, np.ndarray):
img = np.array(img.cpu().detach().numpy())
img = img.squeeze()
img = img.transpose(1,2,0)
img += 1.
img /= 2.
img *= 2**8 - 1
img = img.round().clip(0, 2**8-1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
def write_img(img, path):
cv2.imwrite(path, img.astype(np.uint8))
def in_out_to_param_count(in_out_tuples):
return np.sum([np.prod(in_out) + in_out[-1] for in_out in in_out_tuples])
def parse_intrinsics(filepath, trgt_sidelength=None, invert_y=False):
# Get camera intrinsics
with open(filepath, 'r') as file:
f, cx, cy, _ = map(float, file.readline().split())
grid_barycenter = torch.Tensor(list(map(float, file.readline().split())))
scale = float(file.readline())
height, width = map(float, file.readline().split())
try:
world2cam_poses = int(file.readline())
except ValueError:
world2cam_poses = None
if world2cam_poses is None:
world2cam_poses = False
world2cam_poses = bool(world2cam_poses)
if trgt_sidelength is not None:
cx = cx/width * trgt_sidelength
cy = cy/height * trgt_sidelength
f = trgt_sidelength / height * f
fx = f
if invert_y:
fy = -f
else:
fy = f
# Build the intrinsic matrices
full_intrinsic = np.array([[fx, 0., cx, 0.],
[0., fy, cy, 0],
[0., 0, 1, 0],
[0, 0, 0, 1]])
return full_intrinsic, grid_barycenter, scale, world2cam_poses
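# Hedged sketch of the intrinsics file layout this parser expects (inferred from the code):
#   line 1: f cx cy <unused>
#   line 2: grid barycenter (typically 3 floats)
#   line 3: scale
#   line 4: height width
#   line 5: optional 0/1 flag marking world-to-camera poses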
def lin2img(tensor):
batch_size, num_samples, channels = tensor.shape
sidelen = np.sqrt(num_samples).astype(int)
return tensor.permute(0,2,1).view(batch_size, channels, sidelen, sidelen)
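# Hedged example: a batch of flattened square images (B, 64*64, 3) becomes (B, 3, 64, 64):
#   lin2img(torch.rand(2, 4096, 3)).shape  # -> torch.Size([2, 3, 64, 64])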
def num_divisible_by_2(number):
i = 0
while not number%2:
number = number // 2
i += 1
return i
def cond_mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def load_pose(filename):
assert os.path.isfile(filename)
lines = open(filename).read().splitlines()
assert len(lines) == 4
lines = [[x[0],x[1],x[2],x[3]] for x in (x.split(" ") for x in lines)]
return torch.from_numpy(np.asarray(lines).astype(np.float32))
def normalize(img):
return (img - img.min()) / (img.max() - img.min())
def write_image(writer, name, img, iter):
writer.add_image(name, normalize(img.permute([0,3,1,2])), iter)
def print_network(net):
model_parameters = filter(lambda p: p.requires_grad, net.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("%d"%params)
def custom_load(model, path, discriminator=None, overwrite_embeddings=False, overwrite_renderer=False, optimizer=None):
if os.path.isdir(path):
checkpoint_path = sorted(glob(os.path.join(path, "*.pth")))[-1]
else:
checkpoint_path = path
whole_dict = torch.load(checkpoint_path)
if overwrite_embeddings:
del whole_dict['model']['latent_codes.weight']
if overwrite_renderer:
keys_to_remove = [key for key in whole_dict['model'].keys() if 'rendering_net' in key]
for key in keys_to_remove:
print(key)
whole_dict['model'].pop(key, None)
state = model.state_dict()
state.update(whole_dict['model'])
model.load_state_dict(state)
if discriminator:
discriminator.load_state_dict(whole_dict['discriminator'])
if optimizer:
optimizer.load_state_dict(whole_dict['optimizer'])
def custom_save(model, path, discriminator=None, optimizer=None):
whole_dict = {'model':model.state_dict()}
if discriminator:
whole_dict.update({'discriminator':discriminator.state_dict()})
if optimizer:
whole_dict.update({'optimizer':optimizer.state_dict()})
torch.save(whole_dict, path)
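# Hedged usage sketch (added; paths are assumptions, not from the original repo):
# custom_save bundles the model (and optionally discriminator/optimizer) state
# dicts into one file, and custom_load restores from either a checkpoint file or
# a directory of "*.pth" files, picking the lexicographically last one.
def _example_checkpointing(model, optimizer):
    custom_save(model, 'checkpoints/epoch_0010.pth', optimizer=optimizer)
    custom_load(model, 'checkpoints/', optimizer=optimizer)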
def show_images(images, titles=None):
"""Display a list of images in a single figure with matplotlib.
Parameters
---------
images: List of np.arrays compatible with plt.imshow.
    titles: List of titles corresponding to each image. Must have
            the same length as images. The images are laid out on a grid
            with np.ceil(np.sqrt(len(images))) columns.
"""
assert ((titles is None) or (len(images) == len(titles)))
cols = np.ceil(np.sqrt(len(images))).astype(int)
n_images = len(images)
if titles is None: titles = ['Image (%d)' % i for i in range(1, n_images + 1)]
fig = plt.figure()
for n, (image, title) in enumerate(zip(images, titles)):
        a = fig.add_subplot(int(np.ceil(n_images / float(cols))), cols, n + 1)
im = a.imshow(image)
a.get_xaxis().set_visible(False)
a.get_yaxis().set_visible(False)
if len(images) < 10:
divider = make_axes_locatable(a)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(im, cax=cax, orientation='vertical')
plt.tight_layout()
# fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)
return fig
|
test/api/test_api_message.py | thenetcircle/dino | 150 | 12630615 |
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from activitystreams import parse as as_parser
from dino import api
from test.base import BaseTest
class ApiMessageTest(BaseTest):
def test_send_message(self):
self.create_and_join_room()
act = self.activity_for_message()
response_data = api.on_message(act, as_parser(act))
self.assertEqual(200, response_data[0])
|
model.py | WangYueFt/prnet | 105 | 12630623 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import glob
import h5py
import copy
import math
import json
import numpy as np
from tqdm import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from sklearn.metrics import r2_score
from util import transform_point_cloud, npmat2euler, quat2mat
def clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
def attention(query, key, value, mask=None, dropout=None):
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1).contiguous()) / math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask==0, -1e9)
p_attn = F.softmax(scores, dim=-1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
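# Illustrative sketch (added): attention() implements scaled dot-product attention
# and returns both the attended values and the attention weights. Shapes below are
# arbitrary examples.
def _example_attention():
    q = torch.randn(2, 4, 16, 64)        # (batch, heads, queries, d_k)
    k = torch.randn(2, 4, 16, 64)
    v = torch.randn(2, 4, 16, 64)
    out, weights = attention(q, k, v)
    assert out.shape == v.shape and weights.shape == (2, 4, 16, 16)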
def pairwise_distance(src, tgt):
inner = -2 * torch.matmul(src.transpose(2, 1).contiguous(), tgt)
xx = torch.sum(src**2, dim=1, keepdim=True)
yy = torch.sum(tgt**2, dim=1, keepdim=True)
distances = xx.transpose(2, 1).contiguous() + inner + yy
return torch.sqrt(distances)
def knn(x, k):
inner = -2 * torch.matmul(x.transpose(2, 1).contiguous(), x)
xx = torch.sum(x ** 2, dim=1, keepdim=True)
distance = -xx - inner - xx.transpose(2, 1).contiguous()
idx = distance.topk(k=k, dim=-1)[1] # (batch_size, num_points, k)
return idx
def get_graph_feature(x, k=20):
# x = x.squeeze()
x = x.view(*x.size()[:3])
idx = knn(x, k=k) # (batch_size, num_points, k)
batch_size, num_points, _ = idx.size()
device = torch.device('cuda')
idx_base = torch.arange(0, batch_size, device=device).view(-1, 1, 1) * num_points
idx = idx + idx_base
idx = idx.view(-1)
_, num_dims, _ = x.size()
x = x.transpose(2, 1).contiguous() # (batch_size, num_points, num_dims) -> (batch_size*num_points, num_dims) # batch_size * num_points * k + range(0, batch_size*num_points)
feature = x.view(batch_size * num_points, -1)[idx, :]
feature = feature.view(batch_size, num_points, k, num_dims)
x = x.view(batch_size, num_points, 1, num_dims).repeat(1, 1, k, 1)
feature = torch.cat((feature, x), dim=3).permute(0, 3, 1, 2)
return feature
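# Shape sketch (added for illustration): get_graph_feature builds DGCNN edge
# features by concatenating each point's k nearest neighbours with the point
# itself, so a (B, C, N) tensor becomes (B, 2*C, N, k). Note the function
# hard-codes torch.device('cuda'), so inputs must live on the GPU.
def _example_get_graph_feature():
    pts = torch.randn(2, 3, 1024).cuda()   # (batch, channels, num_points)
    feat = get_graph_feature(pts, k=20)    # -> torch.Size([2, 6, 1024, 20])
    assert feat.shape == (2, 6, 1024, 20)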
def cycle_consistency(rotation_ab, translation_ab, rotation_ba, translation_ba):
batch_size = rotation_ab.size(0)
identity = torch.eye(3, device=rotation_ab.device).unsqueeze(0).repeat(batch_size, 1, 1)
return F.mse_loss(torch.matmul(rotation_ab, rotation_ba), identity) + F.mse_loss(translation_ab, -translation_ba)
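# Note (added): cycle_consistency penalises forward/backward predictions that fail
# to invert each other: MSE(R_ab @ R_ba, I) asks the composed rotation to be the
# identity, and MSE(t_ab, -t_ba) asks the two translations to sum to zero.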
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
def forward(self, src, tgt, src_mask, tgt_mask):
"Take in and process masked src and target sequences."
return self.decode(self.encode(src, src_mask), src_mask,
tgt, tgt_mask)
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask):
return self.generator(self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask))
class Generator(nn.Module):
def __init__(self, n_emb_dims):
super(Generator, self).__init__()
self.nn = nn.Sequential(nn.Linear(n_emb_dims, n_emb_dims//2),
nn.BatchNorm1d(n_emb_dims//2),
nn.ReLU(),
nn.Linear(n_emb_dims//2, n_emb_dims//4),
nn.BatchNorm1d(n_emb_dims//4),
nn.ReLU(),
nn.Linear(n_emb_dims//4, n_emb_dims//8),
nn.BatchNorm1d(n_emb_dims//8),
nn.ReLU())
self.proj_rot = nn.Linear(n_emb_dims//8, 4)
self.proj_trans = nn.Linear(n_emb_dims//8, 3)
def forward(self, x):
x = self.nn(x.max(dim=1)[0])
rotation = self.proj_rot(x)
translation = self.proj_trans(x)
rotation = rotation / torch.norm(rotation, p=2, dim=1, keepdim=True)
return rotation, translation
class Encoder(nn.Module):
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class Decoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, memory, src_mask, tgt_mask):
for layer in self.layers:
x = layer(x, memory, src_mask, tgt_mask)
return self.norm(x)
class LayerNorm(nn.Module):
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x-mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
return x + sublayer(self.norm(x))
class EncoderLayer(nn.Module):
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 3)
def forward(self, x, memory, src_mask, tgt_mask):
"Follow Figure 1 (right) for connections."
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
return self.sublayer[2](x, self.feed_forward)
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.0):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2).contiguous()
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.leaky_relu(self.w_1(x), negative_slope=0.2)))
class PointNet(nn.Module):
def __init__(self, n_emb_dims=512):
super(PointNet, self).__init__()
self.conv1 = nn.Conv1d(3, 64, kernel_size=1, bias=False)
self.conv2 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
self.conv3 = nn.Conv1d(64, 64, kernel_size=1, bias=False)
self.conv4 = nn.Conv1d(64, 128, kernel_size=1, bias=False)
self.conv5 = nn.Conv1d(128, n_emb_dims, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm1d(64)
self.bn2 = nn.BatchNorm1d(64)
self.bn3 = nn.BatchNorm1d(64)
self.bn4 = nn.BatchNorm1d(128)
self.bn5 = nn.BatchNorm1d(n_emb_dims)
def forward(self, x):
x = F.relu(self.bn1(self.conv1(x)))
x = F.relu(self.bn2(self.conv2(x)))
x = F.relu(self.bn3(self.conv3(x)))
x = F.relu(self.bn4(self.conv4(x)))
x = F.relu(self.bn5(self.conv5(x)))
return x
class DGCNN(nn.Module):
def __init__(self, n_emb_dims=512):
super(DGCNN, self).__init__()
self.conv1 = nn.Conv2d(6, 64, kernel_size=1, bias=False)
self.conv2 = nn.Conv2d(64*2, 64, kernel_size=1, bias=False)
self.conv3 = nn.Conv2d(64*2, 128, kernel_size=1, bias=False)
self.conv4 = nn.Conv2d(128*2, 256, kernel_size=1, bias=False)
self.conv5 = nn.Conv2d(512, n_emb_dims, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.bn2 = nn.BatchNorm2d(64)
self.bn3 = nn.BatchNorm2d(128)
self.bn4 = nn.BatchNorm2d(256)
self.bn5 = nn.BatchNorm2d(n_emb_dims)
def forward(self, x):
batch_size, num_dims, num_points = x.size()
x = get_graph_feature(x)
x = F.leaky_relu(self.bn1(self.conv1(x)), negative_slope=0.2)
x1 = x.max(dim=-1, keepdim=True)[0]
x = get_graph_feature(x1)
x = F.leaky_relu(self.bn2(self.conv2(x)), negative_slope=0.2)
x2 = x.max(dim=-1, keepdim=True)[0]
x = get_graph_feature(x2)
x = F.leaky_relu(self.bn3(self.conv3(x)), negative_slope=0.2)
x3 = x.max(dim=-1, keepdim=True)[0]
x = get_graph_feature(x3)
x = F.leaky_relu(self.bn4(self.conv4(x)), negative_slope=0.2)
x4 = x.max(dim=-1, keepdim=True)[0]
x = torch.cat((x1, x2, x3, x4), dim=1)
x = F.leaky_relu(self.bn5(self.conv5(x)), negative_slope=0.2).view(batch_size, -1, num_points)
return x
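# Usage sketch (added): DGCNN maps a point cloud (B, 3, N) to per-point embeddings
# (B, n_emb_dims, N) by stacking four edge-conv blocks and a final 1x1 convolution
# over their concatenated outputs. Sizes below are illustrative only; a CUDA
# device is required because get_graph_feature hard-codes it.
def _example_dgcnn_embedding():
    net = DGCNN(n_emb_dims=512).cuda()
    pts = torch.randn(4, 3, 768).cuda()
    emb = net(pts)                        # -> torch.Size([4, 512, 768])
    assert emb.shape == (4, 512, 768)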
class MLPHead(nn.Module):
def __init__(self, args):
super(MLPHead, self).__init__()
n_emb_dims = args.n_emb_dims
self.n_emb_dims = n_emb_dims
self.nn = nn.Sequential(nn.Linear(n_emb_dims*2, n_emb_dims//2),
nn.BatchNorm1d(n_emb_dims//2),
nn.ReLU(),
nn.Linear(n_emb_dims//2, n_emb_dims//4),
nn.BatchNorm1d(n_emb_dims//4),
nn.ReLU(),
nn.Linear(n_emb_dims//4, n_emb_dims//8),
nn.BatchNorm1d(n_emb_dims//8),
nn.ReLU())
self.proj_rot = nn.Linear(n_emb_dims//8, 4)
self.proj_trans = nn.Linear(n_emb_dims//8, 3)
def forward(self, *input):
src_embedding = input[0]
tgt_embedding = input[1]
embedding = torch.cat((src_embedding, tgt_embedding), dim=1)
embedding = self.nn(embedding.max(dim=-1)[0])
rotation = self.proj_rot(embedding)
rotation = rotation / torch.norm(rotation, p=2, dim=1, keepdim=True)
translation = self.proj_trans(embedding)
return quat2mat(rotation), translation
class Identity(nn.Module):
def __init__(self):
super(Identity, self).__init__()
def forward(self, *input):
return input
class Transformer(nn.Module):
def __init__(self, args):
super(Transformer, self).__init__()
self.n_emb_dims = args.n_emb_dims
self.N = args.n_blocks
self.dropout = args.dropout
self.n_ff_dims = args.n_ff_dims
self.n_heads = args.n_heads
c = copy.deepcopy
attn = MultiHeadedAttention(self.n_heads, self.n_emb_dims)
ff = PositionwiseFeedForward(self.n_emb_dims, self.n_ff_dims, self.dropout)
self.model = EncoderDecoder(Encoder(EncoderLayer(self.n_emb_dims, c(attn), c(ff), self.dropout), self.N),
Decoder(DecoderLayer(self.n_emb_dims, c(attn), c(attn), c(ff), self.dropout), self.N),
nn.Sequential(),
nn.Sequential(),
nn.Sequential())
def forward(self, *input):
src = input[0]
tgt = input[1]
src = src.transpose(2, 1).contiguous()
tgt = tgt.transpose(2, 1).contiguous()
tgt_embedding = self.model(src, tgt, None, None).transpose(2, 1).contiguous()
src_embedding = self.model(tgt, src, None, None).transpose(2, 1).contiguous()
return src_embedding, tgt_embedding
class TemperatureNet(nn.Module):
def __init__(self, args):
super(TemperatureNet, self).__init__()
self.n_emb_dims = args.n_emb_dims
self.temp_factor = args.temp_factor
self.nn = nn.Sequential(nn.Linear(self.n_emb_dims, 128),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Linear(128, 128),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Linear(128, 128),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Linear(128, 1),
nn.ReLU())
self.feature_disparity = None
def forward(self, *input):
src_embedding = input[0]
tgt_embedding = input[1]
src_embedding = src_embedding.mean(dim=2)
tgt_embedding = tgt_embedding.mean(dim=2)
residual = torch.abs(src_embedding-tgt_embedding)
self.feature_disparity = residual
return torch.clamp(self.nn(residual), 1.0/self.temp_factor, 1.0*self.temp_factor), residual
class SVDHead(nn.Module):
def __init__(self, args):
super(SVDHead, self).__init__()
self.n_emb_dims = args.n_emb_dims
self.cat_sampler = args.cat_sampler
self.reflect = nn.Parameter(torch.eye(3), requires_grad=False)
self.reflect[2, 2] = -1
self.temperature = nn.Parameter(torch.ones(1)*0.5, requires_grad=True)
self.my_iter = torch.ones(1)
def forward(self, *input):
src_embedding = input[0]
tgt_embedding = input[1]
src = input[2]
tgt = input[3]
batch_size, num_dims, num_points = src.size()
temperature = input[4].view(batch_size, 1, 1)
if self.cat_sampler == 'softmax':
d_k = src_embedding.size(1)
scores = torch.matmul(src_embedding.transpose(2, 1).contiguous(), tgt_embedding) / math.sqrt(d_k)
scores = torch.softmax(temperature*scores, dim=2)
elif self.cat_sampler == 'gumbel_softmax':
d_k = src_embedding.size(1)
scores = torch.matmul(src_embedding.transpose(2, 1).contiguous(), tgt_embedding) / math.sqrt(d_k)
scores = scores.view(batch_size*num_points, num_points)
temperature = temperature.repeat(1, num_points, 1).view(-1, 1)
scores = F.gumbel_softmax(scores, tau=temperature, hard=True)
scores = scores.view(batch_size, num_points, num_points)
else:
raise Exception('not implemented')
src_corr = torch.matmul(tgt, scores.transpose(2, 1).contiguous())
src_centered = src - src.mean(dim=2, keepdim=True)
src_corr_centered = src_corr - src_corr.mean(dim=2, keepdim=True)
H = torch.matmul(src_centered, src_corr_centered.transpose(2, 1).contiguous()).cpu()
R = []
for i in range(src.size(0)):
u, s, v = torch.svd(H[i])
r = torch.matmul(v, u.transpose(1, 0)).contiguous()
r_det = torch.det(r).item()
diag = torch.from_numpy(np.array([[1.0, 0, 0],
[0, 1.0, 0],
[0, 0, r_det]]).astype('float32')).to(v.device)
r = torch.matmul(torch.matmul(v, diag), u.transpose(1, 0)).contiguous()
R.append(r)
R = torch.stack(R, dim=0).cuda()
t = torch.matmul(-R, src.mean(dim=2, keepdim=True)) + src_corr.mean(dim=2, keepdim=True)
if self.training:
self.my_iter += 1
return R, t.view(batch_size, 3)
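# Note (added): SVDHead is a soft-correspondence Kabsch solver. Each source point
# is matched to a score-weighted average of target points, both clouds are
# centred, and the rotation comes from the SVD of the 3x3 cross-covariance
# H = X_c @ Y_c^T, with the determinant correction guarding against reflections;
# the translation then follows as t = mean(Y) - R @ mean(X).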
class KeyPointNet(nn.Module):
def __init__(self, num_keypoints):
super(KeyPointNet, self).__init__()
self.num_keypoints = num_keypoints
def forward(self, *input):
src = input[0]
tgt = input[1]
src_embedding = input[2]
tgt_embedding = input[3]
batch_size, num_dims, num_points = src_embedding.size()
src_norm = torch.norm(src_embedding, dim=1, keepdim=True)
tgt_norm = torch.norm(tgt_embedding, dim=1, keepdim=True)
src_topk_idx = torch.topk(src_norm, k=self.num_keypoints, dim=2, sorted=False)[1]
tgt_topk_idx = torch.topk(tgt_norm, k=self.num_keypoints, dim=2, sorted=False)[1]
src_keypoints_idx = src_topk_idx.repeat(1, 3, 1)
tgt_keypoints_idx = tgt_topk_idx.repeat(1, 3, 1)
src_embedding_idx = src_topk_idx.repeat(1, num_dims, 1)
tgt_embedding_idx = tgt_topk_idx.repeat(1, num_dims, 1)
src_keypoints = torch.gather(src, dim=2, index=src_keypoints_idx)
tgt_keypoints = torch.gather(tgt, dim=2, index=tgt_keypoints_idx)
src_embedding = torch.gather(src_embedding, dim=2, index=src_embedding_idx)
tgt_embedding = torch.gather(tgt_embedding, dim=2, index=tgt_embedding_idx)
return src_keypoints, tgt_keypoints, src_embedding, tgt_embedding
class ACPNet(nn.Module):
def __init__(self, args):
super(ACPNet, self).__init__()
self.n_emb_dims = args.n_emb_dims
self.num_keypoints = args.n_keypoints
self.num_subsampled_points = args.n_subsampled_points
self.logger = Logger(args)
if args.emb_nn == 'pointnet':
self.emb_nn = PointNet(n_emb_dims=self.n_emb_dims)
elif args.emb_nn == 'dgcnn':
self.emb_nn = DGCNN(n_emb_dims=self.n_emb_dims)
else:
raise Exception('Not implemented')
if args.attention == 'identity':
self.attention = Identity()
elif args.attention == 'transformer':
self.attention = Transformer(args=args)
else:
raise Exception("Not implemented")
self.temp_net = TemperatureNet(args)
if args.head == 'mlp':
self.head = MLPHead(args=args)
elif args.head == 'svd':
self.head = SVDHead(args=args)
else:
raise Exception('Not implemented')
if self.num_keypoints != self.num_subsampled_points:
self.keypointnet = KeyPointNet(num_keypoints=self.num_keypoints)
else:
self.keypointnet = Identity()
def forward(self, *input):
src, tgt, src_embedding, tgt_embedding, temperature, feature_disparity = self.predict_embedding(*input)
rotation_ab, translation_ab = self.head(src_embedding, tgt_embedding, src, tgt, temperature)
rotation_ba, translation_ba = self.head(tgt_embedding, src_embedding, tgt, src, temperature)
return rotation_ab, translation_ab, rotation_ba, translation_ba, feature_disparity
def predict_embedding(self, *input):
src = input[0]
tgt = input[1]
src_embedding = self.emb_nn(src)
tgt_embedding = self.emb_nn(tgt)
src_embedding_p, tgt_embedding_p = self.attention(src_embedding, tgt_embedding)
src_embedding = src_embedding + src_embedding_p
tgt_embedding = tgt_embedding + tgt_embedding_p
src, tgt, src_embedding, tgt_embedding = self.keypointnet(src, tgt, src_embedding, tgt_embedding)
temperature, feature_disparity = self.temp_net(src_embedding, tgt_embedding)
return src, tgt, src_embedding, tgt_embedding, temperature, feature_disparity
def predict_keypoint_correspondence(self, *input):
src, tgt, src_embedding, tgt_embedding, temperature, _ = self.predict_embedding(*input)
batch_size, num_dims, num_points = src.size()
d_k = src_embedding.size(1)
scores = torch.matmul(src_embedding.transpose(2, 1).contiguous(), tgt_embedding) / math.sqrt(d_k)
scores = scores.view(batch_size*num_points, num_points)
temperature = temperature.repeat(1, num_points, 1).view(-1, 1)
scores = F.gumbel_softmax(scores, tau=temperature, hard=True)
scores = scores.view(batch_size, num_points, num_points)
return src, tgt, scores
class PRNet(nn.Module):
def __init__(self, args):
super(PRNet, self).__init__()
self.num_iters = args.n_iters
self.logger = Logger(args)
self.discount_factor = args.discount_factor
self.acpnet = ACPNet(args)
self.model_path = args.model_path
self.feature_alignment_loss = args.feature_alignment_loss
self.cycle_consistency_loss = args.cycle_consistency_loss
        if self.model_path != '':
self.load(self.model_path)
if torch.cuda.device_count() > 1:
self.acpnet = nn.DataParallel(self.acpnet)
def forward(self, *input):
rotation_ab, translation_ab, rotation_ba, translation_ba, feature_disparity = self.acpnet(*input)
return rotation_ab, translation_ab, rotation_ba, translation_ba, feature_disparity
def predict(self, src, tgt, n_iters=3):
batch_size = src.size(0)
rotation_ab_pred = torch.eye(3, device=src.device, dtype=torch.float32).view(1, 3, 3).repeat(batch_size, 1, 1)
translation_ab_pred = torch.zeros(3, device=src.device, dtype=torch.float32).view(1, 3).repeat(batch_size, 1)
for i in range(n_iters):
rotation_ab_pred_i, translation_ab_pred_i, rotation_ba_pred_i, translation_ba_pred_i, _ \
= self.forward(src, tgt)
rotation_ab_pred = torch.matmul(rotation_ab_pred_i, rotation_ab_pred)
translation_ab_pred = torch.matmul(rotation_ab_pred_i, translation_ab_pred.unsqueeze(2)).squeeze(2) \
+ translation_ab_pred_i
src = transform_point_cloud(src, rotation_ab_pred_i, translation_ab_pred_i)
return rotation_ab_pred, translation_ab_pred
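    # Note (added): predict() refines the alignment iteratively; each incremental
    # estimate (R_i, t_i) is composed with the running estimate as R <- R_i @ R and
    # t <- R_i @ t + t_i, and the source cloud is re-transformed before the next
    # pass, so the returned pair maps the original source onto the target.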
def _train_one_batch(self, src, tgt, rotation_ab, translation_ab, opt):
opt.zero_grad()
batch_size = src.size(0)
identity = torch.eye(3, device=src.device).unsqueeze(0).repeat(batch_size, 1, 1)
rotation_ab_pred = torch.eye(3, device=src.device, dtype=torch.float32).view(1, 3, 3).repeat(batch_size, 1, 1)
translation_ab_pred = torch.zeros(3, device=src.device, dtype=torch.float32).view(1, 3).repeat(batch_size, 1)
rotation_ba_pred = torch.eye(3, device=src.device, dtype=torch.float32).view(1, 3, 3).repeat(batch_size, 1, 1)
translation_ba_pred = torch.zeros(3, device=src.device, dtype=torch.float32).view(1, 3).repeat(batch_size, 1)
total_loss = 0
total_feature_alignment_loss = 0
total_cycle_consistency_loss = 0
total_scale_consensus_loss = 0
for i in range(self.num_iters):
rotation_ab_pred_i, translation_ab_pred_i, rotation_ba_pred_i, translation_ba_pred_i, \
feature_disparity = self.forward(src, tgt)
rotation_ab_pred = torch.matmul(rotation_ab_pred_i, rotation_ab_pred)
translation_ab_pred = torch.matmul(rotation_ab_pred_i, translation_ab_pred.unsqueeze(2)).squeeze(2) \
+ translation_ab_pred_i
rotation_ba_pred = torch.matmul(rotation_ba_pred_i, rotation_ba_pred)
translation_ba_pred = torch.matmul(rotation_ba_pred_i, translation_ba_pred.unsqueeze(2)).squeeze(2) \
+ translation_ba_pred_i
loss = (F.mse_loss(torch.matmul(rotation_ab_pred.transpose(2, 1), rotation_ab), identity) \
+ F.mse_loss(translation_ab_pred, translation_ab)) * self.discount_factor**i
feature_alignment_loss = feature_disparity.mean() * self.feature_alignment_loss * self.discount_factor**i
cycle_consistency_loss = cycle_consistency(rotation_ab_pred_i, translation_ab_pred_i,
rotation_ba_pred_i, translation_ba_pred_i) \
* self.cycle_consistency_loss * self.discount_factor**i
scale_consensus_loss = 0
total_feature_alignment_loss += feature_alignment_loss
total_cycle_consistency_loss += cycle_consistency_loss
total_loss = total_loss + loss + feature_alignment_loss + cycle_consistency_loss + scale_consensus_loss
src = transform_point_cloud(src, rotation_ab_pred_i, translation_ab_pred_i)
total_loss.backward()
opt.step()
return total_loss.item(), total_feature_alignment_loss.item(), total_cycle_consistency_loss.item(), \
total_scale_consensus_loss, rotation_ab_pred, translation_ab_pred
def _test_one_batch(self, src, tgt, rotation_ab, translation_ab):
batch_size = src.size(0)
identity = torch.eye(3, device=src.device).unsqueeze(0).repeat(batch_size, 1, 1)
rotation_ab_pred = torch.eye(3, device=src.device, dtype=torch.float32).view(1, 3, 3).repeat(batch_size, 1, 1)
translation_ab_pred = torch.zeros(3, device=src.device, dtype=torch.float32).view(1, 3).repeat(batch_size, 1)
rotation_ba_pred = torch.eye(3, device=src.device, dtype=torch.float32).view(1, 3, 3).repeat(batch_size, 1, 1)
translation_ba_pred = torch.zeros(3, device=src.device, dtype=torch.float32).view(1, 3).repeat(batch_size, 1)
total_loss = 0
total_feature_alignment_loss = 0
total_cycle_consistency_loss = 0
total_scale_consensus_loss = 0
for i in range(self.num_iters):
rotation_ab_pred_i, translation_ab_pred_i, rotation_ba_pred_i, translation_ba_pred_i, \
feature_disparity = self.forward(src, tgt)
rotation_ab_pred = torch.matmul(rotation_ab_pred_i, rotation_ab_pred)
translation_ab_pred = torch.matmul(rotation_ab_pred_i, translation_ab_pred.unsqueeze(2)).squeeze(2) \
+ translation_ab_pred_i
rotation_ba_pred = torch.matmul(rotation_ba_pred_i, rotation_ba_pred)
translation_ba_pred = torch.matmul(rotation_ba_pred_i, translation_ba_pred.unsqueeze(2)).squeeze(2) \
+ translation_ba_pred_i
loss = (F.mse_loss(torch.matmul(rotation_ab_pred.transpose(2, 1), rotation_ab), identity) \
+ F.mse_loss(translation_ab_pred, translation_ab)) * self.discount_factor ** i
feature_alignment_loss = feature_disparity.mean() * self.feature_alignment_loss * self.discount_factor ** i
cycle_consistency_loss = cycle_consistency(rotation_ab_pred_i, translation_ab_pred_i,
rotation_ba_pred_i, translation_ba_pred_i) \
* self.cycle_consistency_loss * self.discount_factor ** i
scale_consensus_loss = 0
total_feature_alignment_loss += feature_alignment_loss
total_cycle_consistency_loss += cycle_consistency_loss
total_loss = total_loss + loss + feature_alignment_loss + cycle_consistency_loss + scale_consensus_loss
src = transform_point_cloud(src, rotation_ab_pred_i, translation_ab_pred_i)
return total_loss.item(), total_feature_alignment_loss.item(), total_cycle_consistency_loss.item(), \
total_scale_consensus_loss, rotation_ab_pred, translation_ab_pred
def _train_one_epoch(self, epoch, train_loader, opt):
self.train()
total_loss = 0
rotations_ab = []
translations_ab = []
rotations_ab_pred = []
translations_ab_pred = []
eulers_ab = []
num_examples = 0
total_feature_alignment_loss = 0.0
total_cycle_consistency_loss = 0.0
total_scale_consensus_loss = 0.0
for data in tqdm(train_loader):
src, tgt, rotation_ab, translation_ab, rotation_ba, translation_ba, euler_ab, euler_ba = [d.cuda()
for d in data]
loss, feature_alignment_loss, cycle_consistency_loss, scale_consensus_loss,\
rotation_ab_pred, translation_ab_pred = self._train_one_batch(src, tgt, rotation_ab, translation_ab,
opt)
batch_size = src.size(0)
num_examples += batch_size
total_loss = total_loss + loss * batch_size
total_feature_alignment_loss = total_feature_alignment_loss + feature_alignment_loss * batch_size
total_cycle_consistency_loss = total_cycle_consistency_loss + cycle_consistency_loss * batch_size
total_scale_consensus_loss = total_scale_consensus_loss + scale_consensus_loss * batch_size
rotations_ab.append(rotation_ab.detach().cpu().numpy())
translations_ab.append(translation_ab.detach().cpu().numpy())
rotations_ab_pred.append(rotation_ab_pred.detach().cpu().numpy())
translations_ab_pred.append(translation_ab_pred.detach().cpu().numpy())
eulers_ab.append(euler_ab.cpu().numpy())
avg_loss = total_loss / num_examples
avg_feature_alignment_loss = total_feature_alignment_loss / num_examples
avg_cycle_consistency_loss = total_cycle_consistency_loss / num_examples
avg_scale_consensus_loss = total_scale_consensus_loss / num_examples
rotations_ab = np.concatenate(rotations_ab, axis=0)
translations_ab = np.concatenate(translations_ab, axis=0)
rotations_ab_pred = np.concatenate(rotations_ab_pred, axis=0)
translations_ab_pred = np.concatenate(translations_ab_pred, axis=0)
eulers_ab = np.degrees(np.concatenate(eulers_ab, axis=0))
eulers_ab_pred = npmat2euler(rotations_ab_pred)
r_ab_mse = np.mean((eulers_ab-eulers_ab_pred)**2)
r_ab_rmse = np.sqrt(r_ab_mse)
r_ab_mae = np.mean(np.abs(eulers_ab-eulers_ab_pred))
t_ab_mse = np.mean((translations_ab-translations_ab_pred)**2)
t_ab_rmse = np.sqrt(t_ab_mse)
t_ab_mae = np.mean(np.abs(translations_ab-translations_ab_pred))
r_ab_r2_score = r2_score(eulers_ab, eulers_ab_pred)
t_ab_r2_score = r2_score(translations_ab, translations_ab_pred)
info = {'arrow': 'A->B',
'epoch': epoch,
'stage': 'train',
'loss': avg_loss,
'feature_alignment_loss': avg_feature_alignment_loss,
'cycle_consistency_loss': avg_cycle_consistency_loss,
'scale_consensus_loss': avg_scale_consensus_loss,
'r_ab_mse': r_ab_mse,
'r_ab_rmse': r_ab_rmse,
'r_ab_mae': r_ab_mae,
't_ab_mse': t_ab_mse,
't_ab_rmse': t_ab_rmse,
't_ab_mae': t_ab_mae,
'r_ab_r2_score': r_ab_r2_score,
't_ab_r2_score': t_ab_r2_score}
self.logger.write(info)
return info
def _test_one_epoch(self, epoch, test_loader):
self.eval()
total_loss = 0
rotations_ab = []
translations_ab = []
rotations_ab_pred = []
translations_ab_pred = []
eulers_ab = []
num_examples = 0
total_feature_alignment_loss = 0.0
total_cycle_consistency_loss = 0.0
total_scale_consensus_loss = 0.0
for data in tqdm(test_loader):
src, tgt, rotation_ab, translation_ab, rotation_ba, translation_ba, euler_ab, euler_ba = [d.cuda()
for d in data]
loss, feature_alignment_loss, cycle_consistency_loss, scale_consensus_loss, \
rotation_ab_pred, translation_ab_pred = self._test_one_batch(src, tgt, rotation_ab, translation_ab)
batch_size = src.size(0)
num_examples += batch_size
total_loss = total_loss + loss * batch_size
total_feature_alignment_loss = total_feature_alignment_loss + feature_alignment_loss * batch_size
total_cycle_consistency_loss = total_cycle_consistency_loss + cycle_consistency_loss * batch_size
total_scale_consensus_loss = total_scale_consensus_loss + scale_consensus_loss * batch_size
rotations_ab.append(rotation_ab.detach().cpu().numpy())
translations_ab.append(translation_ab.detach().cpu().numpy())
rotations_ab_pred.append(rotation_ab_pred.detach().cpu().numpy())
translations_ab_pred.append(translation_ab_pred.detach().cpu().numpy())
eulers_ab.append(euler_ab.cpu().numpy())
avg_loss = total_loss / num_examples
avg_feature_alignment_loss = total_feature_alignment_loss / num_examples
avg_cycle_consistency_loss = total_cycle_consistency_loss / num_examples
avg_scale_consensus_loss = total_scale_consensus_loss / num_examples
rotations_ab = np.concatenate(rotations_ab, axis=0)
translations_ab = np.concatenate(translations_ab, axis=0)
rotations_ab_pred = np.concatenate(rotations_ab_pred, axis=0)
translations_ab_pred = np.concatenate(translations_ab_pred, axis=0)
eulers_ab = np.degrees(np.concatenate(eulers_ab, axis=0))
eulers_ab_pred = npmat2euler(rotations_ab_pred)
r_ab_mse = np.mean((eulers_ab - eulers_ab_pred) ** 2)
r_ab_rmse = np.sqrt(r_ab_mse)
r_ab_mae = np.mean(np.abs(eulers_ab - eulers_ab_pred))
t_ab_mse = np.mean((translations_ab - translations_ab_pred) ** 2)
t_ab_rmse = np.sqrt(t_ab_mse)
t_ab_mae = np.mean(np.abs(translations_ab - translations_ab_pred))
r_ab_r2_score = r2_score(eulers_ab, eulers_ab_pred)
t_ab_r2_score = r2_score(translations_ab, translations_ab_pred)
info = {'arrow': 'A->B',
'epoch': epoch,
'stage': 'test',
'loss': avg_loss,
'feature_alignment_loss': avg_feature_alignment_loss,
'cycle_consistency_loss': avg_cycle_consistency_loss,
'scale_consensus_loss': avg_scale_consensus_loss,
'r_ab_mse': r_ab_mse,
'r_ab_rmse': r_ab_rmse,
'r_ab_mae': r_ab_mae,
't_ab_mse': t_ab_mse,
't_ab_rmse': t_ab_rmse,
't_ab_mae': t_ab_mae,
'r_ab_r2_score': r_ab_r2_score,
't_ab_r2_score': t_ab_r2_score}
self.logger.write(info)
return info
def save(self, path):
if torch.cuda.device_count() > 1:
torch.save(self.acpnet.module.state_dict(), path)
else:
torch.save(self.acpnet.state_dict(), path)
def load(self, path):
self.acpnet.load_state_dict(torch.load(path))
class Logger:
def __init__(self, args):
self.path = 'checkpoints/' + args.exp_name
self.fw = open(self.path+'/log', 'a')
self.fw.write(str(args))
self.fw.write('\n')
self.fw.flush()
print(str(args))
with open(os.path.join(self.path, 'args.txt'), 'w') as f:
json.dump(args.__dict__, f, indent=2)
def write(self, info):
arrow = info['arrow']
epoch = info['epoch']
stage = info['stage']
loss = info['loss']
feature_alignment_loss = info['feature_alignment_loss']
cycle_consistency_loss = info['cycle_consistency_loss']
scale_consensus_loss = info['scale_consensus_loss']
r_ab_mse = info['r_ab_mse']
r_ab_rmse = info['r_ab_rmse']
r_ab_mae = info['r_ab_mae']
t_ab_mse = info['t_ab_mse']
t_ab_rmse = info['t_ab_rmse']
t_ab_mae = info['t_ab_mae']
r_ab_r2_score = info['r_ab_r2_score']
t_ab_r2_score = info['t_ab_r2_score']
text = '%s:: Stage: %s, Epoch: %d, Loss: %f, Feature_alignment_loss: %f, Cycle_consistency_loss: %f, ' \
'Scale_consensus_loss: %f, Rot_MSE: %f, Rot_RMSE: %f, ' \
'Rot_MAE: %f, Rot_R2: %f, Trans_MSE: %f, ' \
'Trans_RMSE: %f, Trans_MAE: %f, Trans_R2: %f\n' % \
(arrow, stage, epoch, loss, feature_alignment_loss, cycle_consistency_loss, scale_consensus_loss,
r_ab_mse, r_ab_rmse, r_ab_mae,
r_ab_r2_score, t_ab_mse, t_ab_rmse, t_ab_mae, t_ab_r2_score)
self.fw.write(text)
self.fw.flush()
print(text)
def close(self):
self.fw.close()
if __name__ == '__main__':
print('hello world')
|
regtests/bench/copy_list.py | ahakingdom/Rusthon | 622 | 12630624 | '''copy list micro benchmark'''
from time import time
def main():
if PYTHON=='PYTHONJS':
pythonjs.configure( direct_operator='+' )
pythonjs.configure( direct_keys=True )
pass
a = list(range(1000))
times = []
for i in range(4):
t0 = time()
res = copy_list(a, 10000)
tk = time()
times.append(tk - t0)
avg = sum(times) / len(times)
print(avg)
def copy_list( a, n ):
x = []
for i in range(n):
b = a[:]
for j in range(10):
b.append( j )
x.append( b )
return x
|
bindings/python/cntk/io/tests/io_tests.py | shyamalschandra/CNTK | 17,702 | 12630625 |
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import numpy as np
import cntk as C
import pytest
import sys
from cntk.io import MinibatchSource, CTFDeserializer, CBFDeserializer, \
StreamDefs, StreamDef, \
ImageDeserializer, Base64ImageDeserializer, \
FULL_DATA_SWEEP, INFINITELY_REPEAT, \
DEFAULT_RANDOMIZATION_WINDOW_IN_CHUNKS, \
sequence_to_cntk_text_format, UserMinibatchSource, StreamInformation, \
MinibatchData, UserDeserializer
from cntk.ops.tests.ops_test_utils import cntk_device
from cntk.logging import TraceLevel
import cntk.io.transforms as xforms
from cntk.cntk_py import to_dictionary, MinibatchSourceConfig
from cntk.core import Value
AA = np.asarray
MBDATA_DENSE_1 = r'''0 |S0 0 |S1 0
0 |S0 1 |S1 1
0 |S0 2
0 |S0 3 |S1 3
1 |S0 4
1 |S0 5 |S1 1
1 |S0 6 |S1 2
'''
MBDATA_DENSE_2 = r'''0 |S0 0 |S1 0
0 |S0 1 |S1 1
0 |S0 2
0 |S0 3 |S1 3
0 |S0 4
0 |S0 5 |S1 1
0 |S0 6 |S1 2
'''
MBDATA_SPARSE = r'''0 |x 560:1 |y 1 0 0 0 0
0 |x 0:1
0 |x 0:1
1 |x 560:1 |y 0 1 0 0 0
1 |x 0:1
1 |x 0:1
1 |x 424:1
'''
MBDATA_SPARSE1 = r'''0 |x 560:1
0 |x 0:1
0 |x 0:1
1 |x 560:1
1 |x 0:1
1 |x 0:1
1 |x 424:1
'''
MBDATA_SPARSE2 = r'''0 |y 1 0 0 0 0
1 |y 0 1 0 0 0
'''
def create_temp_file(tmpdir):
tmpfile = str(tmpdir/'mbtest.txt')
with open(tmpfile, 'w') as f:
f.write("|S0 1\n|S0 2\n|S0 3\n|S0 4")
return tmpfile
def create_ctf_deserializer(tmpdir):
tmpfile = create_temp_file(tmpdir)
return CTFDeserializer(tmpfile, StreamDefs(features=StreamDef(field='S0', shape=1)))
def create_config(tmpdir):
tmpfile = create_temp_file(tmpdir)
return MinibatchSourceConfig() \
.add_deserializer(
CTFDeserializer(tmpfile,
StreamDefs(features=StreamDef(field='S0', shape=1))))
def _write_data(tmpdir, data, filename='mbdata.txt'):
tmpfile = str(tmpdir / filename)
with open(tmpfile, 'w') as f:
f.write(data)
return tmpfile
def test_text_format(tmpdir):
tmpfile = _write_data(tmpdir, MBDATA_SPARSE)
input_dim = 1000
num_output_classes = 5
mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
features=StreamDef(field='x', shape=input_dim, is_sparse=True),
labels=StreamDef(field='y', shape=num_output_classes, is_sparse=False)
)), randomize=False)
assert isinstance(mb_source, MinibatchSource)
features_si = mb_source.stream_info('features')
labels_si = mb_source.stream_info('labels')
mb = mb_source.next_minibatch(7)
features = mb[features_si]
# 2 samples, max seq len 4, 1000 dim
assert features.shape == (2, 4, input_dim)
assert features.end_of_sweep
assert features.num_sequences == 2
assert features.num_samples == 7
assert features.is_sparse
labels = mb[labels_si]
# 2 samples, max seq len 1, 5 dim
assert labels.shape == (2, 1, num_output_classes)
assert labels.end_of_sweep
assert labels.num_sequences == 2
assert labels.num_samples == 2
assert not labels.is_sparse
label_data = labels.asarray()
assert np.allclose(label_data,
np.asarray([
[[1., 0., 0., 0., 0.]],
[[0., 1., 0., 0., 0.]]
]))
mb = mb_source.next_minibatch(1)
features = mb[features_si]
labels = mb[labels_si]
assert not features.end_of_sweep
assert not labels.end_of_sweep
assert features.num_samples < 7
assert labels.num_samples == 1
def check_default_config_keys(d):
assert 5 <= len(d.keys())
assert d['frameMode'] is False
assert d['multiThreadedDeserialization'] is False
assert TraceLevel.Warning == d['traceLevel']
assert 'randomize' in d.keys()
assert 'deserializers' in d.keys()
def test_minibatch_source_config_constructor(tmpdir):
ctf = create_ctf_deserializer(tmpdir)
config = MinibatchSourceConfig([ctf], False)
dictionary = to_dictionary(config)
check_default_config_keys(dictionary)
assert 5 == len(dictionary.keys())
assert dictionary['randomize'] is False
config = MinibatchSourceConfig([ctf], True)
dictionary = to_dictionary(config)
check_default_config_keys(dictionary)
assert 8 == len(dictionary.keys())
assert dictionary['randomize'] is True
assert DEFAULT_RANDOMIZATION_WINDOW_IN_CHUNKS == dictionary['randomizationWindow']
assert False == dictionary['sampleBasedRandomizationWindow']
config = MinibatchSourceConfig([ctf]) # 'randomize' is omitted
dictionary = to_dictionary(config)
check_default_config_keys(dictionary)
assert 8 == len(dictionary.keys())
assert dictionary['randomize'] is True
assert DEFAULT_RANDOMIZATION_WINDOW_IN_CHUNKS == dictionary['randomizationWindow']
assert False == dictionary['sampleBasedRandomizationWindow']
def test_minibatch_source_config_sweeps_and_samples(tmpdir):
ctf = create_ctf_deserializer(tmpdir)
config = MinibatchSourceConfig([ctf])
assert INFINITELY_REPEAT == config.max_samples
assert INFINITELY_REPEAT == config.max_sweeps
config.max_samples = 100
config.max_sweeps = 3
assert 100 == config.max_samples
assert 3 == config.max_sweeps
with pytest.raises(Exception):
# to_dictionary will validate the config
dictionary = to_dictionary(config)
config.max_samples = INFINITELY_REPEAT
dictionary = to_dictionary(config)
check_default_config_keys(dictionary)
def test_minibatch_source_config_randomization(tmpdir):
ctf = create_ctf_deserializer(tmpdir)
config = MinibatchSourceConfig([ctf])
dictionary = to_dictionary(config)
check_default_config_keys(dictionary)
assert dictionary['randomize'] is True
config.randomization_window_in_chunks = 0
dictionary = to_dictionary(config)
check_default_config_keys(dictionary)
assert dictionary['randomize'] is False
config.randomization_window_in_chunks = 10
dictionary = to_dictionary(config)
check_default_config_keys(dictionary)
assert dictionary['randomize'] is True
assert 10 == dictionary['randomizationWindow']
assert dictionary['sampleBasedRandomizationWindow'] is False
config.randomization_window_in_samples = 100
with pytest.raises(Exception):
# to_dictionary will validate the config
dictionary = to_dictionary(config)
config.randomization_window_in_chunks = 0
dictionary = to_dictionary(config)
check_default_config_keys(dictionary)
assert dictionary['randomize'] is True
assert 100 == dictionary['randomizationWindow']
assert dictionary['sampleBasedRandomizationWindow'] is True
def test_minibatch_source_config_other_properties(tmpdir):
ctf = create_ctf_deserializer(tmpdir)
config = MinibatchSourceConfig([ctf])
config.is_multithreaded.set(True)
config.trace_level = TraceLevel.Info.value
config.is_frame_mode_enabled = True
dictionary = to_dictionary(config)
assert 8 == len(dictionary.keys())
assert TraceLevel.Info == dictionary['traceLevel']
assert dictionary['frameMode'] is True
assert dictionary['multiThreadedDeserialization'] is True
config.is_multithreaded.set(False)
config.trace_level = 0
config.truncation_length = 123
with pytest.raises(Exception):
# to_dictionary will validate the config
dictionary = to_dictionary(config)
config.is_frame_mode_enabled = False
dictionary = to_dictionary(config)
assert 10 == len(dictionary.keys())
assert 0 == dictionary['traceLevel']
assert dictionary['frameMode'] is False
assert dictionary['multiThreadedDeserialization'] is False
assert dictionary['truncated'] is True
assert 123 == dictionary['truncationLength']
def test_image(tmpdir):
map_file = "input.txt"
mean_file = "mean.txt"
feature_name = "f"
image_width = 100
image_height = 200
num_channels = 3
label_name = "l"
num_classes = 7
transforms = [
xforms.crop(crop_type='randomside', side_ratio=0.5,
jitter_type='uniratio'),
xforms.scale(width=image_width, height=image_height,
channels=num_channels, interpolations='linear'),
xforms.mean(mean_file)]
defs = StreamDefs(f=StreamDef(field='image', transforms=transforms),
l=StreamDef(field='label', shape=num_classes))
image = ImageDeserializer(map_file, defs)
config = to_dictionary(MinibatchSourceConfig([image], randomize=False))
# Multithreading should be on by default for the ImageDeserializer.
assert config['multiThreadedDeserialization'] is True
assert len(config['deserializers']) == 1
d = config['deserializers'][0]
assert d['type'] == 'ImageDeserializer'
assert d['file'] == map_file
assert set(d['input'].keys()) == {label_name, feature_name}
l = d['input'][label_name]
assert l['labelDim'] == num_classes
f = d['input'][feature_name]
assert set(f.keys()) == {'transforms'}
t0, t1, t2, _ = f['transforms']
assert t0['type'] == 'Crop'
assert t1['type'] == 'Scale'
assert t2['type'] == 'Mean'
assert t0['cropType'] == 'randomside'
assert t0['cropSize'] == '0:0'
assert t0['sideRatio'] == '0.5:0.5'
assert t0['aspectRatio'] == '1:1'
assert t0['areaRatio'] == '0:0'
assert t0['jitterType'] == 'uniratio'
assert t1['width'] == image_width
assert t1['height'] == image_height
assert t1['channels'] == num_channels
assert t1['interpolations'] == 'linear'
assert t2['meanFile'] == mean_file
config = to_dictionary(MinibatchSourceConfig([image, image]))
assert len(config['deserializers']) == 2
ctf = create_ctf_deserializer(tmpdir)
config = to_dictionary(MinibatchSourceConfig([image, ctf, image]))
# Multithreading should still be enabled.
assert config['multiThreadedDeserialization'] is True
assert len(config['deserializers']) == 3
# TODO depends on ImageReader.dll
'''
mbs = config.create_minibatch_source()
sis = mbs.stream_infos()
assert set(sis.keys()) == { feature_name, label_name }
'''
def test_image_with_crop_range():
map_file = "input.txt"
feature_name = "f"
image_width = 100
image_height = 200
num_channels = 3
label_name = "l"
num_classes = 7
transforms = [
xforms.crop(crop_type='randomside',
crop_size=(512,424), side_ratio=(0.2, 0.5), area_ratio=(0.1, 0.75), aspect_ratio=(0.3, 0.8),
jitter_type='uniratio')
]
defs = StreamDefs(f=StreamDef(field='image', transforms=transforms),
l=StreamDef(field='label', shape=num_classes))
image = ImageDeserializer(map_file, defs)
config = to_dictionary(MinibatchSourceConfig([image], randomize=False))
assert len(config['deserializers']) == 1
d = config['deserializers'][0]
assert d['type'] == 'ImageDeserializer'
assert d['file'] == map_file
assert set(d['input'].keys()) == {label_name, feature_name}
l = d['input'][label_name]
assert l['labelDim'] == num_classes
f = d['input'][feature_name]
assert set(f.keys()) == {'transforms'}
t0, _ = f['transforms']
assert t0['type'] == 'Crop'
assert t0['cropType'] == 'randomside'
assert t0['cropSize'] == '512:424'
assert t0['sideRatio'] == '0.2:0.5'
assert t0['aspectRatio'] == '0.3:0.8'
assert t0['areaRatio'] == '0.1:0.75'
assert t0['jitterType'] == 'uniratio'
config = to_dictionary(MinibatchSourceConfig([image, image]))
assert len(config['deserializers']) == 2
config = to_dictionary(MinibatchSourceConfig([image, image, image]))
assert len(config['deserializers']) == 3
def test_full_sweep_minibatch(tmpdir):
tmpfile = _write_data(tmpdir, MBDATA_DENSE_1)
mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
features = StreamDef(field='S0', shape=1),
labels = StreamDef(field='S1', shape=1))),
randomization_window_in_chunks=0, max_sweeps=1)
features_si = mb_source.stream_info('features')
labels_si = mb_source.stream_info('labels')
mb = mb_source.next_minibatch(1000)
assert mb[features_si].num_sequences == 2
assert mb[labels_si].num_sequences == 2
features = mb[features_si]
assert features.end_of_sweep
assert len(features.as_sequences()) == 2
expected_features = \
[
[[0], [1], [2], [3]],
[[4], [5], [6]]
]
for res, exp in zip(features.as_sequences(), expected_features):
assert np.allclose(res, exp)
assert np.allclose(features.data.mask,
[[2, 1, 1, 1],
[2, 1, 1, 0]])
labels = mb[labels_si]
assert labels.end_of_sweep
assert len(labels.as_sequences()) == 2
expected_labels = \
[
[[0],[1],[3]],
[[1],[2]]
]
for res, exp in zip(labels.as_sequences(), expected_labels):
assert np.allclose(res, exp)
assert np.allclose(labels.data.mask,
[[2, 1, 1],
[2, 1, 0]])
def test_max_samples(tmpdir):
mb_source = MinibatchSource(
create_ctf_deserializer(tmpdir), max_samples=1)
input_map = {'features': mb_source['features']}
mb = mb_source.next_minibatch(10, input_map)
assert 'features' in mb
assert mb['features'].num_samples == 1
assert not mb['features'].end_of_sweep
mb = mb_source.next_minibatch(10, input_map)
assert not mb
def test_max_sweeps(tmpdir):
# set max sweeps to 3 (12 samples altogether).
mb_source = MinibatchSource(
create_ctf_deserializer(tmpdir), max_sweeps=3)
input_map = {'features': mb_source['features']}
for i in range(2):
mb = mb_source.next_minibatch(5, input_map)
assert 'features' in mb
assert mb['features'].num_samples == 5
assert mb['features'].end_of_sweep
mb = mb_source.next_minibatch(5, input_map)
assert 'features' in mb
assert mb['features'].num_samples == 2
assert mb['features'].end_of_sweep
mb = mb_source.next_minibatch(1, input_map)
assert not mb
def test_max_samples_over_several_sweeps(tmpdir):
mb_source = MinibatchSource(
create_ctf_deserializer(tmpdir), max_samples=11)
input_map = {'features': mb_source['features']}
for i in range(2):
mb = mb_source.next_minibatch(5, input_map)
assert 'features' in mb
assert mb['features'].num_samples == 5
assert mb['features'].end_of_sweep
mb = mb_source.next_minibatch(5, input_map)
assert 'features' in mb
assert mb['features'].num_samples == 1
assert not mb['features'].end_of_sweep
mb = mb_source.next_minibatch(1, input_map)
assert not mb
def test_one_sweep(tmpdir):
ctf = create_ctf_deserializer(tmpdir)
sources = [MinibatchSource(ctf, max_sweeps=1),
MinibatchSource(ctf, max_samples=FULL_DATA_SWEEP),
MinibatchSource(ctf, max_sweeps=1, max_samples=INFINITELY_REPEAT),
MinibatchSource(ctf, max_samples=FULL_DATA_SWEEP, max_sweeps=INFINITELY_REPEAT)]
for source in sources:
input_map = {'features': source['features']}
mb = source.next_minibatch(100, input_map)
assert 'features' in mb
assert mb['features'].num_samples == 4
assert mb['features'].end_of_sweep
mb = source.next_minibatch(100, input_map)
assert not mb
def test_random_seed(tmpdir):
ctf = create_ctf_deserializer(tmpdir)
sources = [MinibatchSource(ctf),
MinibatchSource(ctf, randomization_seed=123),
MinibatchSource(ctf, randomization_seed=0),
MinibatchSource(ctf, randomization_seed=1)]
data = []
for source in sources:
input_map = {'features': source['features']}
mb = source.next_minibatch(100, input_map)
data.append(mb['features'].asarray())
assert not (data[0] == data[1]).all()
assert (data[0] == data[2]).all()
# after the first sweep (= 4 samples), the first reader is seeded
# with 1, and should produce results identical to the last reader.
assert (data[0][4:] == data[3][:-4]).all()
def test_large_minibatch(tmpdir):
tmpfile = _write_data(tmpdir, MBDATA_DENSE_2)
mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
features = StreamDef(field='S0', shape=1),
labels = StreamDef(field='S1', shape=1))),
randomization_window_in_chunks=0)
features_si = mb_source.stream_info('features')
labels_si = mb_source.stream_info('labels')
mb = mb_source.next_minibatch(1000)
features = mb[features_si]
labels = mb[labels_si]
# Actually, the minibatch spans over multiple sweeps,
# not sure if this is an artificial situation, but
# maybe instead of a boolean flag we should indicate
# the largest sweep index the data was taken from.
assert features.end_of_sweep
assert labels.end_of_sweep
assert features.num_samples == 1000 - 1000 % 7
assert labels.num_samples == 5 * (1000 // 7)
assert mb[features_si].num_sequences == (1000 // 7)
assert mb[labels_si].num_sequences == (1000 // 7)
@pytest.mark.parametrize("idx, alias_tensor_map, expected", [
(0, {'A': [object()]}, ValueError),
])
def test_sequence_conversion_exceptions(idx, alias_tensor_map, expected):
with pytest.raises(expected):
sequence_to_cntk_text_format(idx, alias_tensor_map)
@pytest.mark.parametrize("idx, alias_tensor_map, expected", [
(0, {'W': AA([])}, ""),
(0, {'W': AA([[[1, 0, 0, 0], [1, 0, 0, 0]]])}, """\
0\t|W 1 0 0 0 1 0 0 0\
"""),
(0, {
'W': AA([[[1, 0, 0, 0], [1, 0, 0, 0]]]),
'L': AA([[[2]]])
},
"""\
0\t|L 2 |W 1 0 0 0 1 0 0 0\
"""),
(0, {
'W': AA([[[1, 0], [1, 0]], [[5, 6], [7, 8]]]),
'L': AA([[[2]]])
},
"""\
0\t|L 2 |W 1 0 1 0
0\t|W 5 6 7 8"""),
])
def test_sequence_conversion_dense(idx, alias_tensor_map, expected):
assert sequence_to_cntk_text_format(idx, alias_tensor_map) == expected
@pytest.mark.parametrize("data, expected", [
([1], True),
([[1, 2]], True),
([[AA([1, 2])]], False),
([AA([1, 2])], False),
([AA([1, 2]), AA([])], False),
])
def test_is_tensor(data, expected):
from cntk.io import _is_tensor
assert _is_tensor(data) == expected
def test_create_two_image_deserializers(tmpdir):
mbdata = r'''filename 0
filename2 0
'''
map_file = str(tmpdir / 'mbdata.txt')
with open(map_file, 'w') as f:
f.write(mbdata)
image_width = 100
image_height = 200
num_channels = 3
transforms = [xforms.crop(crop_type='randomside', side_ratio=0.5,
jitter_type='uniratio'),
xforms.scale(width=image_width, height=image_height,
channels=num_channels, interpolations='linear')]
image1 = ImageDeserializer(
map_file, StreamDefs(f1=StreamDef(field='image',
transforms=transforms)))
image2 = ImageDeserializer(
map_file, StreamDefs(f2=StreamDef(field='image',
transforms=transforms)))
mb_source = MinibatchSource([image1, image2])
assert isinstance(mb_source, MinibatchSource)
def test_base64_image_deserializer(tmpdir):
import io, base64, uuid; from PIL import Image
images, b64_images = [], []
np.random.seed(1)
for i in range(10):
data = np.random.randint(0, 2**8, (5,7,3))
image = Image.fromarray(data.astype('uint8'), "RGB")
buf = io.BytesIO()
image.save(buf, format='PNG')
assert image.width == 7 and image.height == 5
b64_images.append(base64.b64encode(buf.getvalue()))
images.append(np.array(image))
image_data = str(tmpdir / 'mbdata1.txt')
seq_ids = []
uid = uuid.uuid1().int >> 64
with open(image_data, 'wb') as f:
for i,data in enumerate(b64_images):
seq_id = uid ^ i
seq_id = str(seq_id).encode('ascii')
seq_ids.append(seq_id)
line = seq_id + b'\t'
label = str(i).encode('ascii')
line += label + b'\t' + data + b'\n'
f.write(line)
ctf_data = str(tmpdir / 'mbdata2.txt')
with open(ctf_data, 'wb') as f:
for i, sid in enumerate(seq_ids):
line = sid + b'\t' + b'|index '+str(i).encode('ascii') + b'\n'
f.write(line)
transforms = [xforms.scale(width=7, height=5, channels=3)]
b64_deserializer = Base64ImageDeserializer(image_data,
StreamDefs(
images=StreamDef(field='image', transforms=transforms),
labels=StreamDef(field='label', shape=10)))
ctf_deserializer = CTFDeserializer(ctf_data,
StreamDefs(index=StreamDef(field='index', shape=1)))
mb_source = MinibatchSource([ctf_deserializer, b64_deserializer])
assert isinstance(mb_source, MinibatchSource)
for j in range(100):
mb = mb_source.next_minibatch(10)
index_stream = mb_source.streams['index']
index = mb[index_stream].asarray().flatten()
image_stream = mb_source.streams['images']
results = mb[image_stream].asarray()
for i in range(10):
# original images are RBG, openCV produces BGR images,
# reverse the last dimension of the original images
bgrImage = images[int(index[i])][:,:,::-1]
# transposing to get CHW representation
bgrImage = np.transpose(bgrImage, (2, 0, 1))
assert (bgrImage == results[i][0]).all()
class MyDataSource(UserMinibatchSource):
def __init__(self, f_dim, l_dim):
self.f_dim, self.l_dim = f_dim, l_dim
self.fsi = StreamInformation("features", 0, 'sparse', np.float32, (self.f_dim,))
self.lsi = StreamInformation("labels", 1, 'dense', np.float32, (self.l_dim,))
        # MBDATA_SPARSE fits into memory, so we will read it in all at
        # once. It follows the CNTKTextFormat:
# sequence ID |feature1 data |feature2 data
# where in this case feature1's data is encoded as one-hot and we will
# convert to CSR, and feature2's data is a one-hot encoded as dense.
# We will store
# sequence id -> "features" -> list of features
# and
# sequence id -> "labels" -> label
self.data = {}
for line in MBDATA_SPARSE.split('\n'):
line = line.strip()
if not line:
continue
seq_id, data = line.split('|', 1)
data = data.split("|")
seq_id = int(seq_id.strip())
if seq_id not in self.data:
self.data[seq_id] = {'features': []}
# Processing features - expecting one per line.
# We accumulate the vocabulary indices and convert them into a
# Value object when requested in next_minibatch()
features = data[0].split(" ")
assert features[0] == 'x'
vocab_idx = int(features[1].split(":")[0])
self.data[seq_id]['features'].append(vocab_idx)
# Process label, if exists
if len(data) == 2:
# Only one label definition per sequence allowed
assert 'labels' not in self.data[seq_id]
labels = data[1].split(" ")
assert labels[0] == 'y'
# We don't have many label classes, and only one label per
# sequence, so we just read it in as dense, all at once.
val = np.asarray([labels[1:]], dtype=np.float32)
self.data[seq_id]['labels'] = val
self.sequences = sorted(self.data)
self.next_seq_idx = 0
super(MyDataSource, self).__init__()
def stream_infos(self):
return [self.fsi, self.lsi]
def next_minibatch(self, num_samples, number_of_workers, worker_rank, device=None):
features = []
labels = []
sweep_end = False
f_sample_count = 0
l_sample_count = 0
while max(f_sample_count, l_sample_count) < num_samples:
if self.next_seq_idx == len(self.sequences):
sweep_end = True
self.next_seq_idx = 0
seq_id = self.sequences[self.sequences[self.next_seq_idx]]
f_data = self.data[seq_id]['features']
l_data = self.data[seq_id]['labels']
if (features or labels) and max(f_sample_count+len(f_data), l_sample_count+len(l_data)) > num_samples:
break
f_sample_count += len(f_data)
features.append(f_data)
l_sample_count += len(l_data)
labels.append(l_data)
self.next_seq_idx += 1
num_seq = len(features)
f_data = Value.one_hot(batch=features, num_classes=self.f_dim)
l_data = Value(batch=np.asarray(labels, dtype=np.float32))
result = {
self.fsi: MinibatchData(f_data, num_seq, f_sample_count, sweep_end),
self.lsi: MinibatchData(l_data, num_seq, l_sample_count, sweep_end)
}
return result
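# Minimal usage sketch (added; not part of the CNTK test suite): a
# UserMinibatchSource is consumed by repeatedly requesting minibatches and mapping
# its StreamInformation objects to the returned MinibatchData. Dimensions mirror
# the toy MBDATA_SPARSE data above.
def _example_user_source():
    source = MyDataSource(f_dim=1000, l_dim=5)
    mb = source.next_minibatch(4, number_of_workers=1, worker_rank=0)
    features, labels = mb[source.fsi], mb[source.lsi]
    return features.num_samples, labels.num_samples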
class MyDataSourceWithCheckpoint(MyDataSource):
def __init__(self, f_dim, l_dim):
super(MyDataSourceWithCheckpoint, self).__init__(f_dim, l_dim)
self._restore_from_checkpoint_calls = 0
def get_checkpoint_state(self):
return {'test': 12}
def restore_from_checkpoint(self, state):
self._restore_from_checkpoint_calls += 1
assert state == {'test': 12}
def test_usermbsource(tmpdir):
tmpfile = _write_data(tmpdir, MBDATA_SPARSE)
input_dim = 1000
num_output_classes = 5
# Setting up the native MB source as the ground truth
n_mb_source = CTFDeserializer(tmpfile, StreamDefs(
features=StreamDef(field='x', shape=input_dim, is_sparse=True),
labels=StreamDef(field='y', shape=num_output_classes, is_sparse=False)
))
n_mb_source = MinibatchSource(n_mb_source, randomize=False)
n_features_si = n_mb_source['features']
n_labels_si = n_mb_source['labels']
n_mb = n_mb_source.next_minibatch(2)
n_features = n_mb[n_features_si]
n_labels = n_mb[n_labels_si]
# Setting up the user MB source
u_mb_source = MyDataSource(input_dim, num_output_classes)
u_features_si = u_mb_source['features']
u_labels_si = u_mb_source['labels']
u_mb = u_mb_source.next_minibatch(2, 1, 0)
u_features = u_mb[u_features_si]
u_labels = u_mb[u_labels_si]
assert u_features.shape == n_features.shape == (1, 3, 1000)
assert u_features.end_of_sweep == n_features.end_of_sweep
assert u_features.num_sequences == n_features.num_sequences
assert u_features.num_samples == n_features.num_samples
assert u_features.is_sparse == n_features.is_sparse
assert u_labels.shape == n_labels.shape == (1, 1, 5)
assert u_labels.end_of_sweep is n_labels.end_of_sweep is False
    assert u_labels.num_sequences == n_labels.num_sequences
    assert u_labels.num_samples == n_labels.num_samples
assert u_labels.is_sparse is n_labels.is_sparse is False
u_label_data = u_labels.asarray()
n_label_data = n_labels.asarray()
assert np.allclose(u_label_data, n_label_data)
n_mb = n_mb_source.next_minibatch(10)
n_features = n_mb[n_features_si]
n_labels = n_mb[n_labels_si]
u_mb = u_mb_source.next_minibatch(10, 1, 0)
u_features = u_mb[u_features_si]
u_labels = u_mb[u_labels_si]
assert u_labels.shape == n_labels.shape
u_label_data = u_labels.asarray()
n_label_data = n_labels.asarray()
assert np.allclose(u_label_data, n_label_data)
assert u_features.end_of_sweep is u_labels.end_of_sweep is True
assert u_features.num_samples == n_features.num_samples
assert u_features.num_sequences == n_features.num_sequences
@pytest.mark.parametrize("with_checkpoint_impl", [True, False])
def test_usermbsource_training(tmpdir, with_checkpoint_impl):
input_dim = 1000
num_output_classes = 5
mbs = MyDataSource(input_dim, num_output_classes)
# Using this for testing the UserMinibatchSource checkpointing
if with_checkpoint_impl:
MBS_CV_CLASS = MyDataSourceWithCheckpoint
else:
MBS_CV_CLASS = MyDataSource
mbs_cv = MBS_CV_CLASS(input_dim, num_output_classes)
from cntk import sequence, parameter, plus, cross_entropy_with_softmax, \
classification_error, learning_parameter_schedule_per_sample, sgd, Trainer, \
training_session, times
feature = sequence.input_variable(shape=(input_dim,))
label = C.input_variable(shape=(num_output_classes,))
p = parameter(shape=(input_dim, num_output_classes), init=10)
z = times(sequence.reduce_sum(feature), p, name='z')
ce = cross_entropy_with_softmax(z, label)
errs = classification_error(z, label)
    # use a large learning rate so the model does not converge before all the
    # intended samples have been fed;
    # note that the training session can end early if there are no updates
lr_per_sample = learning_parameter_schedule_per_sample(0.3)
learner = sgd(z.parameters, lr_per_sample)
trainer = Trainer(z, (ce, errs), [learner])
input_map = {
feature: mbs.fsi,
label: mbs.lsi
}
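    # Drive the user minibatch source through a full training_session, with a
    # second instance used only for cross-validation so that its checkpointing
    # hooks get exercised.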
session = training_session(
trainer=trainer, mb_source=mbs,
model_inputs_to_streams=input_map,
mb_size=4, max_samples=20,
cv_config = C.CrossValidationConfig(minibatch_source=mbs_cv, max_samples=10,
minibatch_size=2)
)
session.train()
assert trainer.total_number_of_samples_seen == 20
if with_checkpoint_impl:
assert mbs_cv._restore_from_checkpoint_calls == 1
def test_minibatch_defined_by_labels(tmpdir):
input_dim = 1000
num_output_classes = 5
def assert_data(mb_source):
features_si = mb_source.stream_info('features')
labels_si = mb_source.stream_info('labels')
mb = mb_source.next_minibatch(2)
features = mb[features_si]
# 2 samples, max seq len 4, 1000 dim
assert features.shape == (2, 4, input_dim)
assert features.end_of_sweep
assert features.num_sequences == 2
assert features.num_samples == 7
assert features.is_sparse
labels = mb[labels_si]
# 2 samples, max seq len 1, 5 dim
assert labels.shape == (2, 1, num_output_classes)
assert labels.end_of_sweep
assert labels.num_sequences == 2
assert labels.num_samples == 2
assert not labels.is_sparse
label_data = labels.asarray()
assert np.allclose(label_data,
np.asarray([
[[1., 0., 0., 0., 0.]],
[[0., 1., 0., 0., 0.]]
]))
mb = mb_source.next_minibatch(3)
features = mb[features_si]
labels = mb[labels_si]
assert features.num_samples == 10
assert labels.num_samples == 3
tmpfile = _write_data(tmpdir, MBDATA_SPARSE)
mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
features=StreamDef(field='x', shape=input_dim, is_sparse=True),
labels=StreamDef(field='y', shape=num_output_classes, is_sparse=False, defines_mb_size=True)
)), randomize=False)
assert_data(mb_source)
tmpfile1 = _write_data(tmpdir, MBDATA_SPARSE1, '1')
tmpfile2 = _write_data(tmpdir, MBDATA_SPARSE2, '2')
combined_mb_source = MinibatchSource([ CTFDeserializer(tmpfile1, StreamDefs(
features=StreamDef(field='x', shape=input_dim, is_sparse=True))),
CTFDeserializer(tmpfile2, StreamDefs(
labels=StreamDef(field='y', shape=num_output_classes, is_sparse=False, defines_mb_size=True)
))], randomize=False)
assert_data(combined_mb_source)
# Create base64 and usual image deserializers
# and check that they give equal minibatch data on
# the same input images
def test_base64_is_equal_image(tmpdir):
import io, base64; from PIL import Image
np.random.seed(1)
file_mapping_path = str(tmpdir / 'file_mapping.txt')
base64_mapping_path = str(tmpdir / 'base64_mapping.txt')
with open(file_mapping_path, 'w') as file_mapping:
with open(base64_mapping_path, 'w') as base64_mapping:
for i in range(10):
data = np.random.randint(0, 2**8, (5,7,3))
image = Image.fromarray(data.astype('uint8'), "RGB")
buf = io.BytesIO()
image.save(buf, format='PNG')
assert image.width == 7 and image.height == 5
label = str(i)
# save to base 64 mapping file
encoded = base64.b64encode(buf.getvalue()).decode('ascii')
base64_mapping.write('%s\t%s\n' % (label, encoded))
# save to mapping + png file
file_name = label + '.png'
with open(str(tmpdir/file_name), 'wb') as f:
f.write(buf.getvalue())
file_mapping.write('.../%s\t%s\n' % (file_name, label))
transforms = [xforms.scale(width=7, height=5, channels=3)]
b64_deserializer = Base64ImageDeserializer(base64_mapping_path,
StreamDefs(
images1=StreamDef(field='image', transforms=transforms),
labels1=StreamDef(field='label', shape=10)))
file_image_deserializer = ImageDeserializer(file_mapping_path,
StreamDefs(
images2=StreamDef(field='image', transforms=transforms),
labels2=StreamDef(field='label', shape=10)))
mb_source = MinibatchSource([b64_deserializer, file_image_deserializer])
for j in range(20):
mb = mb_source.next_minibatch(1)
images1_stream = mb_source.streams['images1']
images1 = mb[images1_stream].asarray()
images2_stream = mb_source.streams['images2']
images2 = mb[images2_stream].asarray()
assert(images1 == images2).all()
def test_crop_dimensionality(tmpdir):
import io; from PIL import Image
np.random.seed(1)
file_mapping_path = str(tmpdir / 'file_mapping.txt')
with open(file_mapping_path, 'w') as file_mapping:
for i in range(5):
data = np.random.randint(0, 2**8, (20, 40, 3))
image = Image.fromarray(data.astype('uint8'), "RGB")
buf = io.BytesIO()
image.save(buf, format='PNG')
assert image.width == 40 and image.height == 20
label = str(i)
# save to mapping + png file
file_name = label + '.png'
with open(str(tmpdir/file_name), 'wb') as f:
f.write(buf.getvalue())
file_mapping.write('.../%s\t%s\n' % (file_name, label))
transforms1 = [
xforms.scale(width=40, height=20, channels=3),
xforms.crop(crop_type='randomside',
crop_size=(20, 10), side_ratio=(0.2, 0.5),
jitter_type='uniratio')]
transforms2 = [
xforms.crop(crop_type='randomside',
crop_size=(20, 10), side_ratio=(0.2, 0.5),
jitter_type='uniratio')]
d1 = ImageDeserializer(file_mapping_path,
StreamDefs(
images1=StreamDef(field='image', transforms=transforms1),
labels1=StreamDef(field='label', shape=10)))
d2 = ImageDeserializer(file_mapping_path,
StreamDefs(
images2=StreamDef(field='image', transforms=transforms2),
labels2=StreamDef(field='label', shape=10)))
mbs = MinibatchSource([d1, d2])
for j in range(5):
mb = mbs.next_minibatch(1)
images1 = mb[mbs.streams.images1].asarray()
images2 = mb[mbs.streams.images2].asarray()
assert images1.shape == (1, 1, 3, 10, 20)
assert (images1 == images2).all()
def test_prefetch_with_unpacking(tmpdir):
data = r'''0 |S0 1 1 1 1 |S1 1000
1 |S0 2 2 2 2 |S1 100
2 |S0 3 3 3 3 |S1 100
3 |S0 1 1 1 1 |S1 10
4 |S0 2 2 2 2 |S1 1
5 |S0 3 3 3 3 |S1 2000
6 |S0 1 1 1 1 |S1 200
7 |S0 2 2 2 2 |S1 200
8 |S0 3 3 3 3 |S1 20
9 |S0 1 1 1 1 |S1 2
'''
import time
tmpfile = _write_data(tmpdir, data)
input_dim = 4
num_output_classes = 1
mb_source = MinibatchSource(CTFDeserializer(tmpfile, StreamDefs(
features=StreamDef(field='S0', shape=input_dim, is_sparse=False),
labels=StreamDef(field='S1', shape=num_output_classes, is_sparse=False)
)), randomize=False, max_samples=FULL_DATA_SWEEP)
input_map = { 'S0' : mb_source.streams.features, 'S1' : mb_source.streams.labels }
empty = False
mb_size = 3
    # On the last minibatch a resize will be triggered,
    # because 10 % 3 leaves only 1 sample for that minibatch
while not empty:
mb = mb_source.next_minibatch(mb_size, input_map=input_map)
time.sleep(1) # make sure the prefetch kicks in
if mb:
# Force unpacking to check that we do
# not break prefetch
actual_size = mb['S0'].shape[0]
assert (mb['S0'].asarray() == np.array([[[1, 1, 1, 1]],
[[2, 2, 2, 2]],
[[3, 3, 3, 3]]], dtype=np.float32)[0:actual_size]).all()
else:
empty = True
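# Header lines passed to ctf2bin.process: one per stream, of the form
# "<name> <alias> dense|sparse <dim>".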
def get_cbf_header(streams):
get_header_line = lambda x,y: \
[x, y.stream_alias, 'sparse' if y.is_sparse else 'dense', str(y.dim)]
return [' '.join(get_header_line(k,v)) for k,v in streams.items()]
input_files = [
MBDATA_DENSE_1,
MBDATA_DENSE_2,
MBDATA_SPARSE,
MBDATA_SPARSE1,
MBDATA_SPARSE2
]
stream_defs = [
StreamDefs(
features=StreamDef(field='S0', shape=1),
labels=StreamDef(field='S1', shape=1)
),
StreamDefs(
features=StreamDef(field='S0', shape=1),
labels=StreamDef(field='S1', shape=1)
),
StreamDefs(
features=StreamDef(field='x', shape=1000, is_sparse=True),
labels=StreamDef(field='y', shape=5, is_sparse=False)
),
StreamDefs(
features=StreamDef(field='x', shape=1000, is_sparse=True),
),
StreamDefs(
labels=StreamDef(field='y', shape=5, is_sparse=False)
),
]
@pytest.mark.parametrize("input_pair", zip(input_files, stream_defs))
def test_compare_cbf_and_ctf(input_pair, device_id, tmpdir):
try:
import ctf2bin
except ImportError:
pytest.skip("ctf2bin not found")
device = cntk_device(device_id)
tmpfile = _write_data(tmpdir, input_pair[0])
streams = input_pair[1]
ctf2bin.process(tmpfile, tmpfile+'.bin', get_cbf_header(streams), ctf2bin.ElementType.FLOAT)
def compare_cbf_and_ctf(num_mbs, mb_size, randomize):
ctf = MinibatchSource(CTFDeserializer(tmpfile, streams), randomize=randomize)
cbf = MinibatchSource(CBFDeserializer(tmpfile+'.bin', streams), randomize=randomize)
ctf_stream_names = sorted([x.m_name for x in ctf.stream_infos()])
cbf_stream_names = sorted([x.m_name for x in cbf.stream_infos()])
assert(ctf_stream_names == cbf_stream_names)
for _ in range(num_mbs):
ctf_mb = ctf.next_minibatch(mb_size, device=device)
cbf_mb = cbf.next_minibatch(mb_size, device=device)
for name in cbf_stream_names:
ctf_data = ctf_mb[ctf[name]]
cbf_data = cbf_mb[cbf[name]]
assert ctf_data.num_samples == cbf_data.num_samples
assert ctf_data.num_sequences == cbf_data.num_sequences
assert ctf_data.shape == cbf_data.shape
assert ctf_data.end_of_sweep == cbf_data.end_of_sweep
assert ctf_data.is_sparse == cbf_data.is_sparse
assert ctf_data.data.masked_count() == cbf_data.data.masked_count()
# XXX:
# assert(ctf_data.asarray() == cbf_data.asarray()).all()
# not using asarray because for sparse values it fails with
# some strange exception "sum of the rank of the mask and Variable
                # rank does not equal the Value's rank".
assert C.cntk_py.are_equal(ctf_data.data.data, cbf_data.data.data)
if (ctf_data.data.masked_count() > 0):
assert (ctf_data.data.mask == cbf_data.data.mask).all()
# XXX: if mask_count is zero, mb_data.data.mask fails with
# "AttributeError: 'Value' object has no attribute 'mask'"!
# XXX: without invoking erase, next_minibatch will fail with:
# "Resize: Cannot resize the matrix because it is a view."
ctf_data.data.erase()
cbf_data.data.erase()
for randomize in [False, True]:
for (num_mbs, mb_size) in zip([1, 1, 3, 10], [1, 10, 100, 2]):
compare_cbf_and_ctf(num_mbs, mb_size, randomize)
class SimpleDeserailizer(UserDeserializer):
def __init__(self, stream_infos, chunk_data):
super(SimpleDeserailizer, self).__init__()
self._streams = stream_infos
self._chunk_data = chunk_data
def stream_infos(self):
return self._streams
def num_chunks(self):
return len(self._chunk_data)
def get_chunk(self, chunk_id):
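        # chunk_id is taken modulo the number of chunks, so repeated sweeps
        # simply cycle through the same fixed chunk_data list.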
return self._chunk_data[chunk_id % self.num_chunks()]
def test_user_deserializer():
N = 5
N_seq = 3
chunk_data = [
{'x': [d for d in np.arange(N * N_seq * 3).reshape((N, N_seq, 3)).astype(np.float32)],
'y': [d for d in np.arange(N * 1).reshape((N, 1)).astype(np.float32)],
},
{'x': [d for d in np.arange(N * N_seq * 3).reshape((N, N_seq, 3)).astype(np.float32)],
'y': [d for d in np.arange(N * 1).reshape((N, 1)).astype(np.float32)],
}
]
x = C.sequence.input_variable(3, name='x')
y = C.input_variable(1, name='y')
#test StreamInformation with defines_mb_size set
d = SimpleDeserailizer(stream_infos=[StreamInformation('x', 0, 'dense', np.float32, (3,)),
StreamInformation('y', 1, 'dense', np.float32, (1,), True)],
chunk_data=chunk_data)
mbs = MinibatchSource([d], randomize=False)
input_map = {x: mbs['x'], y: mbs['y']}
mb_size = 5
batch = mbs.next_minibatch(mb_size, input_map)
assert(batch[x].number_of_sequences == mb_size)
assert(batch[x].number_of_samples == mb_size * N_seq)
assert(batch[y].number_of_sequences == mb_size)
assert(batch[y].number_of_samples == mb_size)
#test StreamInformation without defines_mb_size set
d = SimpleDeserailizer(stream_infos=[StreamInformation('x', 0, 'dense', np.float32, (3,)),
StreamInformation('y', 1, 'dense', np.float32, (1,))],
chunk_data=chunk_data)
mbs = MinibatchSource([d], randomize=False)
input_map = {x: mbs['x'], y: mbs['y']}
batch = mbs.next_minibatch(mb_size, input_map)
# one sequence with 3 samples is retrieved
assert(batch[x].number_of_sequences == 1)
assert(batch[x].number_of_samples == N_seq)
assert(batch[y].number_of_sequences == 1)
assert(batch[y].number_of_samples == 1)
#test more than one defines_mb_size set
with pytest.raises(Exception):
d = SimpleDeserailizer(stream_infos=[StreamInformation('x', 0, 'dense', np.float32, (3,), True),
StreamInformation('y', 1, 'dense', np.float32, (1,), True)],
chunk_data=chunk_data)
mbs = MinibatchSource([d], randomize=False)
input_map = {x: mbs['x'], y: mbs['y']}
batch = mbs.next_minibatch(5, input_map)
# Helper generator
class GenDeserializer(UserDeserializer):
def __init__(self, stream_infos, num_chunks, num_sequences, max_sequence_len = 1):
super(GenDeserializer, self).__init__()
self._streams = stream_infos
self._num_chunks = num_chunks
self._num_sequences = num_sequences
self._max_sequence_len = max_sequence_len
self._chunk_list = []
def stream_infos(self):
        return self._streams
def num_chunks(self):
return self._num_chunks
def get_chunk(self, chunk_id):
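        # Every sample in a chunk is filled with the chunk_id itself, which lets
        # the tests count how many samples/sequences came from each chunk.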
import scipy.sparse as sp, random, functools
random.seed(chunk_id)
result = {}
for stream in self._streams:
total = functools.reduce(lambda x, y: x*y, stream.sample_shape)
count = 0
chunk = []
for i in range(self._num_sequences):
if self._max_sequence_len == 1:
shape = stream.sample_shape
else:
seq_len = random.randint(1, self._max_sequence_len)
shape = [seq_len]
shape.extend(stream.sample_shape)
shape = tuple(shape) if stream.storage_format == 'dense' else (shape[0], total)
if stream.storage_format == 'dense':
data = np.full(shape=shape, fill_value=chunk_id, dtype=np.float32)
else:
data = np.full(shape=shape, fill_value=chunk_id, dtype=np.float32)
data = sp.csr_matrix(data, shape=shape, dtype=np.float32)
chunk.append(data)
if stream.storage_format == 'dense':
result[stream.name] = chunk if self._max_sequence_len != 1 else np.stack(chunk)
else: # sparse
result[stream.name] = chunk if self._max_sequence_len != 1 else sp.csr_matrix(sp.vstack(chunk))
self._chunk_list.append(result)
return result
def get_chunks_list(self):
"""
Get the list of all chunks returned so far.
:return:
"""
return self._chunk_list
def test_user_deserializer_memory_leak():
"""
Test to check that reference counts of chunk objects and their members aren't increased in swig userdeserializer code.
:return:
"""
import sys
streams = [StreamInformation('x', 0, 'dense', np.float32, (2, 3)), StreamInformation('y', 1, 'sparse', np.float32, (1, 3))]
d = GenDeserializer(stream_infos=streams, num_chunks=5, num_sequences=100)
mbs = MinibatchSource([d], randomize=False, max_sweeps=2)
while True:
batch = mbs.next_minibatch(20)
if not batch:
break
all_chunks = d.get_chunks_list()
for chunk in all_chunks:
chunk_refs = sys.getrefcount(chunk)
        # 3, 4 or 5 = list member + local variable + getrefcount argument,
        # plus (sometimes) up to 2 extra references from interpreter optimizations
assert chunk_refs <= 5
for stream in streams:
stream_refs = sys.getrefcount(chunk[stream.name])
assert stream_refs == 2
if stream.storage_format == 'sparse':
indices_refs = sys.getrefcount(chunk[stream.name].indices)
indptr_refs = sys.getrefcount(chunk[stream.name].indptr)
data_refs = sys.getrefcount(chunk[stream.name].data)
# 2 = reference within sparse object + getrefcount argument
assert indices_refs == 2
assert indptr_refs == 2
assert data_refs == 2
else:
data_refs = sys.getrefcount(chunk[stream.name].data)
# 1 = getrefcount argument. A new memoryview object is created on every call to data, so no count from within object.
assert data_refs == 1
def test_user_deserializer_sample_mode():
import scipy.sparse as sp
streams = [StreamInformation('x', 0, 'dense', np.float32, (2, 3)),
StreamInformation('y', 1, 'sparse', np.float32, (1, 3))]
def run_minibatch_source(minibatch_source, num_chunks, num_samples_per_value):
sample_x_values = np.zeros(num_chunks, dtype=np.int32)
sample_y_values = np.zeros(num_chunks, dtype=np.int32)
mb_count = 0
while True:
if mb_count % 10 == 1: # perform checkpointing
checkpoint_state = minibatch_source.get_checkpoint_state()
for i in range(3):
minibatch_source.next_minibatch(20)
minibatch_source.restore_from_checkpoint(checkpoint_state)
mb_count +=1
continue
mb = minibatch_source.next_minibatch(20)
mb_count += 1
if not mb:
break
for sequence in mb[minibatch_source.streams.x].asarray():
for sample in sequence:
value = int(sample[0][0])
sample_x_values[value] += 1
for sequence in mb[minibatch_source.streams.y].asarray():
for sample in sequence:
value = int(sample[0][0])
sample_y_values[value] += 1
mb = None
expected_values = np.full(num_chunks, fill_value=num_samples_per_value, dtype=np.int32)
assert (sample_x_values == expected_values).all()
assert (sample_y_values == expected_values).all()
# Big chunks
d = GenDeserializer(stream_infos=streams, num_chunks=20, num_sequences=100)
mbs = MinibatchSource([d], randomize=False, max_sweeps=2)
run_minibatch_source(mbs, num_chunks=20, num_samples_per_value=200)
# Randomized
mbs = MinibatchSource([d], randomize=True, max_sweeps=2, randomization_window_in_chunks=5)
run_minibatch_source(mbs, num_chunks=20, num_samples_per_value=200)
# Small chunks of 1
d = GenDeserializer(stream_infos=streams, num_chunks=20, num_sequences=1)
mbs = MinibatchSource([d], randomize=False, max_sweeps=3)
run_minibatch_source(mbs, num_chunks=20, num_samples_per_value=3)
# Randomized
mbs = MinibatchSource([d], randomize=True, max_sweeps=3, randomization_window_in_chunks=5)
run_minibatch_source(mbs, num_chunks=20, num_samples_per_value=3)
def test_user_deserializer_sequence_mode():
import scipy.sparse as sp
streams = [StreamInformation('x', 0, 'dense', np.float32, (2, 3)),
StreamInformation('y', 1, 'sparse', np.float32, (3,))]
def run_minibatch_source(minibatch_source, num_chunks, num_sequences_per_value):
sequence_x_values = np.zeros(num_chunks, dtype=np.int32)
sequence_y_values = np.zeros(num_chunks, dtype=np.int32)
mb_count = 0
while True:
if mb_count % 10 == 1: # perform checkpointing
checkpoint_state = minibatch_source.get_checkpoint_state()
for i in range(3):
minibatch_source.next_minibatch(20)
minibatch_source.restore_from_checkpoint(checkpoint_state)
mb_count +=1
continue
mb = minibatch_source.next_minibatch(20)
mb_count += 1
if not mb:
break
for sequence in mb[minibatch_source.streams.x].asarray():
sequence_x_values[int(sequence[0][0][0])] +=1
for sequence in mb[minibatch_source.streams.y].as_sequences(C.sequence.input_variable((3,), True)):
sequence_y_values[int(sequence.toarray()[0][0])] += 1
mb = None
expected_values = np.full(num_chunks, fill_value=num_sequences_per_value, dtype=np.int32)
assert (sequence_x_values == expected_values).all()
assert (sequence_y_values == expected_values).all()
# Big chunks
d = GenDeserializer(stream_infos=streams, num_chunks=15,
num_sequences=100, max_sequence_len=10)
mbs = MinibatchSource([d], randomize=False, max_sweeps=2)
state = mbs.get_checkpoint_state()
mbs.restore_from_checkpoint(state)
run_minibatch_source(mbs, num_chunks=15, num_sequences_per_value=200)
# Randomized
mbs = MinibatchSource([d], randomize=True, max_sweeps=2, randomization_window_in_chunks=5)
state = mbs.get_checkpoint_state()
mbs.restore_from_checkpoint(state)
run_minibatch_source(mbs, num_chunks=15, num_sequences_per_value=200)
# Small chunks of 1
d = GenDeserializer(stream_infos=streams, num_chunks=15,
num_sequences=1, max_sequence_len=10)
mbs = MinibatchSource([d], randomize=False, max_sweeps=3)
run_minibatch_source(mbs, num_chunks=15, num_sequences_per_value=3)
# Randomized
mbs = MinibatchSource([d], randomize=True, max_sweeps=3, randomization_window_in_chunks=5)
run_minibatch_source(mbs, num_chunks=15, num_sequences_per_value=3)
def test_index_caching(tmpdir):
pytest.skip("test_index_caching is disabled")
import os, time, glob, uuid
MB = 1 << 20
data = MBDATA_DENSE_1
while(len(data) < 64 * MB):
data += data
timeWithoutCache, timeWithCache = 0, 0
cpu=C.device.cpu()
streams = stream_defs[0]
for _ in range(3):
tmpfile = _write_data(tmpdir, data, str(uuid.uuid4()))
cache_files = glob.glob(str(tmpdir + '/*.cache'))
for cache_file in cache_files:
os.remove(cache_file)
config = CTFDeserializer(tmpfile, streams)
config['cacheIndex'] = C.cntk_py.DictionaryValue(True)
start = time.time()
MinibatchSource(config, randomize=False).next_minibatch(1, device=cpu)
end = time.time()
timeWithoutCache += (end - start)
time.sleep(5)
cache_files = glob.glob(str(tmpdir + '/*.cache'))
assert len(cache_files) == 1
start = time.time()
MinibatchSource(config, randomize=False).next_minibatch(1, device=cpu)
end = time.time()
os.remove(tmpfile)
timeWithCache += (end - start)
assert timeWithCache < timeWithoutCache
def test_composite_source_synced_transforms(tmpdir):
from PIL import Image
np.random.seed(1)
tmpmap = str(tmpdir/'sync_test.map')
with open(tmpmap, 'w') as f:
for i in range(10):
data = np.random.randint(0, 2**8, (224,224,3))
image = Image.fromarray(data.astype('uint8'), "RGB")
tmpjpg = str(tmpdir/('%d.jpg'%i))
image.save(tmpjpg)
f.write("%s\t0\n"%tmpjpg)
def create_reader(map_file1, map_file2):
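        # Both deserializers apply the same random crop + scale transforms; the
        # composite source is expected to keep them in sync, so source and
        # target crops of identical images must match (squared error of zero).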
transforms = [xforms.crop(crop_type='randomside', side_ratio=0.8, jitter_type='uniratio'), xforms.scale(width=224, height=224, channels=3, interpolations='linear')]
source1 = C.io.ImageDeserializer(map_file1, C.io.StreamDefs(
source_image = C.io.StreamDef(field='image', transforms=transforms)))
source2 = C.io.ImageDeserializer(map_file2, C.io.StreamDefs(
target_image = C.io.StreamDef(field='image', transforms=transforms)))
return C.io.MinibatchSource([source1, source2], max_samples=sys.maxsize, randomize=True, multithreaded_deserializer=False)
x = C.input_variable((3,224,224))
y = C.input_variable((3,224,224))
loss = C.squared_error(x, y)
reader = create_reader(tmpmap, tmpmap)
minibatch_size = 2
input_map={
x: reader.streams.source_image,
y: reader.streams.target_image
}
for i in range(30):
data=reader.next_minibatch(minibatch_size, input_map=input_map)
assert np.allclose(loss.eval(data), np.zeros(minibatch_size))
|
tests/openbb_terminal/stocks/fundamental_analysis/test_av_model.py | tehcoderer/GamestonkTerminal | 255 | 12630631 | # IMPORTATION STANDARD
# IMPORTATION THIRDPARTY
import pytest
# IMPORTATION INTERNAL
from openbb_terminal.stocks.fundamental_analysis import av_model
@pytest.fixture(scope="module")
def vcr_config():
return {
"filter_headers": [("User-Agent", None)],
"filter_query_parameters": [
("apikey", "MOCK_API_KEY"),
],
}
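# requests.get is patched to return the "Error Message" payload Alpha Vantage
# sends on failure, and each av_model helper is expected to return an empty
# DataFrame.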
@pytest.mark.vcr(record_mode="none")
@pytest.mark.parametrize(
"func, kwargs_dict",
[
("get_overview", {"ticker": "TSLA"}),
("get_key_metrics", {"ticker": "TSLA"}),
("get_earnings", {"ticker": "TSLA", "quarterly": True}),
],
)
def test_invalid_response_status(func, kwargs_dict, mocker):
# MOCK GET
attrs = {
"json.return_value": {"Error Message": "mock error message"},
}
mock_response = mocker.Mock(**attrs)
mocker.patch(
target="requests.get",
new=mocker.Mock(return_value=mock_response),
)
result_df = getattr(av_model, func)(**kwargs_dict)
assert result_df.empty
@pytest.mark.vcr
@pytest.mark.parametrize(
"func, kwargs_dict",
[
("get_overview", {"ticker": "TSLA"}),
("get_key_metrics", {"ticker": "TSLA"}),
("get_earnings", {"ticker": "TSLA", "quarterly": True}),
("get_earnings", {"ticker": "TSLA", "quarterly": False}),
("get_income_statements", {"ticker": "TSLA", "number": 5, "quarterly": True}),
("get_income_statements", {"ticker": "TSLA", "number": 5, "quarterly": False}),
("get_balance_sheet", {"ticker": "TSLA", "number": 5, "quarterly": True}),
("get_balance_sheet", {"ticker": "TSLA", "number": 5, "quarterly": False}),
("get_cash_flow", {"ticker": "TSLA", "number": 5, "quarterly": True}),
("get_cash_flow", {"ticker": "TSLA", "number": 5, "quarterly": False}),
],
)
def test_check_output(func, kwargs_dict, recorder):
result_df = getattr(av_model, func)(**kwargs_dict)
recorder.capture(result_df)
@pytest.mark.vcr
def test_get_fraud_ratios(recorder):
result_df = av_model.get_fraud_ratios(ticker="TSLA")
recorder.capture(result_df)
@pytest.mark.vcr
def test_get_dupont(recorder):
result_df = av_model.get_dupont(ticker="TSLA")
recorder.capture(result_df)
|
628 Maximum Product of Three Numbers.py | krishna13052001/LeetCode | 872 | 12630660 | <reponame>krishna13052001/LeetCode
#!/usr/bin/python3
"""
Given an integer array, find three numbers whose product is maximum and output
the maximum product.
Example 1:
Input: [1,2,3]
Output: 6
Example 2:
Input: [1,2,3,4]
Output: 24
Note:
The length of the given array will be in range [3, 10^4] and all elements are
in the range [-1000, 1000].
Multiplication of any three numbers in the input won't exceed the range of
32-bit signed integer.
"""
import heapq
from typing import List
class Solution:
def maximumProduct(self, nums: List[int]) -> int:
"""
heapq nlargest nsmallest
"""
mxes = heapq.nlargest(3, nums)
mns = heapq.nsmallest(3, nums)
return max(
mxes[0] * mxes[1] * mxes[2],
mns[0] * mns[1] * mxes[0],
)
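# Example usage: the first two expected outputs follow from the examples in the
# docstring above, plus one case where the two most negative values pair with
# the largest.
if __name__ == "__main__":
    assert Solution().maximumProduct([1, 2, 3]) == 6
    assert Solution().maximumProduct([1, 2, 3, 4]) == 24
    assert Solution().maximumProduct([-10, -10, 5, 2]) == 500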
|